id
stringlengths 23
25
| content
stringlengths 1.16k
88k
| max_stars_repo_path
stringlengths 12
48
|
|---|---|---|
codereval_python_data_1
|
Hydrator for `Time` and `LocalTime` values.
:param nanoseconds:
:param tz:
:return: Time
def hydrate_time(nanoseconds, tz=None):
    """ Hydrator for `Time` and `LocalTime` values.

    :param nanoseconds: nanoseconds since midnight
    :param tz: optional UTC offset in seconds
    :return: Time (tz-aware when ``tz`` is given)
    """
    from pytz import FixedOffset
    seconds, nanoseconds = map(int, divmod(nanoseconds, 1000000000))
    minutes, seconds = map(int, divmod(seconds, 60))
    hours, minutes = map(int, divmod(minutes, 60))
    t = Time(hours, minutes, seconds, nanoseconds)
    if tz is None:
        return t
    # pytz.FixedOffset only supports whole minutes; any sub-minute part of
    # the offset is dropped (it was previously computed and then ignored).
    zone = FixedOffset(tz // 60)
    return zone.localize(t)
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import (
datetime,
time,
timedelta,
)
from ....time import (
Date,
DateTime,
Duration,
Time,
)
from ...packstream import Structure
def get_date_unix_epoch():
    # Unix epoch (1970-01-01) as a neo4j.time.Date.
    return Date(1970, 1, 1)
def get_date_unix_epoch_ordinal():
    # Ordinal of the Unix epoch date; used as the base for Bolt `Date` offsets.
    return get_date_unix_epoch().to_ordinal()
def get_datetime_unix_epoch_utc():
    # pytz is imported lazily to avoid a hard import-time dependency.
    from pytz import utc
    # Unix epoch as a timezone-aware (UTC) neo4j.time.DateTime.
    return DateTime(1970, 1, 1, 0, 0, 0, utc)
def hydrate_date(days):
    """ Hydrator for `Date` values.

    :param days: days since the Unix epoch (1970-01-01)
    :return: Date
    """
    epoch_ordinal = get_date_unix_epoch_ordinal()
    return Date.from_ordinal(epoch_ordinal + days)
def dehydrate_date(value):
    """ Dehydrator for `date` values.

    :param value:
    :type value: Date
    :return: Structure holding the day count since the Unix epoch
    """
    epoch_days = get_date_unix_epoch().toordinal()
    return Structure(b"D", value.toordinal() - epoch_days)
def hydrate_time(nanoseconds, tz=None):
    """ Hydrator for `Time` and `LocalTime` values.

    :param nanoseconds: nanoseconds since midnight
    :param tz: optional UTC offset in seconds
    :return: Time (tz-aware when ``tz`` is given)
    """
    from pytz import FixedOffset
    seconds, nanoseconds = map(int, divmod(nanoseconds, 1000000000))
    minutes, seconds = map(int, divmod(seconds, 60))
    hours, minutes = map(int, divmod(minutes, 60))
    t = Time(hours, minutes, seconds, nanoseconds)
    if tz is None:
        return t
    # pytz.FixedOffset only supports whole minutes; any sub-minute part of
    # the offset is dropped (it was previously computed and then ignored).
    zone = FixedOffset(tz // 60)
    return zone.localize(t)
def dehydrate_time(value):
    """ Dehydrator for `time` values.

    :param value:
    :type value: Time
    :return: Structure — tag b"T" with a UTC offset, b"t" without one
    :raises TypeError: for anything that is not a Time or datetime.time
    """
    if isinstance(value, Time):
        nanoseconds = value.ticks
    elif isinstance(value, time):
        nanoseconds = value.microsecond * 1000
        nanoseconds += value.second * 1000000000
        nanoseconds += value.minute * 60000000000
        nanoseconds += value.hour * 3600000000000
    else:
        raise TypeError("Value must be a neo4j.time.Time or a datetime.time")
    tzinfo = value.tzinfo
    if not tzinfo:
        return Structure(b"t", nanoseconds)
    offset_seconds = int(tzinfo.utcoffset(value).total_seconds())
    return Structure(b"T", nanoseconds, offset_seconds)
def hydrate_datetime(seconds, nanoseconds, tz=None):
    """ Hydrator for `DateTime` and `LocalDateTime` values.

    :param seconds: seconds since the Unix epoch
    :param nanoseconds: sub-second part, in nanoseconds
    :param tz: optional zone — UTC offset in seconds (int) or zone name (str)
    :return: datetime
    """
    from pytz import (
        FixedOffset,
        timezone,
    )
    minutes, seconds = map(int, divmod(seconds, 60))
    hours, minutes = map(int, divmod(minutes, 60))
    days, hours = map(int, divmod(hours, 24))
    t = DateTime.combine(
        Date.from_ordinal(get_date_unix_epoch_ordinal() + days),
        Time(hours, minutes, seconds, nanoseconds)
    )
    if tz is None:
        return t
    if isinstance(tz, int):
        # pytz.FixedOffset only supports whole minutes; any sub-minute part
        # of the offset is dropped (previously computed and then ignored).
        zone = FixedOffset(tz // 60)
    else:
        zone = timezone(tz)
    return zone.localize(t)
def dehydrate_datetime(value):
    """ Dehydrator for `datetime` values.

    :param value:
    :type value: datetime or DateTime
    :return: Structure — tag b"d" without a zone, b"f" for a named zone,
        b"F" for a fixed UTC offset
    """

    def seconds_and_nanoseconds(dt):
        # Normalize to the driver's DateTime so clock-time arithmetic works
        # for both native and neo4j datetimes.
        if isinstance(dt, datetime):
            dt = DateTime.from_native(dt)
        # Difference is taken against an epoch carrying dt's own tzinfo,
        # i.e. a wall-clock delta in that zone rather than a UTC instant.
        zone_epoch = DateTime(1970, 1, 1, tzinfo=dt.tzinfo)
        dt_clock_time = dt.to_clock_time()
        zone_epoch_clock_time = zone_epoch.to_clock_time()
        t = dt_clock_time - zone_epoch_clock_time
        return t.seconds, t.nanoseconds

    tz = value.tzinfo
    if tz is None:
        # without time zone: localized to UTC purely to reuse the helper
        from pytz import utc
        value = utc.localize(value)
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"d", seconds, nanoseconds)
    elif hasattr(tz, "zone") and tz.zone and isinstance(tz.zone, str):
        # with named pytz time zone
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"f", seconds, nanoseconds, tz.zone)
    elif hasattr(tz, "key") and tz.key and isinstance(tz.key, str):
        # with named zoneinfo (Python 3.9+) time zone
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"f", seconds, nanoseconds, tz.key)
    else:
        # with time offset
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"F", seconds, nanoseconds,
                         int(tz.utcoffset(value).total_seconds()))
def hydrate_duration(months, days, seconds, nanoseconds):
    """ Hydrator for `Duration` values.

    :param months:
    :param days:
    :param seconds:
    :param nanoseconds:
    :return: `duration` namedtuple
    """
    return Duration(
        months=months,
        days=days,
        seconds=seconds,
        nanoseconds=nanoseconds,
    )
def dehydrate_duration(value):
    """ Dehydrator for `duration` values.

    :param value:
    :type value: Duration
    :return: Structure with months, days, seconds and nanoseconds fields
    """
    fields = (value.months, value.days, value.seconds, value.nanoseconds)
    return Structure(b"E", *fields)
def dehydrate_timedelta(value):
    """ Dehydrator for `timedelta` values.

    :param value:
    :type value: timedelta
    :return: Structure in the Duration wire format (months is always 0)
    """
    return Structure(
        b"E",
        0,                          # timedelta has no month component
        value.days,
        value.seconds,
        value.microseconds * 1000,  # microseconds -> nanoseconds
    )
|
neo4j/_codec/hydration/v1/temporal.py
|
codereval_python_data_2
|
Dehydrator for `timedelta` values.
:param value:
:type value: timedelta
:return:
def dehydrate_timedelta(value):
    """ Dehydrator for `timedelta` values.

    :param value:
    :type value: timedelta
    :return: Structure in the Duration wire format (months is always 0)
    """
    return Structure(
        b"E",
        0,                          # timedelta has no month component
        value.days,
        value.seconds,
        value.microseconds * 1000,  # microseconds -> nanoseconds
    )
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import (
datetime,
time,
timedelta,
)
from ....time import (
Date,
DateTime,
Duration,
Time,
)
from ...packstream import Structure
def get_date_unix_epoch():
    # Unix epoch (1970-01-01) as a neo4j.time.Date.
    return Date(1970, 1, 1)
def get_date_unix_epoch_ordinal():
    # Ordinal of the Unix epoch date; used as the base for Bolt `Date` offsets.
    return get_date_unix_epoch().to_ordinal()
def get_datetime_unix_epoch_utc():
    # pytz is imported lazily to avoid a hard import-time dependency.
    from pytz import utc
    # Unix epoch as a timezone-aware (UTC) neo4j.time.DateTime.
    return DateTime(1970, 1, 1, 0, 0, 0, utc)
def hydrate_date(days):
    """ Hydrator for `Date` values.

    :param days: days since the Unix epoch (1970-01-01)
    :return: Date
    """
    epoch_ordinal = get_date_unix_epoch_ordinal()
    return Date.from_ordinal(epoch_ordinal + days)
def dehydrate_date(value):
    """ Dehydrator for `date` values.

    :param value:
    :type value: Date
    :return: Structure holding the day count since the Unix epoch
    """
    epoch_days = get_date_unix_epoch().toordinal()
    return Structure(b"D", value.toordinal() - epoch_days)
def hydrate_time(nanoseconds, tz=None):
    """ Hydrator for `Time` and `LocalTime` values.

    :param nanoseconds: nanoseconds since midnight
    :param tz: optional UTC offset in seconds
    :return: Time (tz-aware when ``tz`` is given)
    """
    from pytz import FixedOffset
    seconds, nanoseconds = map(int, divmod(nanoseconds, 1000000000))
    minutes, seconds = map(int, divmod(seconds, 60))
    hours, minutes = map(int, divmod(minutes, 60))
    t = Time(hours, minutes, seconds, nanoseconds)
    if tz is None:
        return t
    # pytz.FixedOffset only supports whole minutes; any sub-minute part of
    # the offset is dropped (it was previously computed and then ignored).
    zone = FixedOffset(tz // 60)
    return zone.localize(t)
def dehydrate_time(value):
    """ Dehydrator for `time` values.

    :param value:
    :type value: Time
    :return: Structure — tag b"T" with a UTC offset, b"t" without one
    :raises TypeError: for anything that is not a Time or datetime.time
    """
    if isinstance(value, Time):
        nanoseconds = value.ticks
    elif isinstance(value, time):
        nanoseconds = value.microsecond * 1000
        nanoseconds += value.second * 1000000000
        nanoseconds += value.minute * 60000000000
        nanoseconds += value.hour * 3600000000000
    else:
        raise TypeError("Value must be a neo4j.time.Time or a datetime.time")
    tzinfo = value.tzinfo
    if not tzinfo:
        return Structure(b"t", nanoseconds)
    offset_seconds = int(tzinfo.utcoffset(value).total_seconds())
    return Structure(b"T", nanoseconds, offset_seconds)
def hydrate_datetime(seconds, nanoseconds, tz=None):
    """ Hydrator for `DateTime` and `LocalDateTime` values.

    :param seconds: seconds since the Unix epoch
    :param nanoseconds: sub-second part, in nanoseconds
    :param tz: optional zone — UTC offset in seconds (int) or zone name (str)
    :return: datetime
    """
    from pytz import (
        FixedOffset,
        timezone,
    )
    minutes, seconds = map(int, divmod(seconds, 60))
    hours, minutes = map(int, divmod(minutes, 60))
    days, hours = map(int, divmod(hours, 24))
    t = DateTime.combine(
        Date.from_ordinal(get_date_unix_epoch_ordinal() + days),
        Time(hours, minutes, seconds, nanoseconds)
    )
    if tz is None:
        return t
    if isinstance(tz, int):
        # pytz.FixedOffset only supports whole minutes; any sub-minute part
        # of the offset is dropped (previously computed and then ignored).
        zone = FixedOffset(tz // 60)
    else:
        zone = timezone(tz)
    return zone.localize(t)
def dehydrate_datetime(value):
    """ Dehydrator for `datetime` values.

    :param value:
    :type value: datetime or DateTime
    :return: Structure — tag b"d" without a zone, b"f" for a named zone,
        b"F" for a fixed UTC offset
    """

    def seconds_and_nanoseconds(dt):
        # Normalize to the driver's DateTime so clock-time arithmetic works
        # for both native and neo4j datetimes.
        if isinstance(dt, datetime):
            dt = DateTime.from_native(dt)
        # Difference is taken against an epoch carrying dt's own tzinfo,
        # i.e. a wall-clock delta in that zone rather than a UTC instant.
        zone_epoch = DateTime(1970, 1, 1, tzinfo=dt.tzinfo)
        dt_clock_time = dt.to_clock_time()
        zone_epoch_clock_time = zone_epoch.to_clock_time()
        t = dt_clock_time - zone_epoch_clock_time
        return t.seconds, t.nanoseconds

    tz = value.tzinfo
    if tz is None:
        # without time zone: localized to UTC purely to reuse the helper
        from pytz import utc
        value = utc.localize(value)
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"d", seconds, nanoseconds)
    elif hasattr(tz, "zone") and tz.zone and isinstance(tz.zone, str):
        # with named pytz time zone
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"f", seconds, nanoseconds, tz.zone)
    elif hasattr(tz, "key") and tz.key and isinstance(tz.key, str):
        # with named zoneinfo (Python 3.9+) time zone
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"f", seconds, nanoseconds, tz.key)
    else:
        # with time offset
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"F", seconds, nanoseconds,
                         int(tz.utcoffset(value).total_seconds()))
def hydrate_duration(months, days, seconds, nanoseconds):
    """ Hydrator for `Duration` values.

    :param months:
    :param days:
    :param seconds:
    :param nanoseconds:
    :return: `duration` namedtuple
    """
    return Duration(
        months=months,
        days=days,
        seconds=seconds,
        nanoseconds=nanoseconds,
    )
def dehydrate_duration(value):
    """ Dehydrator for `duration` values.

    :param value:
    :type value: Duration
    :return: Structure with months, days, seconds and nanoseconds fields
    """
    fields = (value.months, value.days, value.seconds, value.nanoseconds)
    return Structure(b"E", *fields)
def dehydrate_timedelta(value):
    """ Dehydrator for `timedelta` values.

    :param value:
    :type value: timedelta
    :return: Structure in the Duration wire format (months is always 0)
    """
    return Structure(
        b"E",
        0,                          # timedelta has no month component
        value.days,
        value.seconds,
        value.microseconds * 1000,  # microseconds -> nanoseconds
    )
|
neo4j/_codec/hydration/v1/temporal.py
|
codereval_python_data_3
|
Dehydrator for `time` values.
:param value:
:type value: Time
:return:
def dehydrate_time(value):
    """ Dehydrator for `time` values.

    :param value:
    :type value: Time
    :return: Structure — tag b"T" with a UTC offset, b"t" without one
    :raises TypeError: for anything that is not a Time or datetime.time
    """
    if isinstance(value, Time):
        nanoseconds = value.ticks
    elif isinstance(value, time):
        nanoseconds = value.microsecond * 1000
        nanoseconds += value.second * 1000000000
        nanoseconds += value.minute * 60000000000
        nanoseconds += value.hour * 3600000000000
    else:
        raise TypeError("Value must be a neo4j.time.Time or a datetime.time")
    tzinfo = value.tzinfo
    if not tzinfo:
        return Structure(b"t", nanoseconds)
    offset_seconds = int(tzinfo.utcoffset(value).total_seconds())
    return Structure(b"T", nanoseconds, offset_seconds)
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import (
datetime,
time,
timedelta,
)
from ....time import (
Date,
DateTime,
Duration,
Time,
)
from ...packstream import Structure
def get_date_unix_epoch():
    # Unix epoch (1970-01-01) as a neo4j.time.Date.
    return Date(1970, 1, 1)
def get_date_unix_epoch_ordinal():
    # Ordinal of the Unix epoch date; used as the base for Bolt `Date` offsets.
    return get_date_unix_epoch().to_ordinal()
def get_datetime_unix_epoch_utc():
    # pytz is imported lazily to avoid a hard import-time dependency.
    from pytz import utc
    # Unix epoch as a timezone-aware (UTC) neo4j.time.DateTime.
    return DateTime(1970, 1, 1, 0, 0, 0, utc)
def hydrate_date(days):
    """ Hydrator for `Date` values.

    :param days: days since the Unix epoch (1970-01-01)
    :return: Date
    """
    epoch_ordinal = get_date_unix_epoch_ordinal()
    return Date.from_ordinal(epoch_ordinal + days)
def dehydrate_date(value):
    """ Dehydrator for `date` values.

    :param value:
    :type value: Date
    :return: Structure holding the day count since the Unix epoch
    """
    epoch_days = get_date_unix_epoch().toordinal()
    return Structure(b"D", value.toordinal() - epoch_days)
def hydrate_time(nanoseconds, tz=None):
    """ Hydrator for `Time` and `LocalTime` values.

    :param nanoseconds: nanoseconds since midnight
    :param tz: optional UTC offset in seconds
    :return: Time (tz-aware when ``tz`` is given)
    """
    from pytz import FixedOffset
    seconds, nanoseconds = map(int, divmod(nanoseconds, 1000000000))
    minutes, seconds = map(int, divmod(seconds, 60))
    hours, minutes = map(int, divmod(minutes, 60))
    t = Time(hours, minutes, seconds, nanoseconds)
    if tz is None:
        return t
    # pytz.FixedOffset only supports whole minutes; any sub-minute part of
    # the offset is dropped (it was previously computed and then ignored).
    zone = FixedOffset(tz // 60)
    return zone.localize(t)
def dehydrate_time(value):
    """ Dehydrator for `time` values.

    :param value:
    :type value: Time
    :return: Structure — tag b"T" with a UTC offset, b"t" without one
    :raises TypeError: for anything that is not a Time or datetime.time
    """
    if isinstance(value, Time):
        nanoseconds = value.ticks
    elif isinstance(value, time):
        nanoseconds = value.microsecond * 1000
        nanoseconds += value.second * 1000000000
        nanoseconds += value.minute * 60000000000
        nanoseconds += value.hour * 3600000000000
    else:
        raise TypeError("Value must be a neo4j.time.Time or a datetime.time")
    tzinfo = value.tzinfo
    if not tzinfo:
        return Structure(b"t", nanoseconds)
    offset_seconds = int(tzinfo.utcoffset(value).total_seconds())
    return Structure(b"T", nanoseconds, offset_seconds)
def hydrate_datetime(seconds, nanoseconds, tz=None):
    """ Hydrator for `DateTime` and `LocalDateTime` values.

    :param seconds: seconds since the Unix epoch
    :param nanoseconds: sub-second part, in nanoseconds
    :param tz: optional zone — UTC offset in seconds (int) or zone name (str)
    :return: datetime
    """
    from pytz import (
        FixedOffset,
        timezone,
    )
    minutes, seconds = map(int, divmod(seconds, 60))
    hours, minutes = map(int, divmod(minutes, 60))
    days, hours = map(int, divmod(hours, 24))
    t = DateTime.combine(
        Date.from_ordinal(get_date_unix_epoch_ordinal() + days),
        Time(hours, minutes, seconds, nanoseconds)
    )
    if tz is None:
        return t
    if isinstance(tz, int):
        # pytz.FixedOffset only supports whole minutes; any sub-minute part
        # of the offset is dropped (previously computed and then ignored).
        zone = FixedOffset(tz // 60)
    else:
        zone = timezone(tz)
    return zone.localize(t)
def dehydrate_datetime(value):
    """ Dehydrator for `datetime` values.

    :param value:
    :type value: datetime or DateTime
    :return: Structure — tag b"d" without a zone, b"f" for a named zone,
        b"F" for a fixed UTC offset
    """

    def seconds_and_nanoseconds(dt):
        # Normalize to the driver's DateTime so clock-time arithmetic works
        # for both native and neo4j datetimes.
        if isinstance(dt, datetime):
            dt = DateTime.from_native(dt)
        # Difference is taken against an epoch carrying dt's own tzinfo,
        # i.e. a wall-clock delta in that zone rather than a UTC instant.
        zone_epoch = DateTime(1970, 1, 1, tzinfo=dt.tzinfo)
        dt_clock_time = dt.to_clock_time()
        zone_epoch_clock_time = zone_epoch.to_clock_time()
        t = dt_clock_time - zone_epoch_clock_time
        return t.seconds, t.nanoseconds

    tz = value.tzinfo
    if tz is None:
        # without time zone: localized to UTC purely to reuse the helper
        from pytz import utc
        value = utc.localize(value)
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"d", seconds, nanoseconds)
    elif hasattr(tz, "zone") and tz.zone and isinstance(tz.zone, str):
        # with named pytz time zone
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"f", seconds, nanoseconds, tz.zone)
    elif hasattr(tz, "key") and tz.key and isinstance(tz.key, str):
        # with named zoneinfo (Python 3.9+) time zone
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"f", seconds, nanoseconds, tz.key)
    else:
        # with time offset
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"F", seconds, nanoseconds,
                         int(tz.utcoffset(value).total_seconds()))
def hydrate_duration(months, days, seconds, nanoseconds):
    """ Hydrator for `Duration` values.

    :param months:
    :param days:
    :param seconds:
    :param nanoseconds:
    :return: `duration` namedtuple
    """
    return Duration(
        months=months,
        days=days,
        seconds=seconds,
        nanoseconds=nanoseconds,
    )
def dehydrate_duration(value):
    """ Dehydrator for `duration` values.

    :param value:
    :type value: Duration
    :return: Structure with months, days, seconds and nanoseconds fields
    """
    fields = (value.months, value.days, value.seconds, value.nanoseconds)
    return Structure(b"E", *fields)
def dehydrate_timedelta(value):
    """ Dehydrator for `timedelta` values.

    :param value:
    :type value: timedelta
    :return: Structure in the Duration wire format (months is always 0)
    """
    return Structure(
        b"E",
        0,                          # timedelta has no month component
        value.days,
        value.seconds,
        value.microseconds * 1000,  # microseconds -> nanoseconds
    )
|
neo4j/_codec/hydration/v1/temporal.py
|
codereval_python_data_4
|
Dehydrator for Point data.
:param value:
:type value: Point
:return:
def dehydrate_point(value):
    """ Dehydrator for Point data.

    :param value:
    :type value: Point
    :return:
    :raises ValueError: for points that are not 2- or 3-dimensional
    """
    dim = len(value)
    if dim not in (2, 3):
        raise ValueError("Cannot dehydrate Point with %d dimensions" % dim)
    tag = b"X" if dim == 2 else b"Y"
    return Structure(tag, value.srid, *value)
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...._spatial import (
Point,
srid_table,
)
from ...packstream import Structure
def hydrate_point(srid, *coordinates):
    """ Create a new instance of a Point subclass from a raw
    set of fields. The subclass chosen is determined by the
    given SRID; a ValueError will be raised if no such
    subclass can be found.
    """
    entry = srid_table.get(srid)
    if entry is None:
        # Unknown SRID: fall back to a generic Point tagged with the SRID.
        point = Point(coordinates)
        point.srid = srid
        return point
    point_class, dim = entry
    if len(coordinates) != dim:
        raise ValueError("SRID %d requires %d coordinates (%d provided)" % (srid, dim, len(coordinates)))
    return point_class(coordinates)
def dehydrate_point(value):
    """ Dehydrator for Point data.

    :param value:
    :type value: Point
    :return:
    :raises ValueError: for points that are not 2- or 3-dimensional
    """
    dim = len(value)
    if dim not in (2, 3):
        raise ValueError("Cannot dehydrate Point with %d dimensions" % dim)
    tag = b"X" if dim == 2 else b"Y"
    return Structure(tag, value.srid, *value)
# Explicit public API of this spatial hydration module.
__all__ = [
    "hydrate_point",
    "dehydrate_point",
]
|
neo4j/_codec/hydration/v1/spatial.py
|
codereval_python_data_5
|
Return the keys of the record.
:return: list of key names
def keys(self):
    """ Return the keys of the record.

    :return: list of key names
    """
    return [*self.__keys]
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import (
ABCMeta,
abstractmethod,
)
from collections.abc import (
Mapping,
Sequence,
Set,
)
from functools import reduce
from operator import xor as xor_operator
from ._codec.hydration import BrokenHydrationObject
from ._conf import iter_items
from ._meta import deprecated
from .exceptions import BrokenRecordError
from .graph import (
Node,
Path,
Relationship,
)
class Record(tuple, Mapping):
    """ A :class:`.Record` is an immutable ordered collection of key-value
    pairs. It is generally closer to a :py:class:`namedtuple` than to a
    :py:class:`OrderedDict` in as much as iteration of the collection will
    yield values rather than keys.
    """

    # Tuple of field names; assigned per instance in __new__
    # (name-mangled to _Record__keys).
    __keys = None

    def __new__(cls, iterable=()):
        keys = []
        values = []
        for key, value in iter_items(iterable):
            keys.append(key)
            values.append(value)
        inst = tuple.__new__(cls, values)
        inst.__keys = tuple(keys)
        return inst

    def _broken_record_error(self, index):
        # Build (but do not raise) the error for a field whose hydration failed.
        return BrokenRecordError(
            f"Record contains broken data at {index} ('{self.__keys[index]}')"
        )

    def _super_getitem_single(self, index):
        # Raw tuple access that converts a BrokenHydrationObject placeholder
        # into a BrokenRecordError chained to the original hydration error.
        value = super().__getitem__(index)
        if isinstance(value, BrokenHydrationObject):
            raise self._broken_record_error(index) from value.error
        return value

    def __repr__(self):
        return "<%s %s>" % (
            self.__class__.__name__,
            " ".join("%s=%r" % (field, value)
                     for field, value in zip(self.__keys, super().__iter__()))
        )

    def __str__(self):
        return self.__repr__()

    def __eq__(self, other):
        """ In order to be flexible regarding comparison, the equality rules
        for a record permit comparison with any other Sequence or Mapping.

        :param other:
        :return:
        """
        compare_as_sequence = isinstance(other, Sequence)
        compare_as_mapping = isinstance(other, Mapping)
        if compare_as_sequence and compare_as_mapping:
            return list(self) == list(other) and dict(self) == dict(other)
        elif compare_as_sequence:
            return list(self) == list(other)
        elif compare_as_mapping:
            return dict(self) == dict(other)
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # XOR-fold the hashes of all (key, value) pairs.
        return reduce(xor_operator, map(hash, self.items()))

    def __iter__(self):
        for i, v in enumerate(super().__iter__()):
            if isinstance(v, BrokenHydrationObject):
                raise self._broken_record_error(i) from v.error
            yield v

    def __getitem__(self, key):
        if isinstance(key, slice):
            keys = self.__keys[key]
            values = super().__getitem__(key)
            return self.__class__(zip(keys, values))
        try:
            index = self.index(key)
        except IndexError:
            return None
        else:
            return self._super_getitem_single(index)

    # TODO: 6.0 - remove
    @deprecated("This method is deprecated and will be removed in the future.")
    def __getslice__(self, start, stop):
        key = slice(start, stop)
        keys = self.__keys[key]
        values = tuple(self)[key]
        return self.__class__(zip(keys, values))

    def get(self, key, default=None):
        """ Obtain a value from the record by key, returning a default
        value if the key does not exist.

        :param key: a key
        :param default: default value
        :return: a value
        """
        # NOTE(review): the key is coerced with str() before lookup, unlike
        # index()/__getitem__ which require an exact str — confirm intended.
        try:
            index = self.__keys.index(str(key))
        except ValueError:
            return default
        if 0 <= index < len(self):
            return self._super_getitem_single(index)
        else:
            return default

    def index(self, key):
        """ Return the index of the given item.

        :param key: a key
        :return: index
        :rtype: int
        """
        if isinstance(key, int):
            if 0 <= key < len(self.__keys):
                return key
            raise IndexError(key)
        elif isinstance(key, str):
            try:
                return self.__keys.index(key)
            except ValueError:
                raise KeyError(key)
        else:
            raise TypeError(key)

    def value(self, key=0, default=None):
        """ Obtain a single value from the record by index or key. If no
        index or key is specified, the first value is returned. If the
        specified item does not exist, the default value is returned.

        :param key: an index or key
        :param default: default value
        :return: a single value
        """
        try:
            index = self.index(key)
        except (IndexError, KeyError):
            return default
        else:
            return self[index]

    def keys(self):
        """ Return the keys of the record.

        :return: list of key names
        """
        return list(self.__keys)

    def values(self, *keys):
        """ Return the values of the record, optionally filtering to
        include only certain values by index or key.

        :param keys: indexes or keys of the items to include; if none
                     are provided, all values will be included
        :return: list of values
        :rtype: list
        """
        if keys:
            d = []
            for key in keys:
                try:
                    i = self.index(key)
                except KeyError:
                    # Unknown keys contribute None rather than raising.
                    d.append(None)
                else:
                    d.append(self[i])
            return d
        return list(self)

    def items(self, *keys):
        """ Return the fields of the record as a list of key and value tuples

        :return: a list of value tuples
        :rtype: list
        """
        if keys:
            d = []
            for key in keys:
                try:
                    i = self.index(key)
                except KeyError:
                    d.append((key, None))
                else:
                    d.append((self.__keys[i], self[i]))
            return d
        return list((self.__keys[i], self._super_getitem_single(i))
                    for i in range(len(self)))

    def data(self, *keys):
        """ Return the keys and values of this record as a dictionary,
        optionally including only certain values by index or key. Keys
        provided in the items that are not in the record will be
        inserted with a value of :const:`None`; indexes provided
        that are out of bounds will trigger an :exc:`IndexError`.

        :param keys: indexes or keys of the items to include; if none
                     are provided, all values will be included
        :return: dictionary of values, keyed by field name
        :raises: :exc:`IndexError` if an out-of-bounds index is specified
        """
        return RecordExporter().transform(dict(self.items(*keys)))
class DataTransformer(metaclass=ABCMeta):
    """ Abstract base class for transforming data from one form into
    another.
    """

    @abstractmethod
    def transform(self, x):
        """ Transform a value, or collection of values.

        :param x: input value
        :return: output value
        """
class RecordExporter(DataTransformer):
    """ Transformer class used by the :meth:`.Record.data` method.
    """

    def transform(self, x):
        # Graph entities become plain Python structures; containers are
        # rebuilt with their original type after transforming members.
        if isinstance(x, Node):
            return self.transform(dict(x))
        elif isinstance(x, Relationship):
            # A relationship becomes a 3-tuple:
            # (start node properties, relationship type name, end node properties)
            return (self.transform(dict(x.start_node)),
                    x.__class__.__name__,
                    self.transform(dict(x.end_node)))
        elif isinstance(x, Path):
            # Alternate node property dicts and relationship type names.
            path = [self.transform(x.start_node)]
            for i, relationship in enumerate(x.relationships):
                path.append(self.transform(relationship.__class__.__name__))
                path.append(self.transform(x.nodes[i + 1]))
            return path
        elif isinstance(x, str):
            # str is itself a Sequence; must be handled before that branch.
            return x
        elif isinstance(x, Sequence):
            t = type(x)
            return t(map(self.transform, x))
        elif isinstance(x, Set):
            t = type(x)
            return t(map(self.transform, x))
        elif isinstance(x, Mapping):
            t = type(x)
            return t((k, self.transform(v)) for k, v in x.items())
        else:
            return x
class RecordTableRowExporter(DataTransformer):
    """Transformer class used by the :meth:`.Result.to_df` method."""

    def transform(self, x):
        # Flatten one record (a mapping) into a single flat dict whose keys
        # encode the nesting path; "\" and "." inside keys are escaped because
        # "." acts as the path separator.
        assert isinstance(x, Mapping)
        t = type(x)
        return t(item
                 for k, v in x.items()
                 for item in self._transform(
                     v, prefix=k.replace("\\", "\\\\").replace(".", "\\.")
                 ).items())

    def _transform(self, x, prefix):
        # Recursively flatten a single value under the given column prefix.
        if isinstance(x, Node):
            res = {
                "%s().element_id" % prefix: x.element_id,
                "%s().labels" % prefix: x.labels,
            }
            res.update(("%s().prop.%s" % (prefix, k), v) for k, v in x.items())
            return res
        elif isinstance(x, Relationship):
            res = {
                "%s->.element_id" % prefix: x.element_id,
                "%s->.start.element_id" % prefix: x.start_node.element_id,
                "%s->.end.element_id" % prefix: x.end_node.element_id,
                "%s->.type" % prefix: x.__class__.__name__,
            }
            res.update(("%s->.prop.%s" % (prefix, k), v) for k, v in x.items())
            return res
        elif isinstance(x, Path) or isinstance(x, str):
            # Paths and strings are kept as single opaque column values.
            return {prefix: x}
        elif isinstance(x, Sequence):
            return dict(
                item
                for i, v in enumerate(x)
                for item in self._transform(
                    v, prefix="%s[].%i" % (prefix, i)
                ).items()
            )
        elif isinstance(x, Mapping):
            t = type(x)
            return t(
                item
                for k, v in x.items()
                for item in self._transform(
                    v, prefix="%s{}.%s" % (prefix, k.replace("\\", "\\\\")
                                           .replace(".", "\\."))
                ).items()
            )
        else:
            return {prefix: x}
|
neo4j/_data.py
|
codereval_python_data_6
|
Return a dictionary of available Bolt protocol handlers,
keyed by version tuple. If an explicit protocol version is
provided, the dictionary will contain either zero or one items,
depending on whether that version is supported. If no protocol
version is provided, all available versions will be returned.
:param protocol_version: tuple identifying a specific protocol
version (e.g. (3, 5)) or None
:return: dictionary of version tuple to handler class for all
relevant and supported protocol versions
:raise TypeError: if protocol version is not passed in a tuple
@classmethod
def protocol_handlers(cls, protocol_version=None):
    """ Return a dictionary of available Bolt protocol handlers,
    keyed by version tuple. If an explicit protocol version is
    provided, the dictionary will contain either zero or one items,
    depending on whether that version is supported. If no protocol
    version is provided, all available versions will be returned.

    :param protocol_version: tuple identifying a specific protocol
        version (e.g. (3, 5)) or None
    :return: dictionary of version tuple to handler class for all
        relevant and supported protocol versions
    :raise TypeError: if protocol version is not passed in a tuple
    """
    # Local imports avoid circular dependencies between the Bolt modules.
    from ._bolt3 import Bolt3
    from ._bolt4 import (
        Bolt4x1,
        Bolt4x2,
        Bolt4x3,
        Bolt4x4,
    )
    from ._bolt5 import Bolt5x0

    # Bolt 4.0 is absent: no space left in the handshake proposal.
    available = {
        bolt.PROTOCOL_VERSION: bolt
        for bolt in (Bolt3, Bolt4x1, Bolt4x2, Bolt4x3, Bolt4x4, Bolt5x0)
    }
    if protocol_version is None:
        return available
    if not isinstance(protocol_version, tuple):
        raise TypeError("Protocol version must be specified as a tuple")
    handler = available.get(protocol_version)
    if handler is None:
        return {}
    return {protocol_version: handler}
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import asyncio
from collections import deque
from logging import getLogger
from time import perf_counter
from ..._async_compat.network import BoltSocket
from ..._async_compat.util import Util
from ..._codec.hydration import v1 as hydration_v1
from ..._codec.packstream import v1 as packstream_v1
from ..._conf import PoolConfig
from ..._exceptions import (
BoltError,
BoltHandshakeError,
SocketDeadlineExceeded,
)
from ..._meta import get_user_agent
from ...addressing import Address
from ...api import (
ServerInfo,
Version,
)
from ...exceptions import (
AuthError,
DriverError,
IncompleteCommit,
ServiceUnavailable,
SessionExpired,
)
from ._common import (
CommitResponse,
Inbox,
Outbox,
)
# Set up logger
log = getLogger("neo4j")
class Bolt:
    """ Server connection for Bolt protocol.

    A :class:`.Bolt` should be constructed following a
    successful .open()

    Bolt handshake and takes the socket over which
    the handshake was carried out.
    """

    # TODO: let packer/unpacker know of hydration (give them hooks?)
    # TODO: make sure query parameter dehydration gets clear error message.

    # Codec classes; subclasses may override to change the wire format.
    PACKER_CLS = packstream_v1.Packer
    UNPACKER_CLS = packstream_v1.Unpacker
    HYDRATION_HANDLER_CLS = hydration_v1.HydrationHandler

    # First four bytes sent in the Bolt handshake.
    MAGIC_PREAMBLE = b"\x60\x60\xB0\x17"

    # Set by each concrete subclass to its (major, minor) version.
    PROTOCOL_VERSION = None

    # flag if connection needs RESET to go back to READY state
    is_reset = False

    # flag if the connection is currently in use
    in_use = False

    # When the connection was last put back into the pool
    idle_since = float("-inf")

    # connection close life-cycle flags
    _closing = False
    _closed = False

    # flag if the connection is considered broken and unusable
    _defunct = False

    #: The pool of which this connection is a member
    pool = None

    # Store the id of the most recent ran query to be able to reduce sent bits by
    # using the default (-1) to refer to the most recent query when pulling
    # results for it.
    most_recent_qid = None
def __init__(self, unresolved_address, sock, max_connection_lifetime, *,
auth=None, user_agent=None, routing_context=None):
self.unresolved_address = unresolved_address
self.socket = sock
self.local_port = self.socket.getsockname()[1]
self.server_info = ServerInfo(Address(sock.getpeername()),
self.PROTOCOL_VERSION)
# so far `connection.recv_timeout_seconds` is the only available
# configuration hint that exists. Therefore, all hints can be stored at
# connection level. This might change in the future.
self.configuration_hints = {}
self.patch = {}
self.outbox = Outbox(
self.socket, on_error=self._set_defunct_write,
packer_cls=self.PACKER_CLS
)
self.inbox = Inbox(
self.socket, on_error=self._set_defunct_read,
unpacker_cls=self.UNPACKER_CLS
)
self.hydration_handler = self.HYDRATION_HANDLER_CLS()
self.responses = deque()
self._max_connection_lifetime = max_connection_lifetime
self._creation_timestamp = perf_counter()
self.routing_context = routing_context
self.idle_since = perf_counter()
# Determine the user agent
if user_agent:
self.user_agent = user_agent
else:
self.user_agent = get_user_agent()
# Determine auth details
if not auth:
self.auth_dict = {}
elif isinstance(auth, tuple) and 2 <= len(auth) <= 3:
from neo4j import Auth
self.auth_dict = vars(Auth("basic", *auth))
else:
try:
self.auth_dict = vars(auth)
except (KeyError, TypeError):
raise AuthError("Cannot determine auth details from %r" % auth)
# Check for missing password
try:
credentials = self.auth_dict["credentials"]
except KeyError:
pass
else:
if credentials is None:
raise AuthError("Password cannot be None")
def __del__(self):
if not asyncio.iscoroutinefunction(self.close):
self.close()
@property
@abc.abstractmethod
def supports_multiple_results(self):
""" Boolean flag to indicate if the connection version supports multiple
queries to be buffered on the server side (True) or if all results need
to be eagerly pulled before sending the next RUN (False).
"""
pass
@property
@abc.abstractmethod
def supports_multiple_databases(self):
""" Boolean flag to indicate if the connection version supports multiple
databases.
"""
pass
@classmethod
def protocol_handlers(cls, protocol_version=None):
""" Return a dictionary of available Bolt protocol handlers,
keyed by version tuple. If an explicit protocol version is
provided, the dictionary will contain either zero or one items,
depending on whether that version is supported. If no protocol
version is provided, all available versions will be returned.
:param protocol_version: tuple identifying a specific protocol
version (e.g. (3, 5)) or None
:return: dictionary of version tuple to handler class for all
relevant and supported protocol versions
:raise TypeError: if protocol version is not passed in a tuple
"""
# Carry out Bolt subclass imports locally to avoid circular dependency issues.
from ._bolt3 import Bolt3
from ._bolt4 import (
Bolt4x1,
Bolt4x2,
Bolt4x3,
Bolt4x4,
)
from ._bolt5 import Bolt5x0
handlers = {
Bolt3.PROTOCOL_VERSION: Bolt3,
# 4.0 unsupported because no space left in the handshake
Bolt4x1.PROTOCOL_VERSION: Bolt4x1,
Bolt4x2.PROTOCOL_VERSION: Bolt4x2,
Bolt4x3.PROTOCOL_VERSION: Bolt4x3,
Bolt4x4.PROTOCOL_VERSION: Bolt4x4,
Bolt5x0.PROTOCOL_VERSION: Bolt5x0,
}
if protocol_version is None:
return handlers
if not isinstance(protocol_version, tuple):
raise TypeError("Protocol version must be specified as a tuple")
if protocol_version in handlers:
return {protocol_version: handlers[protocol_version]}
return {}
@classmethod
def version_list(cls, versions, limit=4):
""" Return a list of supported protocol versions in order of
preference. The number of protocol versions (or ranges)
returned is limited to four.
"""
# In fact, 4.3 is the fist version to support ranges. However, the
# range support got backported to 4.2. But even if the server is too
# old to have the backport, negotiating BOLT 4.1 is no problem as it's
# equivalent to 4.2
first_with_range_support = Version(4, 2)
result = []
for version in versions:
if (result
and version >= first_with_range_support
and result[-1][0] == version[0]
and result[-1][1][1] == version[1] + 1):
# can use range to encompass this version
result[-1][1][1] = version[1]
continue
result.append(Version(version[0], [version[1], version[1]]))
if len(result) == 4:
break
return result
@classmethod
def get_handshake(cls):
""" Return the supported Bolt versions as bytes.
The length is 16 bytes as specified in the Bolt version negotiation.
:return: bytes
"""
supported_versions = sorted(cls.protocol_handlers().keys(), reverse=True)
offered_versions = cls.version_list(supported_versions)
return b"".join(version.to_bytes() for version in offered_versions).ljust(16, b"\x00")
@classmethod
def ping(cls, address, *, timeout=None, **config):
""" Attempt to establish a Bolt connection, returning the
agreed Bolt protocol version if successful.
"""
config = PoolConfig.consume(config)
try:
s, protocol_version, handshake, data = \
BoltSocket.connect(
address,
timeout=timeout,
custom_resolver=config.resolver,
ssl_context=config.get_ssl_context(),
keep_alive=config.keep_alive,
)
except (ServiceUnavailable, SessionExpired, BoltHandshakeError):
return None
else:
BoltSocket.close_socket(s)
return protocol_version
@classmethod
def open(
cls, address, *, auth=None, timeout=None, routing_context=None,
**pool_config
):
"""Open a new Bolt connection to a given server address.
:param address:
:param auth:
:param timeout: the connection timeout in seconds
:param routing_context: dict containing routing context
:param pool_config:
:return: connected Bolt instance
:raise BoltHandshakeError:
raised if the Bolt Protocol can not negotiate a protocol version.
:raise ServiceUnavailable: raised if there was a connection issue.
"""
def time_remaining():
if timeout is None:
return None
t = timeout - (perf_counter() - t0)
return t if t > 0 else 0
t0 = perf_counter()
pool_config = PoolConfig.consume(pool_config)
socket_connection_timeout = pool_config.connection_timeout
if socket_connection_timeout is None:
socket_connection_timeout = time_remaining()
elif timeout is not None:
socket_connection_timeout = min(pool_config.connection_timeout,
time_remaining())
s, pool_config.protocol_version, handshake, data = \
BoltSocket.connect(
address,
timeout=socket_connection_timeout,
custom_resolver=pool_config.resolver,
ssl_context=pool_config.get_ssl_context(),
keep_alive=pool_config.keep_alive,
)
# Carry out Bolt subclass imports locally to avoid circular dependency
# issues.
if pool_config.protocol_version == (3, 0):
from ._bolt3 import Bolt3
bolt_cls = Bolt3
# Implementation for 4.0 exists, but there was no space left in the
# handshake to offer this version to the server. Hence, the server
# should never request us to speak bolt 4.0.
# elif pool_config.protocol_version == (4, 0):
# from ._bolt4 import AsyncBolt4x0
# bolt_cls = AsyncBolt4x0
elif pool_config.protocol_version == (4, 1):
from ._bolt4 import Bolt4x1
bolt_cls = Bolt4x1
elif pool_config.protocol_version == (4, 2):
from ._bolt4 import Bolt4x2
bolt_cls = Bolt4x2
elif pool_config.protocol_version == (4, 3):
from ._bolt4 import Bolt4x3
bolt_cls = Bolt4x3
elif pool_config.protocol_version == (4, 4):
from ._bolt4 import Bolt4x4
bolt_cls = Bolt4x4
elif pool_config.protocol_version == (5, 0):
from ._bolt5 import Bolt5x0
bolt_cls = Bolt5x0
else:
log.debug("[#%04X] S: <CLOSE>", s.getsockname()[1])
BoltSocket.close_socket(s)
supported_versions = cls.protocol_handlers().keys()
raise BoltHandshakeError(
"The Neo4J server does not support communication with this "
"driver. This driver has support for Bolt protocols "
"{}".format(tuple(map(str, supported_versions))),
address=address, request_data=handshake, response_data=data
)
connection = bolt_cls(
address, s, pool_config.max_connection_lifetime, auth=auth,
user_agent=pool_config.user_agent, routing_context=routing_context
)
try:
connection.socket.set_deadline(time_remaining())
try:
connection.hello()
finally:
connection.socket.set_deadline(None)
except Exception:
connection.close_non_blocking()
raise
return connection
@property
@abc.abstractmethod
def encrypted(self):
pass
@property
@abc.abstractmethod
def der_encoded_server_certificate(self):
pass
@abc.abstractmethod
def hello(self, dehydration_hooks=None, hydration_hooks=None):
""" Appends a HELLO message to the outgoing queue, sends it and consumes
all remaining messages.
"""
pass
@abc.abstractmethod
def route(
self, database=None, imp_user=None, bookmarks=None,
dehydration_hooks=None, hydration_hooks=None
):
""" Fetch a routing table from the server for the given
`database`. For Bolt 4.3 and above, this appends a ROUTE
message; for earlier versions, a procedure call is made via
the regular Cypher execution mechanism. In all cases, this is
sent to the network, and a response is fetched.
:param database: database for which to fetch a routing table
Requires Bolt 4.0+.
:param imp_user: the user to impersonate
Requires Bolt 4.4+.
:param bookmarks: iterable of bookmark values after which this
transaction should begin
:param dehydration_hooks:
Hooks to dehydrate types (dict from type (class) to dehydration
function). Dehydration functions receive the value and returns an
object of type understood by packstream.
:param hydration_hooks:
Hooks to hydrate types (mapping from type (class) to
dehydration function). Dehydration functions receive the value of
type understood by packstream and are free to return anything.
"""
pass
@abc.abstractmethod
def run(self, query, parameters=None, mode=None, bookmarks=None,
metadata=None, timeout=None, db=None, imp_user=None,
dehydration_hooks=None, hydration_hooks=None,
**handlers):
""" Appends a RUN message to the output queue.
:param query: Cypher query string
:param parameters: dictionary of Cypher parameters
:param mode: access mode for routing - "READ" or "WRITE" (default)
:param bookmarks: iterable of bookmark values after which this transaction should begin
:param metadata: custom metadata dictionary to attach to the transaction
:param timeout: timeout for transaction execution (seconds)
:param db: name of the database against which to begin the transaction
Requires Bolt 4.0+.
:param imp_user: the user to impersonate
Requires Bolt 4.4+.
:param dehydration_hooks:
Hooks to dehydrate types (dict from type (class) to dehydration
function). Dehydration functions receive the value and returns an
object of type understood by packstream.
:param hydration_hooks:
Hooks to hydrate types (mapping from type (class) to
dehydration function). Dehydration functions receive the value of
type understood by packstream and are free to return anything.
:param handlers: handler functions passed into the returned Response object
"""
pass
@abc.abstractmethod
def discard(self, n=-1, qid=-1, dehydration_hooks=None,
hydration_hooks=None, **handlers):
""" Appends a DISCARD message to the output queue.
:param n: number of records to discard, default = -1 (ALL)
:param qid: query ID to discard for, default = -1 (last query)
:param dehydration_hooks:
Hooks to dehydrate types (dict from type (class) to dehydration
function). Dehydration functions receive the value and returns an
object of type understood by packstream.
:param hydration_hooks:
Hooks to hydrate types (mapping from type (class) to
dehydration function). Dehydration functions receive the value of
type understood by packstream and are free to return anything.
:param handlers: handler functions passed into the returned Response object
"""
pass
@abc.abstractmethod
def pull(self, n=-1, qid=-1, dehydration_hooks=None, hydration_hooks=None,
**handlers):
""" Appends a PULL message to the output queue.
:param n: number of records to pull, default = -1 (ALL)
:param qid: query ID to pull for, default = -1 (last query)
:param dehydration_hooks:
Hooks to dehydrate types (dict from type (class) to dehydration
function). Dehydration functions receive the value and returns an
object of type understood by packstream.
:param hydration_hooks:
Hooks to hydrate types (mapping from type (class) to
dehydration function). Dehydration functions receive the value of
type understood by packstream and are free to return anything.
:param handlers: handler functions passed into the returned Response object
"""
pass
@abc.abstractmethod
def begin(self, mode=None, bookmarks=None, metadata=None, timeout=None,
db=None, imp_user=None, dehydration_hooks=None,
hydration_hooks=None, **handlers):
""" Appends a BEGIN message to the output queue.
:param mode: access mode for routing - "READ" or "WRITE" (default)
:param bookmarks: iterable of bookmark values after which this transaction should begin
:param metadata: custom metadata dictionary to attach to the transaction
:param timeout: timeout for transaction execution (seconds)
:param db: name of the database against which to begin the transaction
Requires Bolt 4.0+.
:param imp_user: the user to impersonate
Requires Bolt 4.4+
:param dehydration_hooks:
Hooks to dehydrate types (dict from type (class) to dehydration
function). Dehydration functions receive the value and returns an
object of type understood by packstream.
:param hydration_hooks:
Hooks to hydrate types (mapping from type (class) to
dehydration function). Dehydration functions receive the value of
type understood by packstream and are free to return anything.
:param handlers: handler functions passed into the returned Response object
:return: Response object
"""
pass
@abc.abstractmethod
def commit(self, dehydration_hooks=None, hydration_hooks=None, **handlers):
""" Appends a COMMIT message to the output queue.
:param dehydration_hooks:
Hooks to dehydrate types (dict from type (class) to dehydration
function). Dehydration functions receive the value and returns an
object of type understood by packstream.
:param hydration_hooks:
Hooks to hydrate types (mapping from type (class) to
dehydration function). Dehydration functions receive the value of
type understood by packstream and are free to return anything.
"""
pass
@abc.abstractmethod
def rollback(self, dehydration_hooks=None, hydration_hooks=None, **handlers):
""" Appends a ROLLBACK message to the output queue.
:param dehydration_hooks:
Hooks to dehydrate types (dict from type (class) to dehydration
function). Dehydration functions receive the value and returns an
object of type understood by packstream.
:param hydration_hooks:
Hooks to hydrate types (mapping from type (class) to
dehydration function). Dehydration functions receive the value of
type understood by packstream and are free to return anything."""
pass
@abc.abstractmethod
def reset(self, dehydration_hooks=None, hydration_hooks=None):
""" Appends a RESET message to the outgoing queue, sends it and consumes
all remaining messages.
:param dehydration_hooks:
Hooks to dehydrate types (dict from type (class) to dehydration
function). Dehydration functions receive the value and returns an
object of type understood by packstream.
:param hydration_hooks:
Hooks to hydrate types (mapping from type (class) to
dehydration function). Dehydration functions receive the value of
type understood by packstream and are free to return anything.
"""
pass
@abc.abstractmethod
def goodbye(self, dehydration_hooks=None, hydration_hooks=None):
"""Append a GOODBYE message to the outgoing queue.
:param dehydration_hooks:
Hooks to dehydrate types (dict from type (class) to dehydration
function). Dehydration functions receive the value and returns an
object of type understood by packstream.
:param hydration_hooks:
Hooks to hydrate types (mapping from type (class) to
dehydration function). Dehydration functions receive the value of
type understood by packstream and are free to return anything.
"""
pass
def new_hydration_scope(self):
return self.hydration_handler.new_hydration_scope()
def _append(self, signature, fields=(), response=None,
dehydration_hooks=None):
""" Appends a message to the outgoing queue.
:param signature: the signature of the message
:param fields: the fields of the message as a tuple
:param response: a response object to handle callbacks
:param dehydration_hooks:
Hooks to dehydrate types (dict from type (class) to dehydration
function). Dehydration functions receive the value and returns an
object of type understood by packstream.
"""
self.outbox.append_message(signature, fields, dehydration_hooks)
self.responses.append(response)
def _send_all(self):
if self.outbox.flush():
self.idle_since = perf_counter()
def send_all(self):
""" Send all queued messages to the server.
"""
if self.closed():
raise ServiceUnavailable(
"Failed to write to closed connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
)
if self.defunct():
raise ServiceUnavailable(
"Failed to write to defunct connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
)
self._send_all()
@abc.abstractmethod
def _process_message(self, tag, fields):
""" Receive at most one message from the server, if available.
:return: 2-tuple of number of detail messages and number of summary
messages fetched
"""
pass
def fetch_message(self):
if self._closed:
raise ServiceUnavailable(
"Failed to read from closed connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
)
if self._defunct:
raise ServiceUnavailable(
"Failed to read from defunct connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
)
if not self.responses:
return 0, 0
# Receive exactly one message
tag, fields = self.inbox.pop(
hydration_hooks=self.responses[0].hydration_hooks
)
res = self._process_message(tag, fields)
self.idle_since = perf_counter()
return res
def fetch_all(self):
""" Fetch all outstanding messages.
:return: 2-tuple of number of detail messages and number of summary
messages fetched
"""
detail_count = summary_count = 0
while self.responses:
response = self.responses[0]
while not response.complete:
detail_delta, summary_delta = self.fetch_message()
detail_count += detail_delta
summary_count += summary_delta
return detail_count, summary_count
def _set_defunct_read(self, error=None, silent=False):
message = "Failed to read from defunct connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
self._set_defunct(message, error=error, silent=silent)
def _set_defunct_write(self, error=None, silent=False):
message = "Failed to write data to connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
self._set_defunct(message, error=error, silent=silent)
def _set_defunct(self, message, error=None, silent=False):
from ._pool import BoltPool
direct_driver = isinstance(self.pool, BoltPool)
if error:
log.debug("[#%04X] %r", self.socket.getsockname()[1], error)
log.error(message)
# We were attempting to receive data but the connection
# has unexpectedly terminated. So, we need to close the
# connection from the client side, and remove the address
# from the connection pool.
self._defunct = True
if not self._closing:
# If we fail while closing the connection, there is no need to
# remove the connection from the pool, nor to try to close the
# connection again.
self.close()
if self.pool:
self.pool.deactivate(address=self.unresolved_address)
# Iterate through the outstanding responses, and if any correspond
# to COMMIT requests then raise an error to signal that we are
# unable to confirm that the COMMIT completed successfully.
if silent:
return
for response in self.responses:
if isinstance(response, CommitResponse):
if error:
raise IncompleteCommit(message) from error
else:
raise IncompleteCommit(message)
if direct_driver:
if error:
raise ServiceUnavailable(message) from error
else:
raise ServiceUnavailable(message)
else:
if error:
raise SessionExpired(message) from error
else:
raise SessionExpired(message)
def stale(self):
return (self._stale
or (0 <= self._max_connection_lifetime
<= perf_counter() - self._creation_timestamp))
_stale = False
def set_stale(self):
self._stale = True
def close(self):
"""Close the connection."""
if self._closed or self._closing:
return
self._closing = True
if not self._defunct:
self.goodbye()
try:
self._send_all()
except (OSError, BoltError, DriverError):
pass
log.debug("[#%04X] C: <CLOSE>", self.local_port)
try:
self.socket.close()
except OSError:
pass
finally:
self._closed = True
def close_non_blocking(self):
"""Set the socket to non-blocking and close it.
This will try to send the `GOODBYE` message (given the socket is not
marked as defunct). However, should the write operation require
blocking (e.g., a full network buffer), then the socket will be closed
immediately (without `GOODBYE` message).
"""
if self._closed or self._closing:
return
self.socket.settimeout(0)
self.close()
def closed(self):
return self._closed
def defunct(self):
return self._defunct
def is_idle_for(self, timeout):
"""Check if connection has been idle for at least the given timeout.
:param timeout: timeout in seconds
:type timeout: float
:rtype: bool
"""
return perf_counter() - self.idle_since > timeout
BoltSocket.Bolt = Bolt
|
neo4j/_sync/io/_bolt.py
|
codereval_python_data_7
|
This function is a decorator for transaction functions that allows extra control over how the transaction is carried out.
For example, a timeout may be applied::
from neo4j import unit_of_work
@unit_of_work(timeout=100)
def count_people_tx(tx):
result = tx.run("MATCH (a:Person) RETURN count(a) AS persons")
record = result.single()
return record["persons"]
:param metadata:
a dictionary with metadata.
Specified metadata will be attached to the executing transaction and visible in the output of ``dbms.listQueries`` and ``dbms.listTransactions`` procedures.
It will also get logged to the ``query.log``.
This functionality makes it easier to tag transactions and is equivalent to ``dbms.setTXMetaData`` procedure, see https://neo4j.com/docs/operations-manual/current/reference/procedures/ for procedure reference.
:type metadata: dict
:param timeout:
the transaction timeout in seconds.
Transactions that execute longer than the configured timeout will be terminated by the database.
    This functionality allows limiting query/transaction execution time.
Specified timeout overrides the default timeout configured in the database using ``dbms.transaction.timeout`` setting.
Value should not represent a negative duration.
A zero duration will make the transaction execute indefinitely.
None will use the default timeout configured in the database.
:type timeout: float or :const:`None`
def unit_of_work(metadata=None, timeout=None):
"""This function is a decorator for transaction functions that allows extra control over how the transaction is carried out.
For example, a timeout may be applied::
from neo4j import unit_of_work
@unit_of_work(timeout=100)
def count_people_tx(tx):
result = tx.run("MATCH (a:Person) RETURN count(a) AS persons")
record = result.single()
return record["persons"]
:param metadata:
a dictionary with metadata.
Specified metadata will be attached to the executing transaction and visible in the output of ``dbms.listQueries`` and ``dbms.listTransactions`` procedures.
It will also get logged to the ``query.log``.
This functionality makes it easier to tag transactions and is equivalent to ``dbms.setTXMetaData`` procedure, see https://neo4j.com/docs/operations-manual/current/reference/procedures/ for procedure reference.
:type metadata: dict
:param timeout:
the transaction timeout in seconds.
Transactions that execute longer than the configured timeout will be terminated by the database.
This functionality allows to limit query/transaction execution time.
Specified timeout overrides the default timeout configured in the database using ``dbms.transaction.timeout`` setting.
Value should not represent a negative duration.
A zero duration will make the transaction execute indefinitely.
None will use the default timeout configured in the database.
:type timeout: float or :const:`None`
"""
def wrapper(f):
def wrapped(*args, **kwargs):
return f(*args, **kwargs)
wrapped.metadata = metadata
wrapped.timeout = timeout
return wrapped
return wrapper
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Query:
""" Create a new query.
:param text: The query text.
:type text: str
:param metadata: metadata attached to the query.
:type metadata: dict
:param timeout: seconds.
:type timeout: float or :const:`None`
"""
def __init__(self, text, metadata=None, timeout=None):
self.text = text
self.metadata = metadata
self.timeout = timeout
def __str__(self):
return str(self.text)
def unit_of_work(metadata=None, timeout=None):
"""This function is a decorator for transaction functions that allows extra control over how the transaction is carried out.
For example, a timeout may be applied::
from neo4j import unit_of_work
@unit_of_work(timeout=100)
def count_people_tx(tx):
result = tx.run("MATCH (a:Person) RETURN count(a) AS persons")
record = result.single()
return record["persons"]
:param metadata:
a dictionary with metadata.
Specified metadata will be attached to the executing transaction and visible in the output of ``dbms.listQueries`` and ``dbms.listTransactions`` procedures.
It will also get logged to the ``query.log``.
This functionality makes it easier to tag transactions and is equivalent to ``dbms.setTXMetaData`` procedure, see https://neo4j.com/docs/operations-manual/current/reference/procedures/ for procedure reference.
:type metadata: dict
:param timeout:
the transaction timeout in seconds.
Transactions that execute longer than the configured timeout will be terminated by the database.
This functionality allows to limit query/transaction execution time.
Specified timeout overrides the default timeout configured in the database using ``dbms.transaction.timeout`` setting.
Value should not represent a negative duration.
A zero duration will make the transaction execute indefinitely.
None will use the default timeout configured in the database.
:type timeout: float or :const:`None`
"""
def wrapper(f):
def wrapped(*args, **kwargs):
return f(*args, **kwargs)
wrapped.metadata = metadata
wrapped.timeout = timeout
return wrapped
return wrapper
|
neo4j/work/query.py
|
codereval_python_data_8
|
Return the index of the given item.
:param key: a key
:return: index
:rtype: int
def index(self, key):
""" Return the index of the given item.
:param key: a key
:return: index
:rtype: int
"""
if isinstance(key, int):
if 0 <= key < len(self.__keys):
return key
raise IndexError(key)
elif isinstance(key, str):
try:
return self.__keys.index(key)
except ValueError:
raise KeyError(key)
else:
raise TypeError(key)
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import (
ABCMeta,
abstractmethod,
)
from collections.abc import (
Mapping,
Sequence,
Set,
)
from functools import reduce
from operator import xor as xor_operator
from ._codec.hydration import BrokenHydrationObject
from ._conf import iter_items
from ._meta import deprecated
from .exceptions import BrokenRecordError
from .graph import (
Node,
Path,
Relationship,
)
class Record(tuple, Mapping):
""" A :class:`.Record` is an immutable ordered collection of key-value
pairs. It is generally closer to a :py:class:`namedtuple` than to a
:py:class:`OrderedDict` in as much as iteration of the collection will
yield values rather than keys.
"""
__keys = None
def __new__(cls, iterable=()):
keys = []
values = []
for key, value in iter_items(iterable):
keys.append(key)
values.append(value)
inst = tuple.__new__(cls, values)
inst.__keys = tuple(keys)
return inst
def _broken_record_error(self, index):
return BrokenRecordError(
f"Record contains broken data at {index} ('{self.__keys[index]}')"
)
def _super_getitem_single(self, index):
value = super().__getitem__(index)
if isinstance(value, BrokenHydrationObject):
raise self._broken_record_error(index) from value.error
return value
def __repr__(self):
return "<%s %s>" % (
self.__class__.__name__,
" ".join("%s=%r" % (field, value)
for field, value in zip(self.__keys, super().__iter__()))
)
def __str__(self):
return self.__repr__()
def __eq__(self, other):
""" In order to be flexible regarding comparison, the equality rules
for a record permit comparison with any other Sequence or Mapping.
:param other:
:return:
"""
compare_as_sequence = isinstance(other, Sequence)
compare_as_mapping = isinstance(other, Mapping)
if compare_as_sequence and compare_as_mapping:
return list(self) == list(other) and dict(self) == dict(other)
elif compare_as_sequence:
return list(self) == list(other)
elif compare_as_mapping:
return dict(self) == dict(other)
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return reduce(xor_operator, map(hash, self.items()))
def __iter__(self):
for i, v in enumerate(super().__iter__()):
if isinstance(v, BrokenHydrationObject):
raise self._broken_record_error(i) from v.error
yield v
def __getitem__(self, key):
if isinstance(key, slice):
keys = self.__keys[key]
values = super().__getitem__(key)
return self.__class__(zip(keys, values))
try:
index = self.index(key)
except IndexError:
return None
else:
return self._super_getitem_single(index)
# TODO: 6.0 - remove
@deprecated("This method is deprecated and will be removed in the future.")
def __getslice__(self, start, stop):
key = slice(start, stop)
keys = self.__keys[key]
values = tuple(self)[key]
return self.__class__(zip(keys, values))
def get(self, key, default=None):
""" Obtain a value from the record by key, returning a default
value if the key does not exist.
:param key: a key
:param default: default value
:return: a value
"""
try:
index = self.__keys.index(str(key))
except ValueError:
return default
if 0 <= index < len(self):
return self._super_getitem_single(index)
else:
return default
def index(self, key):
""" Return the index of the given item.
:param key: a key
:return: index
:rtype: int
"""
if isinstance(key, int):
if 0 <= key < len(self.__keys):
return key
raise IndexError(key)
elif isinstance(key, str):
try:
return self.__keys.index(key)
except ValueError:
raise KeyError(key)
else:
raise TypeError(key)
def value(self, key=0, default=None):
""" Obtain a single value from the record by index or key. If no
index or key is specified, the first value is returned. If the
specified item does not exist, the default value is returned.
:param key: an index or key
:param default: default value
:return: a single value
"""
try:
index = self.index(key)
except (IndexError, KeyError):
return default
else:
return self[index]
def keys(self):
""" Return the keys of the record.
:return: list of key names
"""
return list(self.__keys)
def values(self, *keys):
""" Return the values of the record, optionally filtering to
include only certain values by index or key.
:param keys: indexes or keys of the items to include; if none
are provided, all values will be included
:return: list of values
:rtype: list
"""
if keys:
d = []
for key in keys:
try:
i = self.index(key)
except KeyError:
d.append(None)
else:
d.append(self[i])
return d
return list(self)
def items(self, *keys):
""" Return the fields of the record as a list of key and value tuples
:return: a list of value tuples
:rtype: list
"""
if keys:
d = []
for key in keys:
try:
i = self.index(key)
except KeyError:
d.append((key, None))
else:
d.append((self.__keys[i], self[i]))
return d
return list((self.__keys[i], self._super_getitem_single(i))
for i in range(len(self)))
def data(self, *keys):
""" Return the keys and values of this record as a dictionary,
optionally including only certain values by index or key. Keys
provided in the items that are not in the record will be
inserted with a value of :const:`None`; indexes provided
that are out of bounds will trigger an :exc:`IndexError`.
:param keys: indexes or keys of the items to include; if none
are provided, all values will be included
:return: dictionary of values, keyed by field name
:raises: :exc:`IndexError` if an out-of-bounds index is specified
"""
return RecordExporter().transform(dict(self.items(*keys)))
class DataTransformer(metaclass=ABCMeta):
""" Abstract base class for transforming data from one form into
another.
"""
@abstractmethod
def transform(self, x):
""" Transform a value, or collection of values.
:param x: input value
:return: output value
"""
class RecordExporter(DataTransformer):
""" Transformer class used by the :meth:`.Record.data` method.
"""
def transform(self, x):
if isinstance(x, Node):
return self.transform(dict(x))
elif isinstance(x, Relationship):
return (self.transform(dict(x.start_node)),
x.__class__.__name__,
self.transform(dict(x.end_node)))
elif isinstance(x, Path):
path = [self.transform(x.start_node)]
for i, relationship in enumerate(x.relationships):
path.append(self.transform(relationship.__class__.__name__))
path.append(self.transform(x.nodes[i + 1]))
return path
elif isinstance(x, str):
return x
elif isinstance(x, Sequence):
t = type(x)
return t(map(self.transform, x))
elif isinstance(x, Set):
t = type(x)
return t(map(self.transform, x))
elif isinstance(x, Mapping):
t = type(x)
return t((k, self.transform(v)) for k, v in x.items())
else:
return x
class RecordTableRowExporter(DataTransformer):
"""Transformer class used by the :meth:`.Result.to_df` method."""
def transform(self, x):
assert isinstance(x, Mapping)
t = type(x)
return t(item
for k, v in x.items()
for item in self._transform(
v, prefix=k.replace("\\", "\\\\").replace(".", "\\.")
).items())
def _transform(self, x, prefix):
if isinstance(x, Node):
res = {
"%s().element_id" % prefix: x.element_id,
"%s().labels" % prefix: x.labels,
}
res.update(("%s().prop.%s" % (prefix, k), v) for k, v in x.items())
return res
elif isinstance(x, Relationship):
res = {
"%s->.element_id" % prefix: x.element_id,
"%s->.start.element_id" % prefix: x.start_node.element_id,
"%s->.end.element_id" % prefix: x.end_node.element_id,
"%s->.type" % prefix: x.__class__.__name__,
}
res.update(("%s->.prop.%s" % (prefix, k), v) for k, v in x.items())
return res
elif isinstance(x, Path) or isinstance(x, str):
return {prefix: x}
elif isinstance(x, Sequence):
return dict(
item
for i, v in enumerate(x)
for item in self._transform(
v, prefix="%s[].%i" % (prefix, i)
).items()
)
elif isinstance(x, Mapping):
t = type(x)
return t(
item
for k, v in x.items()
for item in self._transform(
v, prefix="%s{}.%s" % (prefix, k.replace("\\", "\\\\")
.replace(".", "\\."))
).items()
)
else:
return {prefix: x}
|
neo4j/_data.py
|
codereval_python_data_9
|
Return the values of the record, optionally filtering to
include only certain values by index or key.
:param keys: indexes or keys of the items to include; if none
are provided, all values will be included
:return: list of values
:rtype: list
def values(self, *keys):
""" Return the values of the record, optionally filtering to
include only certain values by index or key.
:param keys: indexes or keys of the items to include; if none
are provided, all values will be included
:return: list of values
:rtype: list
"""
if keys:
d = []
for key in keys:
try:
i = self.index(key)
except KeyError:
d.append(None)
else:
d.append(self[i])
return d
return list(self)
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import (
ABCMeta,
abstractmethod,
)
from collections.abc import (
Mapping,
Sequence,
Set,
)
from functools import reduce
from operator import xor as xor_operator
from ._codec.hydration import BrokenHydrationObject
from ._conf import iter_items
from ._meta import deprecated
from .exceptions import BrokenRecordError
from .graph import (
Node,
Path,
Relationship,
)
class Record(tuple, Mapping):
    """ A :class:`.Record` is an immutable ordered collection of key-value
    pairs. It is generally closer to a :py:class:`namedtuple` than to a
    :py:class:`OrderedDict` in as much as iteration of the collection will
    yield values rather than keys.
    """

    # Tuple of field names, parallel to the values held by the tuple
    # base class.
    __keys = None

    def __new__(cls, iterable=()):
        keys = []
        values = []
        for key, value in iter_items(iterable):
            keys.append(key)
            values.append(value)
        inst = tuple.__new__(cls, values)
        inst.__keys = tuple(keys)
        return inst

    def _broken_record_error(self, index):
        # Error raised when the value at *index* failed hydration.
        return BrokenRecordError(
            f"Record contains broken data at {index} ('{self.__keys[index]}')"
        )

    def _super_getitem_single(self, index):
        """ Fetch one value by position, raising
        :exc:`BrokenRecordError` if hydration of that value failed.
        """
        value = super().__getitem__(index)
        if isinstance(value, BrokenHydrationObject):
            raise self._broken_record_error(index) from value.error
        return value

    def __repr__(self):
        # Iterate via the tuple base class to avoid the broken-object
        # check in __iter__; repr should work even on broken records.
        return "<%s %s>" % (
            self.__class__.__name__,
            " ".join("%s=%r" % (field, value)
                     for field, value in zip(self.__keys, super().__iter__()))
        )

    def __str__(self):
        return self.__repr__()

    def __eq__(self, other):
        """ In order to be flexible regarding comparison, the equality rules
        for a record permit comparison with any other Sequence or Mapping.

        :param other: value to compare against
        :return: True if equal, False otherwise
        """
        compare_as_sequence = isinstance(other, Sequence)
        compare_as_mapping = isinstance(other, Mapping)
        if compare_as_sequence and compare_as_mapping:
            return list(self) == list(other) and dict(self) == dict(other)
        elif compare_as_sequence:
            return list(self) == list(other)
        elif compare_as_mapping:
            return dict(self) == dict(other)
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Fold the hashes of all (key, value) pairs together.  The
        # explicit initial value makes an empty record hashable instead
        # of letting reduce() raise TypeError on an empty sequence; it
        # does not change the hash of non-empty records (0 ^ h == h).
        return reduce(xor_operator, map(hash, self.items()), 0)

    def __iter__(self):
        for i, v in enumerate(super().__iter__()):
            if isinstance(v, BrokenHydrationObject):
                raise self._broken_record_error(i) from v.error
            yield v

    def __getitem__(self, key):
        """ Return a value by integer index, key name, or slice.

        A slice yields a new record; an out-of-range integer index
        returns :const:`None` (a missing string key still raises
        :exc:`KeyError` from :meth:`.index`).
        """
        if isinstance(key, slice):
            keys = self.__keys[key]
            values = super().__getitem__(key)
            return self.__class__(zip(keys, values))
        try:
            index = self.index(key)
        except IndexError:
            return None
        else:
            return self._super_getitem_single(index)

    # TODO: 6.0 - remove
    @deprecated("This method is deprecated and will be removed in the future.")
    def __getslice__(self, start, stop):
        """ Legacy slicing hook: return a new record with the key/value
        pairs in the half-open range [start, stop).
        """
        key = slice(start, stop)
        keys = self.__keys[key]
        values = tuple(self)[key]
        return self.__class__(zip(keys, values))

    def get(self, key, default=None):
        """ Obtain a value from the record by key, returning a default
        value if the key does not exist.

        :param key: a key (coerced to str before lookup)
        :param default: default value
        :return: a value
        """
        try:
            index = self.__keys.index(str(key))
        except ValueError:
            return default
        if 0 <= index < len(self):
            return self._super_getitem_single(index)
        else:
            return default

    def index(self, key):
        """ Return the index of the given item.

        :param key: an integer index or string key
        :return: index
        :rtype: int
        :raises IndexError: if an int key is out of range
        :raises KeyError: if a str key is not present
        :raises TypeError: if the key is neither int nor str
        """
        if isinstance(key, int):
            if 0 <= key < len(self.__keys):
                return key
            raise IndexError(key)
        elif isinstance(key, str):
            try:
                return self.__keys.index(key)
            except ValueError:
                raise KeyError(key)
        else:
            raise TypeError(key)

    def value(self, key=0, default=None):
        """ Obtain a single value from the record by index or key. If no
        index or key is specified, the first value is returned. If the
        specified item does not exist, the default value is returned.

        :param key: an index or key
        :param default: default value
        :return: a single value
        """
        try:
            index = self.index(key)
        except (IndexError, KeyError):
            return default
        else:
            return self[index]

    def keys(self):
        """ Return the keys of the record.

        :return: list of key names
        """
        return list(self.__keys)

    def values(self, *keys):
        """ Return the values of the record, optionally filtering to
        include only certain values by index or key.

        :param keys: indexes or keys of the items to include; if none
            are provided, all values will be included
        :return: list of values
        :rtype: list
        """
        if keys:
            d = []
            for key in keys:
                try:
                    i = self.index(key)
                except KeyError:
                    # Missing string keys contribute None; out-of-range
                    # int indexes raise IndexError from index().
                    d.append(None)
                else:
                    d.append(self[i])
            return d
        return list(self)

    def items(self, *keys):
        """ Return the fields of the record as a list of key and value tuples

        :param keys: indexes or keys of the items to include; if none
            are provided, all fields will be included
        :return: a list of value tuples
        :rtype: list
        """
        if keys:
            d = []
            for key in keys:
                try:
                    i = self.index(key)
                except KeyError:
                    d.append((key, None))
                else:
                    d.append((self.__keys[i], self[i]))
            return d
        return list((self.__keys[i], self._super_getitem_single(i))
                    for i in range(len(self)))

    def data(self, *keys):
        """ Return the keys and values of this record as a dictionary,
        optionally including only certain values by index or key. Keys
        provided in the items that are not in the record will be
        inserted with a value of :const:`None`; indexes provided
        that are out of bounds will trigger an :exc:`IndexError`.

        :param keys: indexes or keys of the items to include; if none
            are provided, all values will be included
        :return: dictionary of values, keyed by field name
        :raises: :exc:`IndexError` if an out-of-bounds index is specified
        """
        # Graph entities (nodes, relationships, paths) are flattened to
        # plain Python structures by the exporter.
        return RecordExporter().transform(dict(self.items(*keys)))
class DataTransformer(metaclass=ABCMeta):
    """ Abstract base class for transforming data from one form into
    another.
    """

    @abstractmethod
    def transform(self, x):
        """ Transform a value, or collection of values.

        :param x: input value
        :return: output value
        """
class RecordExporter(DataTransformer):
    """ Transformer class used by the :meth:`.Record.data` method. """

    def transform(self, x):
        """ Recursively convert graph entities into plain Python
        structures, leaving scalars and unknown types untouched.

        :param x: input value
        :return: output value
        """
        if isinstance(x, Node):
            return self.transform(dict(x))
        if isinstance(x, Relationship):
            start = self.transform(dict(x.start_node))
            end = self.transform(dict(x.end_node))
            return start, x.__class__.__name__, end
        if isinstance(x, Path):
            # Flatten the path into an alternating list of node dicts
            # and relationship type names.
            flattened = [self.transform(x.start_node)]
            for i, rel in enumerate(x.relationships):
                flattened.append(self.transform(rel.__class__.__name__))
                flattened.append(self.transform(x.nodes[i + 1]))
            return flattened
        if isinstance(x, str):
            # str is itself a Sequence, so it must be handled before
            # the generic Sequence branch to avoid infinite recursion.
            return x
        if isinstance(x, (Sequence, Set)):
            return type(x)(map(self.transform, x))
        if isinstance(x, Mapping):
            return type(x)((k, self.transform(v)) for k, v in x.items())
        return x
class RecordTableRowExporter(DataTransformer):
    """Transformer class used by the :meth:`.Result.to_df` method."""

    def transform(self, x):
        """ Flatten one record (a mapping) into a flat row dict whose
        column names encode the nesting structure.

        :param x: the record mapping to flatten
        :return: a mapping of the same type with flattened keys
        """
        assert isinstance(x, Mapping)
        t = type(x)
        # Escape literal "\" and "." in user keys so dotted column
        # names remain unambiguous.
        return t(item
                 for k, v in x.items()
                 for item in self._transform(
                     v, prefix=k.replace("\\", "\\\\").replace(".", "\\.")
                 ).items())

    def _transform(self, x, prefix):
        """ Recursively flatten *x*, prefixing every produced column
        name with *prefix*:

        * nodes expand to ``().element_id`` / ``().labels`` /
          ``().prop.*`` columns,
        * relationships to ``->.*`` columns,
        * sequences to ``[].<index>`` columns,
        * mappings to ``{}.<key>`` columns,
        * anything else (including paths and strings) becomes a single
          column.
        """
        if isinstance(x, Node):
            res = {
                "%s().element_id" % prefix: x.element_id,
                "%s().labels" % prefix: x.labels,
            }
            res.update(("%s().prop.%s" % (prefix, k), v) for k, v in x.items())
            return res
        elif isinstance(x, Relationship):
            res = {
                "%s->.element_id" % prefix: x.element_id,
                "%s->.start.element_id" % prefix: x.start_node.element_id,
                "%s->.end.element_id" % prefix: x.end_node.element_id,
                "%s->.type" % prefix: x.__class__.__name__,
            }
            res.update(("%s->.prop.%s" % (prefix, k), v) for k, v in x.items())
            return res
        elif isinstance(x, Path) or isinstance(x, str):
            # str is a Sequence, so this check must precede the
            # Sequence branch below.
            return {prefix: x}
        elif isinstance(x, Sequence):
            return dict(
                item
                for i, v in enumerate(x)
                for item in self._transform(
                    v, prefix="%s[].%i" % (prefix, i)
                ).items()
            )
        elif isinstance(x, Mapping):
            t = type(x)
            return t(
                item
                for k, v in x.items()
                for item in self._transform(
                    v, prefix="%s{}.%s" % (prefix, k.replace("\\", "\\\\")
                                           .replace(".", "\\."))
                ).items()
            )
        else:
            return {prefix: x}
|
neo4j/_data.py
|
codereval_python_data_10
|
Return the keys and values of this record as a dictionary,
optionally including only certain values by index or key. Keys
provided in the items that are not in the record will be
inserted with a value of :const:`None`; indexes provided
that are out of bounds will trigger an :exc:`IndexError`.
:param keys: indexes or keys of the items to include; if none
are provided, all values will be included
:return: dictionary of values, keyed by field name
:raises: :exc:`IndexError` if an out-of-bounds index is specified
def data(self, *keys):
""" Return the keys and values of this record as a dictionary,
optionally including only certain values by index or key. Keys
provided in the items that are not in the record will be
inserted with a value of :const:`None`; indexes provided
that are out of bounds will trigger an :exc:`IndexError`.
:param keys: indexes or keys of the items to include; if none
are provided, all values will be included
:return: dictionary of values, keyed by field name
:raises: :exc:`IndexError` if an out-of-bounds index is specified
"""
return RecordExporter().transform(dict(self.items(*keys)))
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import (
ABCMeta,
abstractmethod,
)
from collections.abc import (
Mapping,
Sequence,
Set,
)
from functools import reduce
from operator import xor as xor_operator
from ._codec.hydration import BrokenHydrationObject
from ._conf import iter_items
from ._meta import deprecated
from .exceptions import BrokenRecordError
from .graph import (
Node,
Path,
Relationship,
)
class Record(tuple, Mapping):
""" A :class:`.Record` is an immutable ordered collection of key-value
pairs. It is generally closer to a :py:class:`namedtuple` than to a
:py:class:`OrderedDict` in as much as iteration of the collection will
yield values rather than keys.
"""
__keys = None
def __new__(cls, iterable=()):
keys = []
values = []
for key, value in iter_items(iterable):
keys.append(key)
values.append(value)
inst = tuple.__new__(cls, values)
inst.__keys = tuple(keys)
return inst
def _broken_record_error(self, index):
return BrokenRecordError(
f"Record contains broken data at {index} ('{self.__keys[index]}')"
)
def _super_getitem_single(self, index):
value = super().__getitem__(index)
if isinstance(value, BrokenHydrationObject):
raise self._broken_record_error(index) from value.error
return value
def __repr__(self):
return "<%s %s>" % (
self.__class__.__name__,
" ".join("%s=%r" % (field, value)
for field, value in zip(self.__keys, super().__iter__()))
)
def __str__(self):
return self.__repr__()
def __eq__(self, other):
""" In order to be flexible regarding comparison, the equality rules
for a record permit comparison with any other Sequence or Mapping.
:param other:
:return:
"""
compare_as_sequence = isinstance(other, Sequence)
compare_as_mapping = isinstance(other, Mapping)
if compare_as_sequence and compare_as_mapping:
return list(self) == list(other) and dict(self) == dict(other)
elif compare_as_sequence:
return list(self) == list(other)
elif compare_as_mapping:
return dict(self) == dict(other)
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return reduce(xor_operator, map(hash, self.items()))
def __iter__(self):
for i, v in enumerate(super().__iter__()):
if isinstance(v, BrokenHydrationObject):
raise self._broken_record_error(i) from v.error
yield v
def __getitem__(self, key):
if isinstance(key, slice):
keys = self.__keys[key]
values = super().__getitem__(key)
return self.__class__(zip(keys, values))
try:
index = self.index(key)
except IndexError:
return None
else:
return self._super_getitem_single(index)
# TODO: 6.0 - remove
@deprecated("This method is deprecated and will be removed in the future.")
def __getslice__(self, start, stop):
key = slice(start, stop)
keys = self.__keys[key]
values = tuple(self)[key]
return self.__class__(zip(keys, values))
def get(self, key, default=None):
""" Obtain a value from the record by key, returning a default
value if the key does not exist.
:param key: a key
:param default: default value
:return: a value
"""
try:
index = self.__keys.index(str(key))
except ValueError:
return default
if 0 <= index < len(self):
return self._super_getitem_single(index)
else:
return default
def index(self, key):
""" Return the index of the given item.
:param key: a key
:return: index
:rtype: int
"""
if isinstance(key, int):
if 0 <= key < len(self.__keys):
return key
raise IndexError(key)
elif isinstance(key, str):
try:
return self.__keys.index(key)
except ValueError:
raise KeyError(key)
else:
raise TypeError(key)
def value(self, key=0, default=None):
""" Obtain a single value from the record by index or key. If no
index or key is specified, the first value is returned. If the
specified item does not exist, the default value is returned.
:param key: an index or key
:param default: default value
:return: a single value
"""
try:
index = self.index(key)
except (IndexError, KeyError):
return default
else:
return self[index]
def keys(self):
""" Return the keys of the record.
:return: list of key names
"""
return list(self.__keys)
def values(self, *keys):
""" Return the values of the record, optionally filtering to
include only certain values by index or key.
:param keys: indexes or keys of the items to include; if none
are provided, all values will be included
:return: list of values
:rtype: list
"""
if keys:
d = []
for key in keys:
try:
i = self.index(key)
except KeyError:
d.append(None)
else:
d.append(self[i])
return d
return list(self)
def items(self, *keys):
""" Return the fields of the record as a list of key and value tuples
:return: a list of value tuples
:rtype: list
"""
if keys:
d = []
for key in keys:
try:
i = self.index(key)
except KeyError:
d.append((key, None))
else:
d.append((self.__keys[i], self[i]))
return d
return list((self.__keys[i], self._super_getitem_single(i))
for i in range(len(self)))
def data(self, *keys):
""" Return the keys and values of this record as a dictionary,
optionally including only certain values by index or key. Keys
provided in the items that are not in the record will be
inserted with a value of :const:`None`; indexes provided
that are out of bounds will trigger an :exc:`IndexError`.
:param keys: indexes or keys of the items to include; if none
are provided, all values will be included
:return: dictionary of values, keyed by field name
:raises: :exc:`IndexError` if an out-of-bounds index is specified
"""
return RecordExporter().transform(dict(self.items(*keys)))
class DataTransformer(metaclass=ABCMeta):
""" Abstract base class for transforming data from one form into
another.
"""
@abstractmethod
def transform(self, x):
""" Transform a value, or collection of values.
:param x: input value
:return: output value
"""
class RecordExporter(DataTransformer):
""" Transformer class used by the :meth:`.Record.data` method.
"""
def transform(self, x):
if isinstance(x, Node):
return self.transform(dict(x))
elif isinstance(x, Relationship):
return (self.transform(dict(x.start_node)),
x.__class__.__name__,
self.transform(dict(x.end_node)))
elif isinstance(x, Path):
path = [self.transform(x.start_node)]
for i, relationship in enumerate(x.relationships):
path.append(self.transform(relationship.__class__.__name__))
path.append(self.transform(x.nodes[i + 1]))
return path
elif isinstance(x, str):
return x
elif isinstance(x, Sequence):
t = type(x)
return t(map(self.transform, x))
elif isinstance(x, Set):
t = type(x)
return t(map(self.transform, x))
elif isinstance(x, Mapping):
t = type(x)
return t((k, self.transform(v)) for k, v in x.items())
else:
return x
class RecordTableRowExporter(DataTransformer):
"""Transformer class used by the :meth:`.Result.to_df` method."""
def transform(self, x):
assert isinstance(x, Mapping)
t = type(x)
return t(item
for k, v in x.items()
for item in self._transform(
v, prefix=k.replace("\\", "\\\\").replace(".", "\\.")
).items())
def _transform(self, x, prefix):
if isinstance(x, Node):
res = {
"%s().element_id" % prefix: x.element_id,
"%s().labels" % prefix: x.labels,
}
res.update(("%s().prop.%s" % (prefix, k), v) for k, v in x.items())
return res
elif isinstance(x, Relationship):
res = {
"%s->.element_id" % prefix: x.element_id,
"%s->.start.element_id" % prefix: x.start_node.element_id,
"%s->.end.element_id" % prefix: x.end_node.element_id,
"%s->.type" % prefix: x.__class__.__name__,
}
res.update(("%s->.prop.%s" % (prefix, k), v) for k, v in x.items())
return res
elif isinstance(x, Path) or isinstance(x, str):
return {prefix: x}
elif isinstance(x, Sequence):
return dict(
item
for i, v in enumerate(x)
for item in self._transform(
v, prefix="%s[].%i" % (prefix, i)
).items()
)
elif isinstance(x, Mapping):
t = type(x)
return t(
item
for k, v in x.items()
for item in self._transform(
v, prefix="%s{}.%s" % (prefix, k.replace("\\", "\\\\")
.replace(".", "\\."))
).items()
)
else:
return {prefix: x}
|
neo4j/_data.py
|
codereval_python_data_11
|
Remove the last two bytes of data, returning them as a big-endian
16-bit unsigned integer.
def pop_u16(self):
    """ Remove the last two bytes of data, returning them as a big-endian
    16-bit unsigned integer.

    Returns -1 if fewer than two bytes of data are buffered.
    """
    if self.used >= 2:
        # Combine the two trailing bytes, high byte first.
        value = 0x100 * self.data[self.used - 2] + self.data[self.used - 1]
        self.used -= 2
        return value
    else:
        return -1
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from codecs import decode
from contextlib import contextmanager
from struct import (
pack as struct_pack,
unpack as struct_unpack,
)
from .._common import Structure
# Pre-computed big-endian encodings for every 1- and 2-byte unsigned
# integer, so the hot packing path can index a table instead of calling
# struct.pack each time.
PACKED_UINT_8 = [struct_pack(">B", value) for value in range(0x100)]
PACKED_UINT_16 = [struct_pack(">H", value) for value in range(0x10000)]
# Reverse lookup tables used when decoding.
UNPACKED_UINT_8 = {bytes(bytearray([x])): x for x in range(0x100)}
UNPACKED_UINT_16 = {struct_pack(">H", x): x for x in range(0x10000)}
# Single-byte markers that decode directly to a constant value: NULL,
# the two booleans, and the "tiny int" range -16..127.
UNPACKED_MARKERS = {b"\xC0": None, b"\xC2": False, b"\xC3": True}
UNPACKED_MARKERS.update({bytes(bytearray([z])): z for z in range(0x00, 0x80)})
UNPACKED_MARKERS.update({bytes(bytearray([z + 256])): z for z in range(-0x10, 0x00)})
# Bounds of the signed 64-bit integer range supported by PackStream.
INT64_MIN = -(2 ** 63)
INT64_MAX = 2 ** 63
class Packer:
    """ Serializer for PackStream (v1): encodes Python values into
    their PackStream wire representation and writes them to *stream*.
    """

    def __init__(self, stream):
        self.stream = stream
        # Cache the bound write method; pack() calls it in a tight loop.
        self._write = self.stream.write

    def pack_raw(self, data):
        """ Write *data* to the stream verbatim, with no marker or header. """
        self._write(data)

    def pack(self, value, dehydration_hooks=None):
        """ Encode and write a single value, dispatching on its type.

        :param value: the value to serialize
        :param dehydration_hooks: optional mapping from type (class) to
            a function converting values of that type into natively
            packable values
        :raises OverflowError: if an int is outside the signed 64-bit range
        :raises TypeError: if a map key is not a string
        :raises ValueError: if the value's type is not supported
        """
        write = self._write
        # None
        if value is None:
            write(b"\xC0")  # NULL
        # Boolean (checked before int, since bool is a subclass of int)
        elif value is True:
            write(b"\xC3")
        elif value is False:
            write(b"\xC2")
        # Float (only double precision is supported)
        elif isinstance(value, float):
            write(b"\xC1")
            write(struct_pack(">d", value))
        # Integer: pick the smallest encoding that fits
        elif isinstance(value, int):
            if -0x10 <= value < 0x80:
                write(PACKED_UINT_8[value % 0x100])  # TINY_INT
            elif -0x80 <= value < -0x10:
                write(b"\xC8")  # INT_8
                write(PACKED_UINT_8[value % 0x100])
            elif -0x8000 <= value < 0x8000:
                write(b"\xC9")  # INT_16
                write(PACKED_UINT_16[value % 0x10000])
            elif -0x80000000 <= value < 0x80000000:
                write(b"\xCA")  # INT_32
                write(struct_pack(">i", value))
            elif INT64_MIN <= value < INT64_MAX:
                write(b"\xCB")  # INT_64
                write(struct_pack(">q", value))
            else:
                raise OverflowError("Integer %s out of range" % value)
        # String
        elif isinstance(value, str):
            encoded = value.encode("utf-8")
            self.pack_string_header(len(encoded))
            self.pack_raw(encoded)
        # Bytes
        elif isinstance(value, (bytes, bytearray)):
            self.pack_bytes_header(len(value))
            self.pack_raw(value)
        # List
        elif isinstance(value, list):
            self.pack_list_header(len(value))
            for item in value:
                self.pack(item, dehydration_hooks=dehydration_hooks)
        # Map
        elif isinstance(value, dict):
            self.pack_map_header(len(value))
            for key, item in value.items():
                if not isinstance(key, str):
                    raise TypeError(
                        "Map keys must be strings, not {}".format(type(key))
                    )
                self.pack(key, dehydration_hooks=dehydration_hooks)
                self.pack(item, dehydration_hooks=dehydration_hooks)
        # Structure
        elif isinstance(value, Structure):
            self.pack_struct(value.tag, value.fields)
        # Other
        elif dehydration_hooks and type(value) in dehydration_hooks:
            # NOTE: hooks are not forwarded into this recursive call,
            # so a hook must return a natively packable value.
            self.pack(dehydration_hooks[type(value)](value))
        else:
            raise ValueError("Values of type %s are not supported" % type(value))

    def pack_bytes_header(self, size):
        """ Write the marker and length header for a byte array of
        *size* bytes.

        :raises OverflowError: if size does not fit in 32 bits
        """
        write = self._write
        if size < 0x100:
            write(b"\xCC")  # BYTES_8
            write(PACKED_UINT_8[size])
        elif size < 0x10000:
            write(b"\xCD")  # BYTES_16
            write(PACKED_UINT_16[size])
        elif size < 0x100000000:
            write(b"\xCE")  # BYTES_32
            write(struct_pack(">I", size))
        else:
            raise OverflowError("Bytes header size out of range")

    def pack_string_header(self, size):
        """ Write the marker and length header for a UTF-8 string of
        *size* encoded bytes.

        :raises OverflowError: if size does not fit in 32 bits
        """
        write = self._write
        if size <= 0x0F:
            write(bytes((0x80 | size,)))  # TINY_STRING
        elif size < 0x100:
            write(b"\xD0")  # STRING_8
            write(PACKED_UINT_8[size])
        elif size < 0x10000:
            write(b"\xD1")  # STRING_16
            write(PACKED_UINT_16[size])
        elif size < 0x100000000:
            write(b"\xD2")  # STRING_32
            write(struct_pack(">I", size))
        else:
            raise OverflowError("String header size out of range")

    def pack_list_header(self, size):
        """ Write the marker and length header for a list of *size* items.

        :raises OverflowError: if size does not fit in 32 bits
        """
        write = self._write
        if size <= 0x0F:
            write(bytes((0x90 | size,)))  # TINY_LIST
        elif size < 0x100:
            write(b"\xD4")  # LIST_8
            write(PACKED_UINT_8[size])
        elif size < 0x10000:
            write(b"\xD5")  # LIST_16
            write(PACKED_UINT_16[size])
        elif size < 0x100000000:
            write(b"\xD6")  # LIST_32
            write(struct_pack(">I", size))
        else:
            raise OverflowError("List header size out of range")

    def pack_map_header(self, size):
        """ Write the marker and length header for a map of *size* entries.

        :raises OverflowError: if size does not fit in 32 bits
        """
        write = self._write
        if size <= 0x0F:
            write(bytes((0xA0 | size,)))  # TINY_MAP
        elif size < 0x100:
            write(b"\xD8")  # MAP_8
            write(PACKED_UINT_8[size])
        elif size < 0x10000:
            write(b"\xD9")  # MAP_16
            write(PACKED_UINT_16[size])
        elif size < 0x100000000:
            write(b"\xDA")  # MAP_32
            write(struct_pack(">I", size))
        else:
            raise OverflowError("Map header size out of range")

    def pack_struct(self, signature, fields, dehydration_hooks=None):
        """ Write a structure with a one-byte *signature* and the given
        iterable of *fields*.

        :raises ValueError: if the signature is not a single byte
        :raises OverflowError: if there are more than 15 fields
        """
        # Check the type before taking len() so that a non-sized
        # signature (e.g. an int) raises the documented ValueError
        # instead of an incidental TypeError from len().
        if not isinstance(signature, bytes) or len(signature) != 1:
            raise ValueError("Structure signature must be a single byte value")
        write = self._write
        size = len(fields)
        if size <= 0x0F:
            write(bytes((0xB0 | size,)))  # TINY_STRUCT
        else:
            raise OverflowError("Structure size out of range")
        write(signature)
        for field in fields:
            self.pack(field, dehydration_hooks=dehydration_hooks)

    @staticmethod
    def new_packable_buffer():
        """ Return a fresh :class:`PackableBuffer` suitable for use as
        a packer's output stream.
        """
        return PackableBuffer()
class PackableBuffer:
    """ Growable byte buffer for a :class:`Packer` to write into, with
    support for provisional ("tmp") sections that are rolled back if an
    exception is raised while they are being written.
    """

    def __init__(self):
        self.data = bytearray()
        # Bind the bytearray methods directly so the packer's hot path
        # skips an attribute lookup per call.
        self.write = self.data.extend
        self.clear = self.data.clear
        self._tmp_buffering = 0

    @contextmanager
    def tmp_buffer(self):
        """ Context manager marking a provisional section of the
        buffer: everything written inside the block is discarded if the
        block raises; the exception is then re-raised.
        """
        self._tmp_buffering += 1
        rollback_to = len(self.data)
        try:
            yield
        except Exception:
            # Drop everything written inside the failed section.
            del self.data[rollback_to:]
            raise
        finally:
            self._tmp_buffering -= 1

    def is_tmp_buffering(self):
        """ Return True while at least one tmp_buffer section is open. """
        return bool(self._tmp_buffering)
class Unpacker:
def __init__(self, unpackable):
self.unpackable = unpackable
def reset(self):
self.unpackable.reset()
def read(self, n=1):
return self.unpackable.read(n)
def read_u8(self):
return self.unpackable.read_u8()
def unpack(self, hydration_hooks=None):
value = self._unpack(hydration_hooks=hydration_hooks)
if hydration_hooks and type(value) in hydration_hooks:
return hydration_hooks[type(value)](value)
return value
def _unpack(self, hydration_hooks=None):
marker = self.read_u8()
if marker == -1:
raise ValueError("Nothing to unpack")
# Tiny Integer
if 0x00 <= marker <= 0x7F:
return marker
elif 0xF0 <= marker <= 0xFF:
return marker - 0x100
# Null
elif marker == 0xC0:
return None
# Float
elif marker == 0xC1:
value, = struct_unpack(">d", self.read(8))
return value
# Boolean
elif marker == 0xC2:
return False
elif marker == 0xC3:
return True
# Integer
elif marker == 0xC8:
return struct_unpack(">b", self.read(1))[0]
elif marker == 0xC9:
return struct_unpack(">h", self.read(2))[0]
elif marker == 0xCA:
return struct_unpack(">i", self.read(4))[0]
elif marker == 0xCB:
return struct_unpack(">q", self.read(8))[0]
# Bytes
elif marker == 0xCC:
size, = struct_unpack(">B", self.read(1))
return self.read(size).tobytes()
elif marker == 0xCD:
size, = struct_unpack(">H", self.read(2))
return self.read(size).tobytes()
elif marker == 0xCE:
size, = struct_unpack(">I", self.read(4))
return self.read(size).tobytes()
else:
marker_high = marker & 0xF0
# String
if marker_high == 0x80: # TINY_STRING
return decode(self.read(marker & 0x0F), "utf-8")
elif marker == 0xD0: # STRING_8:
size, = struct_unpack(">B", self.read(1))
return decode(self.read(size), "utf-8")
elif marker == 0xD1: # STRING_16:
size, = struct_unpack(">H", self.read(2))
return decode(self.read(size), "utf-8")
elif marker == 0xD2: # STRING_32:
size, = struct_unpack(">I", self.read(4))
return decode(self.read(size), "utf-8")
# List
elif 0x90 <= marker <= 0x9F or 0xD4 <= marker <= 0xD6:
return list(self._unpack_list_items(
marker, hydration_hooks=hydration_hooks)
)
# Map
elif 0xA0 <= marker <= 0xAF or 0xD8 <= marker <= 0xDA:
return self._unpack_map(
marker, hydration_hooks=hydration_hooks
)
# Structure
elif 0xB0 <= marker <= 0xBF:
size, tag = self._unpack_structure_header(marker)
value = Structure(tag, *([None] * size))
for i in range(len(value)):
value[i] = self.unpack(hydration_hooks=hydration_hooks)
return value
else:
raise ValueError("Unknown PackStream marker %02X" % marker)
def _unpack_list_items(self, marker, hydration_hooks=None):
marker_high = marker & 0xF0
if marker_high == 0x90:
size = marker & 0x0F
if size == 0:
return
elif size == 1:
yield self.unpack(hydration_hooks=hydration_hooks)
else:
for _ in range(size):
yield self.unpack(hydration_hooks=hydration_hooks)
elif marker == 0xD4: # LIST_8:
size, = struct_unpack(">B", self.read(1))
for _ in range(size):
yield self.unpack(hydration_hooks=hydration_hooks)
elif marker == 0xD5: # LIST_16:
size, = struct_unpack(">H", self.read(2))
for _ in range(size):
yield self.unpack(hydration_hooks=hydration_hooks)
elif marker == 0xD6: # LIST_32:
size, = struct_unpack(">I", self.read(4))
for _ in range(size):
yield self.unpack(hydration_hooks=hydration_hooks)
else:
return
def unpack_map(self, hydration_hooks=None):
marker = self.read_u8()
return self._unpack_map(marker, hydration_hooks=hydration_hooks)
def _unpack_map(self, marker, hydration_hooks=None):
marker_high = marker & 0xF0
if marker_high == 0xA0:
size = marker & 0x0F
value = {}
for _ in range(size):
key = self.unpack(hydration_hooks=hydration_hooks)
value[key] = self.unpack(hydration_hooks=hydration_hooks)
return value
elif marker == 0xD8: # MAP_8:
size, = struct_unpack(">B", self.read(1))
value = {}
for _ in range(size):
key = self.unpack(hydration_hooks=hydration_hooks)
value[key] = self.unpack(hydration_hooks=hydration_hooks)
return value
elif marker == 0xD9: # MAP_16:
size, = struct_unpack(">H", self.read(2))
value = {}
for _ in range(size):
key = self.unpack(hydration_hooks=hydration_hooks)
value[key] = self.unpack(hydration_hooks=hydration_hooks)
return value
elif marker == 0xDA: # MAP_32:
size, = struct_unpack(">I", self.read(4))
value = {}
for _ in range(size):
key = self.unpack(hydration_hooks=hydration_hooks)
value[key] = self.unpack(hydration_hooks=hydration_hooks)
return value
else:
return None
def unpack_structure_header(self):
marker = self.read_u8()
if marker == -1:
return None, None
else:
return self._unpack_structure_header(marker)
def _unpack_structure_header(self, marker):
marker_high = marker & 0xF0
if marker_high == 0xB0: # TINY_STRUCT
signature = self.read(1).tobytes()
return marker & 0x0F, signature
else:
raise ValueError("Expected structure, found marker %02X" % marker)
    @staticmethod
    def new_unpackable_buffer():
        """Return a new, empty :class:`UnpackableBuffer`."""
        return UnpackableBuffer()
class UnpackableBuffer:
    """Byte buffer with a read pointer ``p`` and a fill level ``used``."""

    initial_capacity = 8192

    def __init__(self, data=None):
        # With no data, pre-allocate an empty buffer; otherwise copy the
        # given bytes and mark them all as used.
        if data is None:
            self.data = bytearray(self.initial_capacity)
            self.used = 0
        else:
            self.data = bytearray(data)
            self.used = len(self.data)
        self.p = 0

    def reset(self):
        """Discard all content and rewind the read pointer."""
        self.used = 0
        self.p = 0

    def read(self, n=1):
        """Return the next *n* bytes as a zero-copy memoryview and advance."""
        end = self.p + n
        chunk = memoryview(self.data)[self.p:end]
        self.p = end
        return chunk

    def read_u8(self):
        """Return the next byte as an int, or -1 if no data remains."""
        if self.p >= self.used:
            return -1
        value = self.data[self.p]
        self.p += 1
        return value

    def pop_u16(self):
        """ Remove the last two bytes of data, returning them as a big-endian
        16-bit unsigned integer.
        """
        if self.used < 2:
            return -1
        self.used -= 2
        return (self.data[self.used] << 8) | self.data[self.used + 1]
|
neo4j/_codec/packstream/v1/__init__.py
|
codereval_python_data_12
|
Appends a DISCARD message to the output queue.
:param n: number of records to discard, default = -1 (ALL)
:param qid: query ID to discard for, default = -1 (last query)
:param dehydration_hooks:
    Hooks to dehydrate types (dict from type (class) to dehydration
    function). Dehydration functions receive the value and return an
    object of type understood by packstream.
:param hydration_hooks:
    Hooks to hydrate types (mapping from type (class) to
    hydration function). Hydration functions receive the value of
    type understood by packstream and are free to return anything.
:param handlers: handler functions passed into the returned Response object
def discard(self, n=-1, qid=-1, dehydration_hooks=None,
            hydration_hooks=None, **handlers):
    """Append a DISCARD_ALL message to the output queue.

    :param n: ignored; Bolt 3 can only discard ALL remaining records
    :param qid: ignored; Bolt 3 has no query IDs
    :param dehydration_hooks: dict from type (class) to dehydration function
    :param hydration_hooks: mapping from type (class) to hydration function
    :param handlers: handler functions passed into the Response object
    """
    # Just ignore n and qid, it is not supported in the Bolt 3 Protocol.
    log.debug("[#%04X] C: DISCARD_ALL", self.local_port)
    self._append(b"\x2F", (),
                 Response(self, "discard", hydration_hooks, **handlers),
                 dehydration_hooks=dehydration_hooks)
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from logging import getLogger
from ssl import SSLSocket
from ..._async_compat.util import AsyncUtil
from ..._exceptions import (
BoltError,
BoltProtocolError,
)
from ...api import (
READ_ACCESS,
Version,
)
from ...exceptions import (
ConfigurationError,
DatabaseUnavailable,
DriverError,
ForbiddenOnReadOnlyDatabase,
Neo4jError,
NotALeader,
ServiceUnavailable,
)
from ._bolt import AsyncBolt
from ._common import (
check_supported_server_product,
CommitResponse,
InitResponse,
Response,
)
log = getLogger("neo4j")
class ServerStates(Enum):
    """Server-side Bolt connection states, as tracked client-side.

    TX_READY and TX_STREAMING are folded into a single state because the
    transition table below treats them identically.
    """
    CONNECTED = "CONNECTED"
    READY = "READY"
    STREAMING = "STREAMING"
    TX_READY_OR_TX_STREAMING = "TX_READY||TX_STREAMING"
    FAILED = "FAILED"
class ServerStateManager:
    """Client-side mirror of the server's Bolt state machine.

    :meth:`transition` is fed each completed message name together with
    the summary metadata of its response and advances :attr:`state`
    according to the transition table.
    """

    _STATE_TRANSITIONS = {
        ServerStates.CONNECTED: {
            "hello": ServerStates.READY,
        },
        ServerStates.READY: {
            "run": ServerStates.STREAMING,
            "begin": ServerStates.TX_READY_OR_TX_STREAMING,
        },
        ServerStates.STREAMING: {
            "pull": ServerStates.READY,
            "discard": ServerStates.READY,
            "reset": ServerStates.READY,
        },
        ServerStates.TX_READY_OR_TX_STREAMING: {
            "commit": ServerStates.READY,
            "rollback": ServerStates.READY,
            "reset": ServerStates.READY,
        },
        ServerStates.FAILED: {
            "reset": ServerStates.READY,
        }
    }

    def __init__(self, init_state, on_change=None):
        self.state = init_state
        self._on_change = on_change  # optional callback(old_state, new_state)

    def transition(self, message, metadata):
        """Advance the tracked state after *message* completed."""
        if metadata.get("has_more"):
            # The server is still streaming; no state change yet.
            return
        previous = self.state
        allowed = self._STATE_TRANSITIONS.get(previous, {})
        self.state = allowed.get(message, previous)
        if previous != self.state and callable(self._on_change):
            self._on_change(previous, self.state)
class AsyncBolt3(AsyncBolt):
    """ Protocol handler for Bolt 3.

    This is supported by Neo4j versions 3.5, 4.0, 4.1, 4.2, 4.3, and 4.4.
    """

    PROTOCOL_VERSION = Version(3, 0)

    # Bolt 3 predates query IDs and multi-database support.
    supports_multiple_results = False
    supports_multiple_databases = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Mirror the server-side state machine so `is_reset` can be
        # answered without a network round trip.
        self._server_state_manager = ServerStateManager(
            ServerStates.CONNECTED, on_change=self._on_server_state_change
        )

    def _on_server_state_change(self, old_state, new_state):
        log.debug("[#%04X] State: %s > %s", self.local_port,
                  old_state.name, new_state.name)

    @property
    def is_reset(self):
        # We can't be sure of the server's state if there are still pending
        # responses. Unless the last message we sent was RESET. In that case
        # the server state will always be READY when we're done.
        if (self.responses and self.responses[-1]
                and self.responses[-1].message == "reset"):
            return True
        return self._server_state_manager.state == ServerStates.READY

    @property
    def encrypted(self):
        # The connection is encrypted iff the transport is a TLS socket.
        return isinstance(self.socket, SSLSocket)

    @property
    def der_encoded_server_certificate(self):
        return self.socket.getpeercert(binary_form=True)

    def get_base_headers(self):
        """Return the extra headers sent with HELLO (before auth fields)."""
        return {
            "user_agent": self.user_agent,
        }

    async def hello(self, dehydration_hooks=None, hydration_hooks=None):
        """Send HELLO, await the response and verify the server product."""
        headers = self.get_base_headers()
        headers.update(self.auth_dict)
        logged_headers = dict(headers)
        if "credentials" in logged_headers:
            # Never write credentials to the log.
            logged_headers["credentials"] = "*******"
        log.debug("[#%04X] C: HELLO %r", self.local_port, logged_headers)
        self._append(b"\x01", (headers,),
                     response=InitResponse(self, "hello", hydration_hooks,
                                           on_success=self.server_info.update),
                     dehydration_hooks=dehydration_hooks)
        await self.send_all()
        await self.fetch_all()
        check_supported_server_product(self.server_info.agent)

    async def route(
        self, database=None, imp_user=None, bookmarks=None,
        dehydration_hooks=None, hydration_hooks=None
    ):
        """Fetch a routing table by calling the routing procedure.

        Bolt 3 has no ROUTE message, so a Cypher procedure call is used
        instead. ``database`` and ``imp_user`` are rejected because Bolt 3
        supports neither multiple databases nor impersonation.

        :return: list of routing-table dicts, one per returned record
        """
        if database is not None:
            raise ConfigurationError(
                "Database name parameter for selecting database is not "
                "supported in Bolt Protocol {!r}. Database name {!r}. "
                "Server Agent {!r}".format(
                    self.PROTOCOL_VERSION, database, self.server_info.agent
                )
            )
        if imp_user is not None:
            raise ConfigurationError(
                "Impersonation is not supported in Bolt Protocol {!r}. "
                "Trying to impersonate {!r}.".format(
                    self.PROTOCOL_VERSION, imp_user
                )
            )
        metadata = {}
        records = []
        # Ignoring database and bookmarks because there is no multi-db support.
        # The bookmarks are only relevant for making sure a previously created
        # db exists before querying a routing table for it.
        self.run(
            "CALL dbms.cluster.routing.getRoutingTable($context)",  # This is an internal procedure call. Only available if the Neo4j 3.5 is setup with clustering.
            {"context": self.routing_context},
            mode="r",  # Bolt Protocol Version(3, 0) supports mode="r"
            dehydration_hooks=dehydration_hooks,
            hydration_hooks=hydration_hooks,
            on_success=metadata.update
        )
        # BUGFIX: forward the caller's hooks instead of hard-coding None,
        # so custom (de)hydration also applies to the routing records.
        self.pull(dehydration_hooks=dehydration_hooks,
                  hydration_hooks=hydration_hooks,
                  on_success=metadata.update, on_records=records.extend)
        await self.send_all()
        await self.fetch_all()
        routing_info = [dict(zip(metadata.get("fields", ()), values))
                        for values in records]
        return routing_info

    def run(self, query, parameters=None, mode=None, bookmarks=None,
            metadata=None, timeout=None, db=None, imp_user=None,
            dehydration_hooks=None, hydration_hooks=None, **handlers):
        """Append a RUN message to the output queue.

        :raises ConfigurationError: if ``db`` or ``imp_user`` is given
            (neither is supported by Bolt 3)
        """
        if db is not None:
            raise ConfigurationError(
                "Database name parameter for selecting database is not "
                "supported in Bolt Protocol {!r}. Database name {!r}.".format(
                    self.PROTOCOL_VERSION, db
                )
            )
        if imp_user is not None:
            raise ConfigurationError(
                "Impersonation is not supported in Bolt Protocol {!r}. "
                "Trying to impersonate {!r}.".format(
                    self.PROTOCOL_VERSION, imp_user
                )
            )
        if not parameters:
            parameters = {}
        extra = {}
        if mode in (READ_ACCESS, "r"):
            extra["mode"] = "r"  # It will default to mode "w" if nothing is specified
        if bookmarks:
            try:
                extra["bookmarks"] = list(bookmarks)
            except TypeError:
                raise TypeError("Bookmarks must be provided within an iterable")
        if metadata:
            try:
                extra["tx_metadata"] = dict(metadata)
            except TypeError:
                raise TypeError("Metadata must be coercible to a dict")
        if timeout is not None:
            try:
                extra["tx_timeout"] = int(1000 * float(timeout))
            except TypeError:
                raise TypeError("Timeout must be specified as a number of seconds")
            if extra["tx_timeout"] < 0:
                raise ValueError("Timeout must be a positive number or 0.")
        fields = (query, parameters, extra)
        log.debug("[#%04X] C: RUN %s", self.local_port, " ".join(map(repr, fields)))
        self._append(b"\x10", fields,
                     Response(self, "run", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    def discard(self, n=-1, qid=-1, dehydration_hooks=None,
                hydration_hooks=None, **handlers):
        """Append a DISCARD_ALL message; *n* and *qid* are ignored."""
        # Just ignore n and qid, it is not supported in the Bolt 3 Protocol.
        log.debug("[#%04X] C: DISCARD_ALL", self.local_port)
        self._append(b"\x2F", (),
                     Response(self, "discard", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    def pull(self, n=-1, qid=-1, dehydration_hooks=None, hydration_hooks=None,
             **handlers):
        """Append a PULL_ALL message; *n* and *qid* are ignored."""
        # Just ignore n and qid, it is not supported in the Bolt 3 Protocol.
        log.debug("[#%04X] C: PULL_ALL", self.local_port)
        self._append(b"\x3F", (),
                     Response(self, "pull", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    def begin(self, mode=None, bookmarks=None, metadata=None, timeout=None,
              db=None, imp_user=None, dehydration_hooks=None,
              hydration_hooks=None, **handlers):
        """Append a BEGIN message to the output queue.

        :raises ConfigurationError: if ``db`` or ``imp_user`` is given
            (neither is supported by Bolt 3)
        """
        if db is not None:
            raise ConfigurationError(
                "Database name parameter for selecting database is not "
                "supported in Bolt Protocol {!r}. Database name {!r}.".format(
                    self.PROTOCOL_VERSION, db
                )
            )
        if imp_user is not None:
            raise ConfigurationError(
                "Impersonation is not supported in Bolt Protocol {!r}. "
                "Trying to impersonate {!r}.".format(
                    self.PROTOCOL_VERSION, imp_user
                )
            )
        extra = {}
        if mode in (READ_ACCESS, "r"):
            extra["mode"] = "r"  # It will default to mode "w" if nothing is specified
        if bookmarks:
            try:
                extra["bookmarks"] = list(bookmarks)
            except TypeError:
                raise TypeError("Bookmarks must be provided within an iterable")
        if metadata:
            try:
                extra["tx_metadata"] = dict(metadata)
            except TypeError:
                raise TypeError("Metadata must be coercible to a dict")
        if timeout is not None:
            try:
                extra["tx_timeout"] = int(1000 * float(timeout))
            except TypeError:
                raise TypeError("Timeout must be specified as a number of seconds")
            if extra["tx_timeout"] < 0:
                raise ValueError("Timeout must be a positive number or 0.")
        log.debug("[#%04X] C: BEGIN %r", self.local_port, extra)
        self._append(b"\x11", (extra,),
                     Response(self, "begin", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    def commit(self, dehydration_hooks=None, hydration_hooks=None, **handlers):
        """Append a COMMIT message to the output queue."""
        log.debug("[#%04X] C: COMMIT", self.local_port)
        self._append(b"\x12", (),
                     CommitResponse(self, "commit", hydration_hooks,
                                    **handlers),
                     dehydration_hooks=dehydration_hooks)

    def rollback(self, dehydration_hooks=None, hydration_hooks=None,
                 **handlers):
        """Append a ROLLBACK message to the output queue."""
        log.debug("[#%04X] C: ROLLBACK", self.local_port)
        self._append(b"\x13", (),
                     Response(self, "rollback", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    async def reset(self, dehydration_hooks=None, hydration_hooks=None):
        """ Add a RESET message to the outgoing queue, send
        it and consume all remaining messages.
        """
        def fail(metadata):
            raise BoltProtocolError("RESET failed %r" % metadata,
                                    address=self.unresolved_address)
        log.debug("[#%04X] C: RESET", self.local_port)
        self._append(b"\x0F",
                     response=Response(self, "reset", hydration_hooks,
                                       on_failure=fail),
                     dehydration_hooks=dehydration_hooks)
        await self.send_all()
        await self.fetch_all()

    def goodbye(self, dehydration_hooks=None, hydration_hooks=None):
        """Append a GOODBYE message (the server sends no response to it)."""
        log.debug("[#%04X] C: GOODBYE", self.local_port)
        self._append(b"\x02", (), dehydration_hooks=dehydration_hooks)

    async def _process_message(self, tag, fields):
        """ Process at most one message from the server, if available.

        :return: 2-tuple of number of detail messages and number of summary
                 messages fetched
        """
        details = []
        summary_signature = summary_metadata = None
        if tag == b"\x71":  # RECORD
            details = fields
        elif fields:
            summary_signature = tag
            summary_metadata = fields[0]
        else:
            summary_signature = tag
        if details:
            log.debug("[#%04X] S: RECORD * %d", self.local_port, len(details))  # Do not log any data
            await self.responses[0].on_records(details)
        if summary_signature is None:
            return len(details), 0
        response = self.responses.popleft()
        response.complete = True
        if summary_signature == b"\x70":  # SUCCESS
            log.debug("[#%04X] S: SUCCESS %r", self.local_port, summary_metadata)
            self._server_state_manager.transition(response.message,
                                                  summary_metadata)
            await response.on_success(summary_metadata or {})
        elif summary_signature == b"\x7E":  # IGNORED
            log.debug("[#%04X] S: IGNORED", self.local_port)
            await response.on_ignored(summary_metadata or {})
        elif summary_signature == b"\x7F":  # FAILURE
            log.debug("[#%04X] S: FAILURE %r", self.local_port, summary_metadata)
            self._server_state_manager.state = ServerStates.FAILED
            try:
                await response.on_failure(summary_metadata or {})
            except (ServiceUnavailable, DatabaseUnavailable):
                if self.pool:
                    await self.pool.deactivate(address=self.unresolved_address)
                raise
            except (NotALeader, ForbiddenOnReadOnlyDatabase):
                if self.pool:
                    self.pool.on_write_failure(address=self.unresolved_address)
                raise
            except Neo4jError as e:
                if self.pool and e.invalidates_all_connections():
                    await self.pool.mark_all_stale()
                raise
        else:
            # BUGFIX: summary_signature is a bytes object; %X needs an int,
            # so convert with ord() before formatting.
            raise BoltProtocolError(
                "Unexpected response message with signature %02X"
                % ord(summary_signature), address=self.unresolved_address)
        return len(details), 1
|
neo4j/_async/io/_bolt3.py
|
codereval_python_data_13
|
Appends a BEGIN message to the output queue.
:param mode: access mode for routing - "READ" or "WRITE" (default)
:param bookmarks: iterable of bookmark values after which this transaction should begin
:param metadata: custom metadata dictionary to attach to the transaction
:param timeout: timeout for transaction execution (seconds)
:param db: name of the database against which to begin the transaction
Requires Bolt 4.0+.
:param imp_user: the user to impersonate
Requires Bolt 4.4+
:param dehydration_hooks:
    Hooks to dehydrate types (dict from type (class) to dehydration
    function). Dehydration functions receive the value and return an
    object of type understood by packstream.
:param hydration_hooks:
    Hooks to hydrate types (mapping from type (class) to
    hydration function). Hydration functions receive the value of
    type understood by packstream and are free to return anything.
:param handlers: handler functions passed into the returned Response object
:return: Response object
def begin(self, mode=None, bookmarks=None, metadata=None, timeout=None,
          db=None, imp_user=None, dehydration_hooks=None,
          hydration_hooks=None, **handlers):
    """Append a BEGIN message to the output queue.

    :param mode: access mode for routing - "READ" or "WRITE" (default)
    :param bookmarks: iterable of bookmark values after which this
        transaction should begin
    :param metadata: custom metadata dictionary to attach to the transaction
    :param timeout: timeout for transaction execution (seconds)
    :param db: must be None; Bolt 3 has no multi-database support
    :param imp_user: must be None; Bolt 3 has no impersonation support
    :param dehydration_hooks: dict from type (class) to dehydration function
    :param hydration_hooks: mapping from type (class) to hydration function
    :param handlers: handler functions passed into the Response object
    :raises ConfigurationError: if ``db`` or ``imp_user`` is given
    :raises TypeError: if bookmarks/metadata/timeout cannot be coerced
    :raises ValueError: if ``timeout`` is negative
    """
    if db is not None:
        raise ConfigurationError(
            "Database name parameter for selecting database is not "
            "supported in Bolt Protocol {!r}. Database name {!r}.".format(
                self.PROTOCOL_VERSION, db
            )
        )
    if imp_user is not None:
        raise ConfigurationError(
            "Impersonation is not supported in Bolt Protocol {!r}. "
            "Trying to impersonate {!r}.".format(
                self.PROTOCOL_VERSION, imp_user
            )
        )
    extra = {}
    if mode in (READ_ACCESS, "r"):
        extra["mode"] = "r"  # It will default to mode "w" if nothing is specified
    if bookmarks:
        try:
            extra["bookmarks"] = list(bookmarks)
        except TypeError:
            raise TypeError("Bookmarks must be provided within an iterable")
    if metadata:
        try:
            extra["tx_metadata"] = dict(metadata)
        except TypeError:
            raise TypeError("Metadata must be coercible to a dict")
    if timeout is not None:
        try:
            # Server expects milliseconds as an integer.
            extra["tx_timeout"] = int(1000 * float(timeout))
        except TypeError:
            raise TypeError("Timeout must be specified as a number of seconds")
        if extra["tx_timeout"] < 0:
            raise ValueError("Timeout must be a positive number or 0.")
    log.debug("[#%04X] C: BEGIN %r", self.local_port, extra)
    self._append(b"\x11", (extra,),
                 Response(self, "begin", hydration_hooks, **handlers),
                 dehydration_hooks=dehydration_hooks)
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from logging import getLogger
from ssl import SSLSocket
from ..._async_compat.util import AsyncUtil
from ..._exceptions import (
BoltError,
BoltProtocolError,
)
from ...api import (
READ_ACCESS,
Version,
)
from ...exceptions import (
ConfigurationError,
DatabaseUnavailable,
DriverError,
ForbiddenOnReadOnlyDatabase,
Neo4jError,
NotALeader,
ServiceUnavailable,
)
from ._bolt import AsyncBolt
from ._common import (
check_supported_server_product,
CommitResponse,
InitResponse,
Response,
)
log = getLogger("neo4j")
class ServerStates(Enum):
    """Server-side Bolt connection states, as tracked client-side.

    TX_READY and TX_STREAMING are folded into a single state because the
    transition table below treats them identically.
    """
    CONNECTED = "CONNECTED"
    READY = "READY"
    STREAMING = "STREAMING"
    TX_READY_OR_TX_STREAMING = "TX_READY||TX_STREAMING"
    FAILED = "FAILED"
class ServerStateManager:
    """Client-side mirror of the server's Bolt state machine.

    :meth:`transition` is fed each completed message name together with
    the summary metadata of its response and advances :attr:`state`
    according to the transition table.
    """

    _STATE_TRANSITIONS = {
        ServerStates.CONNECTED: {
            "hello": ServerStates.READY,
        },
        ServerStates.READY: {
            "run": ServerStates.STREAMING,
            "begin": ServerStates.TX_READY_OR_TX_STREAMING,
        },
        ServerStates.STREAMING: {
            "pull": ServerStates.READY,
            "discard": ServerStates.READY,
            "reset": ServerStates.READY,
        },
        ServerStates.TX_READY_OR_TX_STREAMING: {
            "commit": ServerStates.READY,
            "rollback": ServerStates.READY,
            "reset": ServerStates.READY,
        },
        ServerStates.FAILED: {
            "reset": ServerStates.READY,
        }
    }

    def __init__(self, init_state, on_change=None):
        self.state = init_state
        self._on_change = on_change  # optional callback(old_state, new_state)

    def transition(self, message, metadata):
        """Advance the tracked state after *message* completed."""
        if metadata.get("has_more"):
            # The server is still streaming; no state change yet.
            return
        previous = self.state
        allowed = self._STATE_TRANSITIONS.get(previous, {})
        self.state = allowed.get(message, previous)
        if previous != self.state and callable(self._on_change):
            self._on_change(previous, self.state)
class AsyncBolt3(AsyncBolt):
    """ Protocol handler for Bolt 3.

    This is supported by Neo4j versions 3.5, 4.0, 4.1, 4.2, 4.3, and 4.4.
    """

    PROTOCOL_VERSION = Version(3, 0)

    # Bolt 3 predates query IDs and multi-database support.
    supports_multiple_results = False
    supports_multiple_databases = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Mirror the server-side state machine so `is_reset` can be
        # answered without a network round trip.
        self._server_state_manager = ServerStateManager(
            ServerStates.CONNECTED, on_change=self._on_server_state_change
        )

    def _on_server_state_change(self, old_state, new_state):
        log.debug("[#%04X] State: %s > %s", self.local_port,
                  old_state.name, new_state.name)

    @property
    def is_reset(self):
        # We can't be sure of the server's state if there are still pending
        # responses. Unless the last message we sent was RESET. In that case
        # the server state will always be READY when we're done.
        if (self.responses and self.responses[-1]
                and self.responses[-1].message == "reset"):
            return True
        return self._server_state_manager.state == ServerStates.READY

    @property
    def encrypted(self):
        # The connection is encrypted iff the transport is a TLS socket.
        return isinstance(self.socket, SSLSocket)

    @property
    def der_encoded_server_certificate(self):
        return self.socket.getpeercert(binary_form=True)

    def get_base_headers(self):
        """Return the extra headers sent with HELLO (before auth fields)."""
        return {
            "user_agent": self.user_agent,
        }

    async def hello(self, dehydration_hooks=None, hydration_hooks=None):
        """Send HELLO, await the response and verify the server product."""
        headers = self.get_base_headers()
        headers.update(self.auth_dict)
        logged_headers = dict(headers)
        if "credentials" in logged_headers:
            # Never write credentials to the log.
            logged_headers["credentials"] = "*******"
        log.debug("[#%04X] C: HELLO %r", self.local_port, logged_headers)
        self._append(b"\x01", (headers,),
                     response=InitResponse(self, "hello", hydration_hooks,
                                           on_success=self.server_info.update),
                     dehydration_hooks=dehydration_hooks)
        await self.send_all()
        await self.fetch_all()
        check_supported_server_product(self.server_info.agent)

    async def route(
        self, database=None, imp_user=None, bookmarks=None,
        dehydration_hooks=None, hydration_hooks=None
    ):
        """Fetch a routing table by calling the routing procedure.

        Bolt 3 has no ROUTE message, so a Cypher procedure call is used
        instead. ``database`` and ``imp_user`` are rejected because Bolt 3
        supports neither multiple databases nor impersonation.

        :return: list of routing-table dicts, one per returned record
        """
        if database is not None:
            raise ConfigurationError(
                "Database name parameter for selecting database is not "
                "supported in Bolt Protocol {!r}. Database name {!r}. "
                "Server Agent {!r}".format(
                    self.PROTOCOL_VERSION, database, self.server_info.agent
                )
            )
        if imp_user is not None:
            raise ConfigurationError(
                "Impersonation is not supported in Bolt Protocol {!r}. "
                "Trying to impersonate {!r}.".format(
                    self.PROTOCOL_VERSION, imp_user
                )
            )
        metadata = {}
        records = []
        # Ignoring database and bookmarks because there is no multi-db support.
        # The bookmarks are only relevant for making sure a previously created
        # db exists before querying a routing table for it.
        self.run(
            "CALL dbms.cluster.routing.getRoutingTable($context)",  # This is an internal procedure call. Only available if the Neo4j 3.5 is setup with clustering.
            {"context": self.routing_context},
            mode="r",  # Bolt Protocol Version(3, 0) supports mode="r"
            dehydration_hooks=dehydration_hooks,
            hydration_hooks=hydration_hooks,
            on_success=metadata.update
        )
        # BUGFIX: forward the caller's hooks instead of hard-coding None,
        # so custom (de)hydration also applies to the routing records.
        self.pull(dehydration_hooks=dehydration_hooks,
                  hydration_hooks=hydration_hooks,
                  on_success=metadata.update, on_records=records.extend)
        await self.send_all()
        await self.fetch_all()
        routing_info = [dict(zip(metadata.get("fields", ()), values))
                        for values in records]
        return routing_info

    def run(self, query, parameters=None, mode=None, bookmarks=None,
            metadata=None, timeout=None, db=None, imp_user=None,
            dehydration_hooks=None, hydration_hooks=None, **handlers):
        """Append a RUN message to the output queue.

        :raises ConfigurationError: if ``db`` or ``imp_user`` is given
            (neither is supported by Bolt 3)
        """
        if db is not None:
            raise ConfigurationError(
                "Database name parameter for selecting database is not "
                "supported in Bolt Protocol {!r}. Database name {!r}.".format(
                    self.PROTOCOL_VERSION, db
                )
            )
        if imp_user is not None:
            raise ConfigurationError(
                "Impersonation is not supported in Bolt Protocol {!r}. "
                "Trying to impersonate {!r}.".format(
                    self.PROTOCOL_VERSION, imp_user
                )
            )
        if not parameters:
            parameters = {}
        extra = {}
        if mode in (READ_ACCESS, "r"):
            extra["mode"] = "r"  # It will default to mode "w" if nothing is specified
        if bookmarks:
            try:
                extra["bookmarks"] = list(bookmarks)
            except TypeError:
                raise TypeError("Bookmarks must be provided within an iterable")
        if metadata:
            try:
                extra["tx_metadata"] = dict(metadata)
            except TypeError:
                raise TypeError("Metadata must be coercible to a dict")
        if timeout is not None:
            try:
                extra["tx_timeout"] = int(1000 * float(timeout))
            except TypeError:
                raise TypeError("Timeout must be specified as a number of seconds")
            if extra["tx_timeout"] < 0:
                raise ValueError("Timeout must be a positive number or 0.")
        fields = (query, parameters, extra)
        log.debug("[#%04X] C: RUN %s", self.local_port, " ".join(map(repr, fields)))
        self._append(b"\x10", fields,
                     Response(self, "run", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    def discard(self, n=-1, qid=-1, dehydration_hooks=None,
                hydration_hooks=None, **handlers):
        """Append a DISCARD_ALL message; *n* and *qid* are ignored."""
        # Just ignore n and qid, it is not supported in the Bolt 3 Protocol.
        log.debug("[#%04X] C: DISCARD_ALL", self.local_port)
        self._append(b"\x2F", (),
                     Response(self, "discard", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    def pull(self, n=-1, qid=-1, dehydration_hooks=None, hydration_hooks=None,
             **handlers):
        """Append a PULL_ALL message; *n* and *qid* are ignored."""
        # Just ignore n and qid, it is not supported in the Bolt 3 Protocol.
        log.debug("[#%04X] C: PULL_ALL", self.local_port)
        self._append(b"\x3F", (),
                     Response(self, "pull", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    def begin(self, mode=None, bookmarks=None, metadata=None, timeout=None,
              db=None, imp_user=None, dehydration_hooks=None,
              hydration_hooks=None, **handlers):
        """Append a BEGIN message to the output queue.

        :raises ConfigurationError: if ``db`` or ``imp_user`` is given
            (neither is supported by Bolt 3)
        """
        if db is not None:
            raise ConfigurationError(
                "Database name parameter for selecting database is not "
                "supported in Bolt Protocol {!r}. Database name {!r}.".format(
                    self.PROTOCOL_VERSION, db
                )
            )
        if imp_user is not None:
            raise ConfigurationError(
                "Impersonation is not supported in Bolt Protocol {!r}. "
                "Trying to impersonate {!r}.".format(
                    self.PROTOCOL_VERSION, imp_user
                )
            )
        extra = {}
        if mode in (READ_ACCESS, "r"):
            extra["mode"] = "r"  # It will default to mode "w" if nothing is specified
        if bookmarks:
            try:
                extra["bookmarks"] = list(bookmarks)
            except TypeError:
                raise TypeError("Bookmarks must be provided within an iterable")
        if metadata:
            try:
                extra["tx_metadata"] = dict(metadata)
            except TypeError:
                raise TypeError("Metadata must be coercible to a dict")
        if timeout is not None:
            try:
                extra["tx_timeout"] = int(1000 * float(timeout))
            except TypeError:
                raise TypeError("Timeout must be specified as a number of seconds")
            if extra["tx_timeout"] < 0:
                raise ValueError("Timeout must be a positive number or 0.")
        log.debug("[#%04X] C: BEGIN %r", self.local_port, extra)
        self._append(b"\x11", (extra,),
                     Response(self, "begin", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    def commit(self, dehydration_hooks=None, hydration_hooks=None, **handlers):
        """Append a COMMIT message to the output queue."""
        log.debug("[#%04X] C: COMMIT", self.local_port)
        self._append(b"\x12", (),
                     CommitResponse(self, "commit", hydration_hooks,
                                    **handlers),
                     dehydration_hooks=dehydration_hooks)

    def rollback(self, dehydration_hooks=None, hydration_hooks=None,
                 **handlers):
        """Append a ROLLBACK message to the output queue."""
        log.debug("[#%04X] C: ROLLBACK", self.local_port)
        self._append(b"\x13", (),
                     Response(self, "rollback", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    async def reset(self, dehydration_hooks=None, hydration_hooks=None):
        """ Add a RESET message to the outgoing queue, send
        it and consume all remaining messages.
        """
        def fail(metadata):
            raise BoltProtocolError("RESET failed %r" % metadata,
                                    address=self.unresolved_address)
        log.debug("[#%04X] C: RESET", self.local_port)
        self._append(b"\x0F",
                     response=Response(self, "reset", hydration_hooks,
                                       on_failure=fail),
                     dehydration_hooks=dehydration_hooks)
        await self.send_all()
        await self.fetch_all()

    def goodbye(self, dehydration_hooks=None, hydration_hooks=None):
        """Append a GOODBYE message (the server sends no response to it)."""
        log.debug("[#%04X] C: GOODBYE", self.local_port)
        self._append(b"\x02", (), dehydration_hooks=dehydration_hooks)

    async def _process_message(self, tag, fields):
        """ Process at most one message from the server, if available.

        :return: 2-tuple of number of detail messages and number of summary
                 messages fetched
        """
        details = []
        summary_signature = summary_metadata = None
        if tag == b"\x71":  # RECORD
            details = fields
        elif fields:
            summary_signature = tag
            summary_metadata = fields[0]
        else:
            summary_signature = tag
        if details:
            log.debug("[#%04X] S: RECORD * %d", self.local_port, len(details))  # Do not log any data
            await self.responses[0].on_records(details)
        if summary_signature is None:
            return len(details), 0
        response = self.responses.popleft()
        response.complete = True
        if summary_signature == b"\x70":  # SUCCESS
            log.debug("[#%04X] S: SUCCESS %r", self.local_port, summary_metadata)
            self._server_state_manager.transition(response.message,
                                                  summary_metadata)
            await response.on_success(summary_metadata or {})
        elif summary_signature == b"\x7E":  # IGNORED
            log.debug("[#%04X] S: IGNORED", self.local_port)
            await response.on_ignored(summary_metadata or {})
        elif summary_signature == b"\x7F":  # FAILURE
            log.debug("[#%04X] S: FAILURE %r", self.local_port, summary_metadata)
            self._server_state_manager.state = ServerStates.FAILED
            try:
                await response.on_failure(summary_metadata or {})
            except (ServiceUnavailable, DatabaseUnavailable):
                if self.pool:
                    await self.pool.deactivate(address=self.unresolved_address)
                raise
            except (NotALeader, ForbiddenOnReadOnlyDatabase):
                if self.pool:
                    self.pool.on_write_failure(address=self.unresolved_address)
                raise
            except Neo4jError as e:
                if self.pool and e.invalidates_all_connections():
                    await self.pool.mark_all_stale()
                raise
        else:
            # BUGFIX: summary_signature is a bytes object; %X needs an int,
            # so convert with ord() before formatting.
            raise BoltProtocolError(
                "Unexpected response message with signature %02X"
                % ord(summary_signature), address=self.unresolved_address)
        return len(details), 1
|
neo4j/_async/io/_bolt3.py
|
codereval_python_data_14
|
>>> round_half_to_even(3)
3
>>> round_half_to_even(3.2)
3
>>> round_half_to_even(3.5)
4
>>> round_half_to_even(3.7)
4
>>> round_half_to_even(4)
4
>>> round_half_to_even(4.2)
4
>>> round_half_to_even(4.5)
4
>>> round_half_to_even(4.7)
5
:param n:
:return:
def round_half_to_even(n):
    """Round *n* to the nearest integer, breaking exact .5 ties toward
    the even neighbour (banker's rounding).

    >>> round_half_to_even(3.5)
    4
    >>> round_half_to_even(4.5)
    4
    >>> round_half_to_even(4.7)
    5

    :param n: number to round
    :return: nearest int (even one on a .5 tie)
    """
    scaled = 10 * n
    # Detect an exact half: one decimal digit equal to 5, nothing beyond.
    if scaled == int(scaled) and scaled % 10 == 5:
        upper = int(n + 0.5)
        lower = int(n - 0.5)
        return upper if upper % 2 == 0 else lower
    return int(round(n))
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"nano_add",
"nano_div",
"nano_divmod",
"symmetric_divmod",
"round_half_to_even",
]
def nano_add(x, y):
    """Add two numbers at nanosecond (9 decimal digit) resolution.

    Avoids binary floating point error by scaling both operands to
    integer nanoseconds before adding.

    >>> nano_add(0.7, 0.2)
    0.9
    >>> nano_add(-0.7, 0.2)
    -0.5

    :param x: first addend
    :param y: second addend
    :return: sum as a float
    """
    scale = 1000000000
    return (int(scale * x) + int(scale * y)) / scale
def nano_div(x, y):
    """Divide two numbers at nanosecond (9 decimal digit) resolution.

    >>> nano_div(0.7, 0.2)
    3.5
    >>> nano_div(-0.7, 0.2)
    -3.5

    :param x: dividend
    :param y: divisor
    :return: quotient as a float
    """
    scale = 1000000000
    # Note: the numerator is deliberately kept as a float (not truncated).
    return float(scale * x) / int(scale * y)
def nano_divmod(x, y):
    """divmod at nanosecond resolution; the remainder keeps the type of *x*.

    >>> nano_divmod(0.7, 0.2)
    (3, 0.1)

    :param x: dividend
    :param y: divisor
    :return: (int quotient, remainder of type(x))
    """
    result_type = type(x)
    numerator = int(1000000000 * x)
    denominator = int(1000000000 * y)
    quotient, remainder = divmod(numerator, denominator)
    return int(quotient), result_type(remainder / 1000000000)
def symmetric_divmod(dividend, divisor):
    """divmod variant that truncates toward zero (symmetric about zero).

    The remainder carries the sign of the dividend and keeps its type.

    :param dividend: value to divide (its type is preserved in the remainder)
    :param divisor: value to divide by
    :return: (int quotient, remainder of type(dividend))
    """
    result_type = type(dividend)
    if dividend < 0:
        # Divide the magnitude, then negate both parts.
        quotient, remainder = divmod(-dividend, divisor)
        return -int(quotient), -result_type(remainder)
    quotient, remainder = divmod(dividend, divisor)
    return int(quotient), result_type(remainder)
def round_half_to_even(n):
    """Round *n* to the nearest integer, breaking exact .5 ties toward
    the even neighbour (banker's rounding).

    >>> round_half_to_even(3.5)
    4
    >>> round_half_to_even(4.5)
    4
    >>> round_half_to_even(4.7)
    5

    :param n: number to round
    :return: nearest int (even one on a .5 tie)
    """
    scaled = 10 * n
    # Detect an exact half: one decimal digit equal to 5, nothing beyond.
    if scaled == int(scaled) and scaled % 10 == 5:
        upper = int(n + 0.5)
        lower = int(n - 0.5)
        return upper if upper % 2 == 0 else lower
    return int(round(n))
|
neo4j/time/_arithmetic.py
|
codereval_python_data_15
|
Dynamically create a Point subclass.
def point_type(name, fields, srid_map):
    """ Dynamically create a Point subclass.

    :param name: class name for the new subclass
    :param fields: coordinate field names, aliased positionally to x/y/z
    :param srid_map: dict mapping coordinate count to SRID
    """
    def srid(self):
        # The SRID is selected by the number of coordinates in the point.
        return srid_map.get(len(self))

    attributes = {"srid": property(srid)}
    for position, field_name in enumerate(fields):
        def accessor(self, i=position, f=field_name):
            # Default arguments freeze the loop variables at definition time.
            try:
                return self[i]
            except IndexError:
                raise AttributeError(f)
        for alias in {field_name, "xyz"[position]}:
            attributes[alias] = property(accessor)
    cls = type(name, (Point,), attributes)
    with srid_table_lock:
        # Record the new class in the shared SRID lookup table.
        for dimension, srid_code in srid_map.items():
            srid_table[srid_code] = (cls, dimension)
    return cls
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines _spatial data types.
"""
from threading import Lock
# SRID to subclass mappings
srid_table = {}
srid_table_lock = Lock()
class Point(tuple):
    """Base-class for _spatial data.

    A point within a geometric space. This type is generally used via its
    subclasses and should not be instantiated directly unless there is no
    subclass defined for the required SRID.

    :param iterable:
        An iterable of coordinates.
        All items will be converted to :class:`float`.
    """

    #: The SRID (_spatial reference identifier) of the _spatial data.
    #: A number that identifies the coordinate system the _spatial type is to be
    #: interpreted in.
    #:
    #: :type: int
    srid = None

    def __new__(cls, iterable):
        coordinates = (float(value) for value in iterable)
        return tuple.__new__(cls, coordinates)

    def __repr__(self):
        return "POINT(%s)" % " ".join(str(coordinate) for coordinate in self)

    def __eq__(self, other):
        # Points only compare equal to points of the exact same subclass
        # with identical coordinates.
        try:
            if type(self) is not type(other):
                return False
            return tuple(self) == tuple(other)
        except (AttributeError, TypeError):
            return False

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(type(self)) ^ hash(tuple(self))
def point_type(name, fields, srid_map):
    """ Dynamically create a Point subclass.

    :param name: class name of the new subclass
    :param fields: ordered field names; each becomes a read-only property
        and is additionally aliased by its positional letter in "xyz"
    :param srid_map: mapping from dimension count to SRID; consulted by the
        generated ``srid`` property and registered into ``srid_table``
    :return: the newly created :class:`Point` subclass
    """
    def srid(self):
        try:
            return srid_map[len(self)]
        except KeyError:
            # No SRID registered for this dimensionality.
            return None
    attributes = {"srid": property(srid)}
    for index, subclass_field in enumerate(fields):
        # i/f defaults capture the current loop values (late-binding fix).
        def accessor(self, i=index, f=subclass_field):
            try:
                return self[i]
            except IndexError:
                raise AttributeError(f)
        for field_alias in {subclass_field, "xyz"[index]}:
            attributes[field_alias] = property(accessor)
    cls = type(name, (Point,), attributes)
    # Register the class per SRID so hydrators can look it up later.
    with srid_table_lock:
        for dim, srid in srid_map.items():
            srid_table[srid] = (cls, dim)
    return cls
# Point subclass definitions
# SRIDs come from the accompanying maps: 7203/9157 for 2D/3D cartesian,
# 4326/4979 for 2D/3D WGS-84 geographic coordinates.
CartesianPoint = point_type("CartesianPoint", ["x", "y", "z"],
                            {2: 7203, 3: 9157})
WGS84Point = point_type("WGS84Point", ["longitude", "latitude", "height"],
                        {2: 4326, 3: 4979})
|
neo4j/_spatial/__init__.py
|
codereval_python_data_16
|
Decorator for deprecating functions and methods.
::
@deprecated("'foo' has been deprecated in favour of 'bar'")
def foo(x):
pass
def deprecated(message):
    """ Decorator for deprecating functions and methods.
    ::
        @deprecated("'foo' has been deprecated in favour of 'bar'")
        def foo(x):
            pass
    """
    def decorator(f):
        # Keep coroutine functions awaitable by wrapping them in an async
        # wrapper; plain callables get a plain wrapper.
        if asyncio.iscoroutinefunction(f):
            @wraps(f)
            async def wrapper(*args, **kwargs):
                deprecation_warn(message, stack_level=2)
                return await f(*args, **kwargs)
        else:
            @wraps(f)
            def wrapper(*args, **kwargs):
                deprecation_warn(message, stack_level=2)
                return f(*args, **kwargs)
        return wrapper
    return decorator
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from functools import wraps
from warnings import warn
# Can be automatically overridden in builds
package = "neo4j"
version = "5.0.dev0"
def get_user_agent():
    """ Obtain the default user agent string sent to the server after
    a successful handshake.
    """
    from sys import (
        platform,
        version_info,
    )
    major, minor, micro, level, serial = version_info
    return (
        f"neo4j-python/{version} "
        f"Python/{major}.{minor}.{micro}-{level}-{serial} ({platform})"
    )
def deprecation_warn(message, stack_level=1):
    """Emit a DeprecationWarning attributed to the caller's frame."""
    # +1 compensates for this helper's own stack frame.
    effective_level = stack_level + 1
    warn(message, category=DeprecationWarning, stacklevel=effective_level)
def deprecated(message):
    """ Decorator for deprecating functions and methods.
    ::
        @deprecated("'foo' has been deprecated in favour of 'bar'")
        def foo(x):
            pass

    :param message: warning text emitted each time the decorated callable
        is invoked
    :return: a decorator preserving the wrapped callable's metadata
    """
    def decorator(f):
        # Coroutine functions need an async wrapper so they stay awaitable.
        if asyncio.iscoroutinefunction(f):
            @wraps(f)
            async def inner(*args, **kwargs):
                # stack_level=2 points the warning at the decorated
                # function's caller, not at this wrapper.
                deprecation_warn(message, stack_level=2)
                return await f(*args, **kwargs)
            return inner
        else:
            @wraps(f)
            def inner(*args, **kwargs):
                deprecation_warn(message, stack_level=2)
                return f(*args, **kwargs)
            return inner
    return decorator
class ExperimentalWarning(Warning):
    """ Base class for warnings about experimental features.

    Raised (as a warning category) via :func:`experimental_warn` when an
    API tagged with :func:`experimental` is used.
    """
def experimental_warn(message, stack_level=1):
    """Emit an ExperimentalWarning attributed to the caller's frame."""
    # +1 compensates for this helper's own stack frame.
    adjusted_level = stack_level + 1
    warn(message, category=ExperimentalWarning, stacklevel=adjusted_level)
def experimental(message):
    """ Decorator for tagging experimental functions and methods.
    ::
        @experimental("'foo' is an experimental function and may be "
                      "removed in a future release")
        def foo(x):
            pass
    """
    def decorator(f):
        # Plain callables get a plain wrapper; coroutine functions need an
        # async wrapper so they remain awaitable.
        if not asyncio.iscoroutinefunction(f):
            @wraps(f)
            def sync_wrapper(*args, **kwargs):
                experimental_warn(message, stack_level=2)
                return f(*args, **kwargs)
            return sync_wrapper

        @wraps(f)
        async def async_wrapper(*args, **kwargs):
            experimental_warn(message, stack_level=2)
            return await f(*args, **kwargs)
        return async_wrapper
    return decorator
def unclosed_resource_warn(obj):
    """Emit a ResourceWarning for *obj*, including its allocation
    traceback when tracemalloc is tracing.
    """
    import tracemalloc
    from warnings import warn

    parts = [f"Unclosed {obj!r}."]
    trace = tracemalloc.get_object_traceback(obj)
    if trace:
        parts.append("Object allocated at (most recent call last):")
        parts.extend(trace.format())
    else:
        parts.append("Enable tracemalloc to get the object allocation traceback.")
    warn("\n".join(parts), ResourceWarning, stacklevel=2, source=obj)
|
neo4j/_meta.py
|
codereval_python_data_17
|
Some behaviour of R cannot be configured via env variables, but can
only be configured via R options once R has started. These are set here.
def _inline_r_setup(code: str) -> str:
"""
Some behaviour of R cannot be configured via env variables, but can
only be configured via R options once R has started. These are set here.
"""
with_option = f"""\
options(install.packages.compile.from.source = "never")
{code}
"""
return with_option
from __future__ import annotations
import contextlib
import os
import shlex
import shutil
from typing import Generator
from typing import Sequence
from pre_commit.envcontext import envcontext
from pre_commit.envcontext import PatchesT
from pre_commit.envcontext import UNSET
from pre_commit.hook import Hook
from pre_commit.languages import helpers
from pre_commit.prefix import Prefix
from pre_commit.util import clean_path_on_failure
from pre_commit.util import cmd_output_b
ENVIRONMENT_DIR = 'renv'
RSCRIPT_OPTS = ('--no-save', '--no-restore', '--no-site-file', '--no-environ')
get_default_version = helpers.basic_get_default_version
health_check = helpers.basic_health_check
def get_env_patch(venv: str) -> PatchesT:
    """Environment overrides that activate the renv located in *venv*."""
    # R sources R_PROFILE_USER on startup; the generated activate.R loads
    # the renv.  RENV_PROJECT is cleared so an outer project cannot leak in.
    activate_script = os.path.join(venv, 'activate.R')
    return (
        ('R_PROFILE_USER', activate_script),
        ('RENV_PROJECT', UNSET),
    )
@contextlib.contextmanager
def in_env(
    prefix: Prefix,
    language_version: str,
) -> Generator[None, None, None]:
    """Context manager that activates the hook's renv environment."""
    patches = get_env_patch(_get_env_dir(prefix, language_version))
    with envcontext(patches):
        yield
def _get_env_dir(prefix: Prefix, version: str) -> str:
    """Absolute path of the renv environment directory for *version*."""
    env_name = helpers.environment_dir(ENVIRONMENT_DIR, version)
    return prefix.path(env_name)
def _prefix_if_non_local_file_entry(
entry: Sequence[str],
prefix: Prefix,
src: str,
) -> Sequence[str]:
if entry[1] == '-e':
return entry[1:]
else:
if src == 'local':
path = entry[1]
else:
path = prefix.path(entry[1])
return (path,)
def _rscript_exec() -> str:
r_home = os.environ.get('R_HOME')
if r_home is None:
return 'Rscript'
else:
return os.path.join(r_home, 'bin', 'Rscript')
def _entry_validate(entry: Sequence[str]) -> None:
"""
Allowed entries:
# Rscript -e expr
# Rscript path/to/file
"""
if entry[0] != 'Rscript':
raise ValueError('entry must start with `Rscript`.')
if entry[1] == '-e':
if len(entry) > 3:
raise ValueError('You can supply at most one expression.')
elif len(entry) > 2:
raise ValueError(
'The only valid syntax is `Rscript -e {expr}`',
'or `Rscript path/to/hook/script`',
)
def _cmd_from_hook(hook: Hook) -> tuple[str, ...]:
    """Build the full Rscript command line for *hook*."""
    entry = shlex.split(hook.entry)
    _entry_validate(entry)

    rscript = entry[:1]
    script = _prefix_if_non_local_file_entry(entry, hook.prefix, hook.src)
    return (*rscript, *RSCRIPT_OPTS, *script, *hook.args)
def install_environment(
    prefix: Prefix,
    version: str,
    additional_dependencies: Sequence[str],
) -> None:
    """Create the renv environment for an R hook repository.

    Copies the lockfile and renv bootstrap out of the hook repo, restores
    the locked packages, writes an ``activate.R`` (later sourced via
    ``R_PROFILE_USER``), and installs the hook repo itself when it is an
    R package, plus any *additional_dependencies*.
    """
    env_dir = _get_env_dir(prefix, version)
    # Remove the partially-built env dir if anything below fails.
    with clean_path_on_failure(env_dir):
        os.makedirs(env_dir, exist_ok=True)
        shutil.copy(prefix.path('renv.lock'), env_dir)
        shutil.copytree(prefix.path('renv'), os.path.join(env_dir, 'renv'))

        # R code run in a fresh process: restore the lockfile, then write an
        # activate.R that loads this renv from any working directory.
        r_code_inst_environment = f"""\
prefix_dir <- {prefix.prefix_dir!r}
options(
repos = c(CRAN = "https://cran.rstudio.com"),
renv.consent = TRUE
)
source("renv/activate.R")
renv::restore()
activate_statement <- paste0(
'suppressWarnings({{',
'old <- setwd("', getwd(), '"); ',
'source("renv/activate.R"); ',
'setwd(old); ',
'renv::load("', getwd(), '");}})'
)
writeLines(activate_statement, 'activate.R')
is_package <- tryCatch(
{{
path_desc <- file.path(prefix_dir, 'DESCRIPTION')
suppressWarnings(desc <- read.dcf(path_desc))
"Package" %in% colnames(desc)
}},
error = function(...) FALSE
)
if (is_package) {{
renv::install(prefix_dir)
}}
"""

        cmd_output_b(
            _rscript_exec(), '--vanilla', '-e',
            _inline_r_setup(r_code_inst_environment),
            cwd=env_dir,
        )
        if additional_dependencies:
            r_code_inst_add = 'renv::install(commandArgs(trailingOnly = TRUE))'
            # Extra deps are installed *inside* the freshly-built env.
            with in_env(prefix, version):
                cmd_output_b(
                    _rscript_exec(), *RSCRIPT_OPTS, '-e',
                    _inline_r_setup(r_code_inst_add),
                    *additional_dependencies,
                    cwd=env_dir,
                )
def _inline_r_setup(code: str) -> str:
    """
    Some behaviour of R cannot be configured via env variables, but can
    only be configured via R options once R has started. These are set here.

    :param code: R source to run after the options are applied
    :return: the R source prefixed with the option-setting prelude
    """
    # Never compile from source: prefer pre-built binaries so installs are
    # fast and don't require a compiler toolchain in the hook environment.
    with_option = f"""\
options(install.packages.compile.from.source = "never")
{code}
"""
    return with_option
def run_hook(
    hook: Hook,
    file_args: Sequence[str],
    color: bool,
) -> tuple[int, bytes]:
    """Execute *hook* over *file_args* inside its renv environment."""
    cmd = _cmd_from_hook(hook)
    with in_env(hook.prefix, hook.language_version):
        return helpers.run_xargs(hook, cmd, file_args, color=color)
|
pre_commit/languages/r.py
|
codereval_python_data_18
|
A simplified implementation of xargs.
color: Make a pty if on a platform that supports it
target_concurrency: Target number of partitions to run concurrently
def xargs(
    cmd: tuple[str, ...],
    varargs: Sequence[str],
    *,
    color: bool = False,
    target_concurrency: int = 1,
    # NOTE: this default is computed once, at function definition time.
    _max_length: int = _get_platform_max_length(),
    **kwargs: Any,
) -> tuple[int, bytes]:
    """A simplified implementation of xargs.

    Splits *varargs* into command lines that fit the platform limit, runs
    them (possibly concurrently) and returns the combined result.

    color: Make a pty if on a platform that supports it
    target_concurrency: Target number of partitions to run concurrently

    :return: (max return code across partitions, concatenated output)
    """
    cmd_fn = cmd_output_p if color else cmd_output_b
    retcode = 0
    stdout = b''

    try:
        cmd = parse_shebang.normalize_cmd(cmd)
    except parse_shebang.ExecutableNotFoundError as e:
        # Report the lookup failure as if the command had run and failed.
        return e.to_output()[:2]

    # on windows, batch files have a separate length limit than windows itself
    if (
            sys.platform == 'win32' and
            cmd[0].lower().endswith(('.bat', '.cmd'))
    ):  # pragma: win32 cover
        # this is implementation details but the command gets translated into
        # full/path/to/cmd.exe /c *cmd
        cmd_exe = parse_shebang.find_executable('cmd.exe')
        # 1024 is additionally subtracted to give headroom for further
        # expansion inside the batch file
        _max_length = 8192 - len(cmd_exe) - len(' /c ') - 1024

    partitions = partition(cmd, varargs, target_concurrency, _max_length)

    def run_cmd_partition(
            run_cmd: tuple[str, ...],
    ) -> tuple[int, bytes, bytes | None]:
        # retcode=None: don't raise on failure, we aggregate codes below.
        return cmd_fn(
            *run_cmd, retcode=None, stderr=subprocess.STDOUT, **kwargs,
        )

    threads = min(len(partitions), target_concurrency)
    with _thread_mapper(threads) as thread_map:
        results = thread_map(run_cmd_partition, partitions)

        # Worst (highest) return code wins; outputs are concatenated.
        for proc_retcode, proc_out, _ in results:
            retcode = max(retcode, proc_retcode)
            stdout += proc_out

    return retcode, stdout
from __future__ import annotations
import concurrent.futures
import contextlib
import math
import os
import subprocess
import sys
from typing import Any
from typing import Callable
from typing import Generator
from typing import Iterable
from typing import MutableMapping
from typing import Sequence
from typing import TypeVar
from pre_commit import parse_shebang
from pre_commit.util import cmd_output_b
from pre_commit.util import cmd_output_p
TArg = TypeVar('TArg')
TRet = TypeVar('TRet')
def _environ_size(_env: MutableMapping[str, str] | None = None) -> int:
environ = _env if _env is not None else getattr(os, 'environb', os.environ)
size = 8 * len(environ) # number of pointers in `envp`
for k, v in environ.items():
size += len(k) + len(v) + 2 # c strings in `envp`
return size
def _get_platform_max_length() -> int:  # pragma: no cover (platform specific)
    """Best-effort upper bound for the length of one command line."""
    if os.name == 'posix':
        available = os.sysconf('SC_ARG_MAX') - 2048 - _environ_size()
        # clamp to [4 KiB, 128 KiB]
        return max(min(available, 2 ** 17), 2 ** 12)
    if os.name == 'nt':
        return 2 ** 15 - 2048  # UNICODE_STRING max - headroom
    # posix minimum
    return 2 ** 12
def _command_length(*cmd: str) -> int:
full_cmd = ' '.join(cmd)
# win32 uses the amount of characters, more details at:
# https://github.com/pre-commit/pre-commit/pull/839
if sys.platform == 'win32':
return len(full_cmd.encode('utf-16le')) // 2
else:
return len(full_cmd.encode(sys.getfilesystemencoding()))
class ArgumentTooLongError(RuntimeError):
    """Raised by ``partition`` when a single argument cannot fit on an
    otherwise-empty command line.
    """
    pass
def partition(
    cmd: Sequence[str],
    varargs: Sequence[str],
    target_concurrency: int,
    _max_length: int | None = None,
) -> tuple[tuple[str, ...], ...]:
    """Split *varargs* into command tuples no longer than *_max_length*.

    :return: a tuple of full commands (``cmd`` followed by a slice of
        *varargs*); always at least one, possibly with no varargs.
    :raises ArgumentTooLongError: if one argument alone exceeds the limit.
    """
    _max_length = _max_length or _get_platform_max_length()

    # Generally, we try to partition evenly into at least `target_concurrency`
    # partitions, but we don't want a bunch of tiny partitions.
    max_args = max(4, math.ceil(len(varargs) / target_concurrency))

    cmd = tuple(cmd)
    ret = []

    ret_cmd: list[str] = []
    # Reversed so arguments are in order
    varargs = list(reversed(varargs))

    # +1 accounts for the space joining the command to its first argument.
    total_length = _command_length(*cmd) + 1
    while varargs:
        arg = varargs.pop()

        arg_length = _command_length(arg) + 1
        if (
                total_length + arg_length <= _max_length and
                len(ret_cmd) < max_args
        ):
            ret_cmd.append(arg)
            total_length += arg_length
        elif not ret_cmd:
            # First argument of a fresh partition is already too long.
            raise ArgumentTooLongError(arg)
        else:
            # We've exceeded the length, yield a command
            ret.append(cmd + tuple(ret_cmd))
            ret_cmd = []
            total_length = _command_length(*cmd) + 1
            # Re-queue the argument for the next partition.
            varargs.append(arg)

    ret.append(cmd + tuple(ret_cmd))

    return tuple(ret)
@contextlib.contextmanager
def _thread_mapper(maxsize: int) -> Generator[
Callable[[Callable[[TArg], TRet], Iterable[TArg]], Iterable[TRet]],
None, None,
]:
if maxsize == 1:
yield map
else:
with concurrent.futures.ThreadPoolExecutor(maxsize) as ex:
yield ex.map
def xargs(
    cmd: tuple[str, ...],
    varargs: Sequence[str],
    *,
    color: bool = False,
    target_concurrency: int = 1,
    # NOTE: this default is computed once, at function definition time.
    _max_length: int = _get_platform_max_length(),
    **kwargs: Any,
) -> tuple[int, bytes]:
    """A simplified implementation of xargs.

    Splits *varargs* into command lines that fit the platform limit, runs
    them (possibly concurrently) and returns the combined result.

    color: Make a pty if on a platform that supports it
    target_concurrency: Target number of partitions to run concurrently

    :return: (max return code across partitions, concatenated output)
    """
    cmd_fn = cmd_output_p if color else cmd_output_b
    retcode = 0
    stdout = b''

    try:
        cmd = parse_shebang.normalize_cmd(cmd)
    except parse_shebang.ExecutableNotFoundError as e:
        # Report the lookup failure as if the command had run and failed.
        return e.to_output()[:2]

    # on windows, batch files have a separate length limit than windows itself
    if (
            sys.platform == 'win32' and
            cmd[0].lower().endswith(('.bat', '.cmd'))
    ):  # pragma: win32 cover
        # this is implementation details but the command gets translated into
        # full/path/to/cmd.exe /c *cmd
        cmd_exe = parse_shebang.find_executable('cmd.exe')
        # 1024 is additionally subtracted to give headroom for further
        # expansion inside the batch file
        _max_length = 8192 - len(cmd_exe) - len(' /c ') - 1024

    partitions = partition(cmd, varargs, target_concurrency, _max_length)

    def run_cmd_partition(
            run_cmd: tuple[str, ...],
    ) -> tuple[int, bytes, bytes | None]:
        # retcode=None: don't raise on failure, we aggregate codes below.
        return cmd_fn(
            *run_cmd, retcode=None, stderr=subprocess.STDOUT, **kwargs,
        )

    threads = min(len(partitions), target_concurrency)
    with _thread_mapper(threads) as thread_map:
        results = thread_map(run_cmd_partition, partitions)

        # Worst (highest) return code wins; outputs are concatenated.
        for proc_retcode, proc_out, _ in results:
            retcode = max(retcode, proc_retcode)
            stdout += proc_out

    return retcode, stdout
|
pre_commit/xargs.py
|
codereval_python_data_19
|
Deterministically shuffle
def _shuffled(seq: Sequence[str]) -> list[str]:
    """Deterministically shuffle"""
    rng = random.Random()
    # A fixed seed (with the historical seeding algorithm) keeps the order
    # reproducible across runs and Python releases.
    rng.seed(FIXED_RANDOM_SEED, version=1)

    shuffled = list(seq)
    rng.shuffle(shuffled)
    return shuffled
from __future__ import annotations
import multiprocessing
import os
import random
import re
from typing import Any
from typing import NoReturn
from typing import overload
from typing import Sequence
import pre_commit.constants as C
from pre_commit import parse_shebang
from pre_commit.hook import Hook
from pre_commit.prefix import Prefix
from pre_commit.util import cmd_output_b
from pre_commit.xargs import xargs
FIXED_RANDOM_SEED = 1542676187
SHIMS_RE = re.compile(r'[/\\]shims[/\\]')
def exe_exists(exe: str) -> bool:
    """Whether *exe* resolves to a usable, system-provided executable.

    Rejects executables found under a version-manager ``shims`` directory
    or inside the user's home directory (unless home is the filesystem
    root, as for docker / service accounts).
    """
    found = parse_shebang.find_executable(exe)
    if found is None:  # executable not found on PATH
        return False

    homedir = os.path.expanduser('~')
    try:
        common: str | None = os.path.commonpath((found, homedir))
    except ValueError:  # on windows, different drives raises ValueError
        common = None
    return (
        # it is not in a /shims/ directory
        not SHIMS_RE.search(found) and
        (
            # the homedir is / (docker, service user, etc.)
            os.path.dirname(homedir) == homedir or
            # the exe is not contained in the home directory
            common != homedir
        )
    )
def run_setup_cmd(prefix: Prefix, cmd: tuple[str, ...], **kwargs: Any) -> None:
    """Run *cmd* with the repository checkout as the working directory."""
    workdir = prefix.prefix_dir
    cmd_output_b(*cmd, cwd=workdir, **kwargs)
@overload
def environment_dir(d: None, language_version: str) -> None: ...
@overload
def environment_dir(d: str, language_version: str) -> str: ...
def environment_dir(d: str | None, language_version: str) -> str | None:
    """Directory name for a language environment, or None when the
    language has no environment directory.
    """
    return None if d is None else f'{d}-{language_version}'
def assert_version_default(binary: str, version: str) -> None:
    """Reject any non-default ``language_version`` for *binary*."""
    if version == C.DEFAULT:
        return
    raise AssertionError(
        f'for now, pre-commit requires system-installed {binary} -- '
        f'you selected `language_version: {version}`',
    )
def assert_no_additional_deps(
    lang: str,
    additional_deps: Sequence[str],
) -> None:
    """Reject ``additional_dependencies`` for languages without support."""
    if not additional_deps:
        return
    raise AssertionError(
        f'for now, pre-commit does not support '
        f'additional_dependencies for {lang} -- '
        f'you selected `additional_dependencies: {additional_deps}`',
    )
def basic_get_default_version() -> str:
    """Default language version for languages without version management."""
    return C.DEFAULT
def basic_health_check(prefix: Prefix, language_version: str) -> str | None:
    """Health-check stub: always returns None (no problem reported)."""
    return None
def no_install(
    prefix: Prefix,
    version: str,
    additional_dependencies: Sequence[str],
) -> NoReturn:
    """Install stub for languages without an install step.

    :raises AssertionError: unconditionally.
    """
    raise AssertionError('This type is not installable')
def target_concurrency(hook: Hook) -> int:
    """Number of concurrent partitions to use when running *hook*."""
    # Serial hooks and explicit opt-out always run with one partition.
    if hook.require_serial or 'PRE_COMMIT_NO_CONCURRENCY' in os.environ:
        return 1
    # Travis appears to have a bunch of CPUs, but we can't use them all.
    if 'TRAVIS' in os.environ:
        return 2
    try:
        return multiprocessing.cpu_count()
    except NotImplementedError:
        return 1
def _shuffled(seq: Sequence[str]) -> list[str]:
    """Deterministically shuffle *seq* into a new list.

    A fixed seed keeps the "random" order identical from run to run.
    """
    fixed_random = random.Random()
    # version=1 pins the historical seeding algorithm so the ordering is
    # stable across Python releases as well.
    fixed_random.seed(FIXED_RANDOM_SEED, version=1)

    seq = list(seq)
    fixed_random.shuffle(seq)
    return seq
def run_xargs(
    hook: Hook,
    cmd: tuple[str, ...],
    file_args: Sequence[str],
    **kwargs: Any,
) -> tuple[int, bytes]:
    """Run *cmd* over *file_args* via xargs at the hook's concurrency."""
    # Shuffle the files so that they more evenly fill out the xargs partitions,
    # but do it deterministically in case a hook cares about ordering.
    kwargs['target_concurrency'] = target_concurrency(hook)
    return xargs(cmd, _shuffled(file_args), **kwargs)
|
pre_commit/languages/helpers.py
|
End of preview. Expand
in Data Studio
README.md exists but content is empty.
- Downloads last month
- 9