index
int64 0
731k
| package
stringlengths 2
98
⌀ | name
stringlengths 1
76
| docstring
stringlengths 0
281k
⌀ | code
stringlengths 4
1.07M
⌀ | signature
stringlengths 2
42.8k
⌀ |
|---|---|---|---|---|---|
708,971
|
subgrounds.subgraph.fieldpath
|
__getattribute__
| null |
def __getattribute__(self, __name: str) -> Any:
    """Intercept attribute access so nested fields resolve lazily.

    Attributes pre-populated as ``FieldPath``/``SyntheticField``/``None``
    placeholders (see ``__init__``) are re-materialized through
    ``_select`` so that argument updates are reflected; any other stored
    value is returned unchanged. Attributes that were never populated
    also fall back to ``_select``.
    """
    # Small hack to get code completion to work while allowing updates to
    # FieldPath (i.e.: setting arguments)
    try:
        match super().__getattribute__(__name):
            case FieldPath() | SyntheticField() | None:
                return self._select(__name)
            case value:
                return value
    except AttributeError:
        # Not a pre-populated attribute: treat it as a field selection.
        return self._select(__name)
|
(self, _FieldPath__name: str) -> Any
|
708,972
|
subgrounds.subgraph.fieldpath
|
__gt__
| null |
def __gt__(self, value: Any) -> Filter:
    """Build a "greater than" :class:`Filter` comparing this field to ``value``."""
    op = Filter.Operator.GT
    return Filter.mk_filter(self, op, value)
|
(self, value: Any) -> subgrounds.subgraph.filter.Filter
|
708,973
|
subgrounds.subgraph.fieldpath
|
__init__
| null |
def __init__(
    self,
    subgraph: Subgraph,
    root_type: TypeRef.T,
    type_: TypeRef.T,
    path: list[tuple[dict[str, Any] | None, TypeMeta.FieldMeta]],
) -> None:
    """Initialize a field path and eagerly expose its child fields.

    Args:
        subgraph: The subgraph this path is defined on.
        root_type: The type at which the path is rooted.
        type_: The type of the path's current leaf.
        path: The (arguments, field meta) pairs selected so far.
    """
    self._subgraph = subgraph
    self._root_type = root_type
    self._type = type_
    self._path = path
    # Add fields as attributes if leaf is object
    # (``self._leaf`` is presumably a property defined elsewhere in the
    # class — not visible here; TODO confirm)
    match self._subgraph._schema.type_of(self._leaf):
        case TypeMeta.ObjectMeta() | TypeMeta.InterfaceMeta() as type_ if len(
            self._path
        ) < FPATH_DEPTH_LIMIT:
            # We generate fieldpaths up to depth FPATH_DEPTH_LIMIT
            for fmeta in type_.fields:
                path = self._path.copy()
                path.append((None, fmeta))
                super().__setattr__(
                    fmeta.name,
                    FieldPath(
                        subgraph=self._subgraph,
                        root_type=self._root_type,
                        type_=fmeta.type_,
                        path=path,
                    ),
                )
        case TypeMeta.ObjectMeta() | TypeMeta.InterfaceMeta() as type_ if len(
            self._path
        ) == FPATH_DEPTH_LIMIT:
            for fmeta in type_.fields:
                # NOTE: We set the attribute to None on purpose since we want code
                # completion to work while avoiding infinite loops caused by cycles
                # in the GraphQL schema. The attribute itself will be initialized
                # on __getattribute__
                super().__setattr__(fmeta.name, None)
        case _:
            pass
|
(self, subgraph: 'Subgraph', root_type: 'TypeRef.T', type_: 'TypeRef.T', path: 'list[tuple[dict[str, Any] | None, TypeMeta.FieldMeta]]') -> 'None'
|
708,974
|
subgrounds.subgraph.fieldpath
|
__le__
| null |
def __le__(self, value: Any) -> Filter:
    """Build a "less than or equal" :class:`Filter` against ``value``."""
    op = Filter.Operator.LTE
    return Filter.mk_filter(self, op, value)
|
(self, value: Any) -> subgrounds.subgraph.filter.Filter
|
708,975
|
subgrounds.subgraph.fieldpath
|
__lt__
| null |
def __lt__(self, value: Any) -> Filter:
    """Build a "less than" :class:`Filter` against ``value``."""
    op = Filter.Operator.LT
    return Filter.mk_filter(self, op, value)
|
(self, value: Any) -> subgrounds.subgraph.filter.Filter
|
708,976
|
subgrounds.subgraph.fieldpath
|
__mod__
| null |
def __mod__(self, rhs: Any) -> SyntheticField:
    """Create a :class:`SyntheticField` computing ``self % rhs``."""
    out_type = typeref_of_binary_op("mod", self._type, rhs)
    return SyntheticField(operator.mod, out_type, [self, rhs])
|
(self, rhs: Any) -> subgrounds.subgraph.fieldpath.SyntheticField
|
708,977
|
subgrounds.subgraph.fieldpath
|
__mul__
| null |
def __mul__(self, other: Any) -> SyntheticField:
    """Create a :class:`SyntheticField` computing ``self * other``."""
    out_type = typeref_of_binary_op("mul", self._type, other)
    return SyntheticField(operator.mul, out_type, [self, other])
|
(self, other: Any) -> subgrounds.subgraph.fieldpath.SyntheticField
|
708,978
|
subgrounds.subgraph.fieldpath
|
__ne__
| null |
def __ne__(self, value: Any) -> Filter:
    """Build a "not equal" :class:`Filter` against ``value``.

    NOTE: this intentionally deviates from the usual boolean ``__ne__``
    contract to support the filter DSL.
    """
    op = Filter.Operator.NEQ
    return Filter.mk_filter(self, op, value)
|
(self, value: Any) -> subgrounds.subgraph.filter.Filter
|
708,979
|
subgrounds.subgraph.fieldpath
|
__neg__
| null |
def __neg__(self) -> SyntheticField:
    """Create a :class:`SyntheticField` computing ``-self``."""
    out_type = type_ref_of_unary_op("neg", self._type)
    return SyntheticField(operator.neg, out_type, self)
|
(self) -> subgrounds.subgraph.fieldpath.SyntheticField
|
708,980
|
subgrounds.subgraph.fieldpath
|
__pow__
| null |
def __pow__(self, rhs: Any) -> SyntheticField:
    """Create a :class:`SyntheticField` computing ``self ** rhs``."""
    out_type = typeref_of_binary_op("pow", self._type, rhs)
    return SyntheticField(operator.pow, out_type, [self, rhs])
|
(self, rhs: Any) -> subgrounds.subgraph.fieldpath.SyntheticField
|
708,981
|
subgrounds.subgraph.fieldpath
|
__radd__
| null |
def __radd__(self, other: Any) -> SyntheticField:
    """Create a :class:`SyntheticField` computing ``other + self`` (reflected)."""
    def swapped(x, y):
        return operator.add(y, x)

    out_type = typeref_of_binary_op("add", self._type, other)
    return SyntheticField(swapped, out_type, [self, other])
|
(self, other: Any) -> subgrounds.subgraph.fieldpath.SyntheticField
|
708,982
|
subgrounds.subgraph.fieldpath
|
__repr__
| null |
def __repr__(self) -> str:
    """Debug representation: subgraph url, root type name and field path."""
    return (
        f"FieldPath({self._subgraph._url}, {self._root_type.name},"
        f" {self._name_path()})"
    )
|
(self) -> str
|
708,983
|
subgrounds.subgraph.fieldpath
|
__rfloordiv__
| null |
def __rfloordiv__(self, other: Any) -> SyntheticField:
    """Create a :class:`SyntheticField` computing ``other // self`` (reflected)."""
    def swapped(x, y):
        return operator.floordiv(y, x)

    out_type = typeref_of_binary_op("div", self._type, other)
    return SyntheticField(swapped, out_type, [self, other])
|
(self, other: Any) -> subgrounds.subgraph.fieldpath.SyntheticField
|
708,984
|
subgrounds.subgraph.fieldpath
|
__rmod__
| null |
def __rmod__(self, lhs: Any) -> SyntheticField:
    """Create a :class:`SyntheticField` computing ``lhs % self`` (reflected)."""
    def swapped(x, y):
        return operator.mod(y, x)

    out_type = typeref_of_binary_op("mod", self._type, lhs)
    return SyntheticField(swapped, out_type, [self, lhs])
|
(self, lhs: Any) -> subgrounds.subgraph.fieldpath.SyntheticField
|
708,985
|
subgrounds.subgraph.fieldpath
|
__rmul__
| null |
def __rmul__(self, other: Any) -> SyntheticField:
    """Create a :class:`SyntheticField` computing ``other * self`` (reflected)."""
    def swapped(x, y):
        return operator.mul(y, x)

    out_type = typeref_of_binary_op("mul", self._type, other)
    return SyntheticField(swapped, out_type, [self, other])
|
(self, other: Any) -> subgrounds.subgraph.fieldpath.SyntheticField
|
708,986
|
subgrounds.subgraph.fieldpath
|
__rpow__
| null |
def __rpow__(self, lhs: Any) -> SyntheticField:
    """Create a :class:`SyntheticField` computing ``lhs ** self`` (reflected)."""
    def swapped(x, y):
        return operator.pow(y, x)

    out_type = typeref_of_binary_op("pow", self._type, lhs)
    return SyntheticField(swapped, out_type, [self, lhs])
|
(self, lhs: Any) -> subgrounds.subgraph.fieldpath.SyntheticField
|
708,987
|
subgrounds.subgraph.fieldpath
|
__rsub__
| null |
def __rsub__(self, other: Any) -> SyntheticField:
    """Create a :class:`SyntheticField` computing ``other - self`` (reflected)."""
    def swapped(x, y):
        return operator.sub(y, x)

    out_type = typeref_of_binary_op("sub", self._type, other)
    return SyntheticField(swapped, out_type, [self, other])
|
(self, other: Any) -> subgrounds.subgraph.fieldpath.SyntheticField
|
708,988
|
subgrounds.subgraph.fieldpath
|
__rtruediv__
| null |
def __rtruediv__(self, other: Any) -> SyntheticField:
    """Create a :class:`SyntheticField` computing ``other / self`` (reflected)."""
    def swapped(x, y):
        return operator.truediv(y, x)

    out_type = typeref_of_binary_op("div", self._type, other)
    return SyntheticField(swapped, out_type, [self, other])
|
(self, other: Any) -> subgrounds.subgraph.fieldpath.SyntheticField
|
708,989
|
subgrounds.subgraph.fieldpath
|
__str__
| null |
def __str__(self) -> str:
    """Return the dot-separated sequence of field names in this path."""
    return ".".join(ele[1].name for ele in self._path)
|
(self) -> str
|
708,990
|
subgrounds.subgraph.fieldpath
|
__sub__
| null |
def __sub__(self, other: Any) -> SyntheticField:
    """Create a :class:`SyntheticField` computing ``self - other``."""
    out_type = typeref_of_binary_op("sub", self._type, other)
    return SyntheticField(operator.sub, out_type, [self, other])
|
(self, other: Any) -> subgrounds.subgraph.fieldpath.SyntheticField
|
708,991
|
subgrounds.subgraph.fieldpath
|
__truediv__
| null |
def __truediv__(self, other: Any) -> SyntheticField:
    """Create a :class:`SyntheticField` computing ``self / other``."""
    out_type = typeref_of_binary_op("div", self._type, other)
    return SyntheticField(operator.truediv, out_type, [self, other])
|
(self, other: Any) -> subgrounds.subgraph.fieldpath.SyntheticField
|
708,992
|
subgrounds.subgraph.fieldpath
|
_auto_select
| null |
def _auto_select(self) -> FieldPath | list[FieldPath]:
match self._subgraph._schema.type_of_typeref(self._leaf.type_):
case TypeMeta.ObjectMeta() | TypeMeta.InterfaceMeta() as obj:
return list(
fieldpaths_of_object(self._subgraph, obj)
| map(partial(FieldPath._extend, self))
)
case _:
return self
|
(self) -> subgrounds.subgraph.fieldpath.FieldPath | list[subgrounds.subgraph.fieldpath.FieldPath]
|
708,993
|
subgrounds.subgraph.fieldpath
|
_extend
|
Extends the current :class:`FieldPath` with the :class:`FieldPath`
:attr:`ext`. :attr:`ext` must start where the current :class:`FieldPath` ends.
Args:
ext: The :class:`FieldPath` representing the extension
Raises:
TypeError: If ``ext`` is not rooted at the type where this path ends.
TypeError: If this path's leaf does not resolve to an object or interface type.
TypeError: If this path does not end on a field.
Returns:
A new :class:`FieldPath` containing the initial current :class:`FieldPath`
extended with :attr:`ext`
|
def _extend(self, ext: FieldPath) -> FieldPath:
    """Extends the current :class:`FieldPath` with the :class:`FieldPath`
    :attr:`ext`. :attr:`ext` must start where the current :class:`FieldPath` ends.
    Args:
        ext: The :class:`FieldPath` representing the extension
    Raises:
        TypeError: If ``ext`` is not rooted at the type where the current
            :class:`FieldPath` ends.
        TypeError: If the current leaf field does not resolve to an object
            or interface type.
        TypeError: If the current :class:`FieldPath` does not end on a field.
    Returns:
        A new :class:`FieldPath` containing the initial current :class:`FieldPath`
        extended with :attr:`ext`
    """
    match self._leaf:
        case TypeMeta.FieldMeta() as fmeta:
            match self._schema.type_of_typeref(fmeta.type_):
                case TypeMeta.ObjectMeta(name=name) | TypeMeta.InterfaceMeta(
                    name=name
                ):
                    # The extension must be rooted at the object type that
                    # this path's leaf resolves to.
                    if name == ext._root_type.name:
                        return FieldPath(
                            subgraph=self._subgraph,
                            root_type=self._root_type,
                            type_=ext._type,
                            path=self._path + ext._path,
                        )
                    else:
                        raise TypeError(
                            f"extend: FieldPath {ext} does not start at the"
                            f" same type from where FieldPath {self} ends"
                        )
                case _:
                    raise TypeError(f"extend: FieldPath {self} is not object field")
        case _:
            raise TypeError(f"extend: FieldPath {self} is not an object field")
|
(self, ext: subgrounds.subgraph.fieldpath.FieldPath) -> subgrounds.subgraph.fieldpath.FieldPath
|
708,994
|
subgrounds.subgraph.fieldpath
|
_extract_data
|
Extract the data corresponding to the current :class:`FieldPath` from
the dictionary :attr:`data`.
Args:
data: Data dictionary that contains the data corresponding to the current
:class:`FieldPath`.
Returns:
Data corresponding to the current :class:`FieldPath`.
|
def _extract_data(
    self, data: dict[str, Any] | list[dict[str, Any]]
) -> list[Any] | Any:
    """Pull the values selected by this :class:`FieldPath` out of *data*.

    Args:
        data: Response data containing this path's (aliased) keys.
    Returns:
        The data corresponding to the current :class:`FieldPath`.
    """
    aliased_path = self._name_path(use_aliases=True)
    return extract_data(aliased_path, data)
|
(self, data: dict[str, typing.Any] | list[dict[str, typing.Any]]) -> Union[list[Any], Any]
|
708,995
|
subgrounds.subgraph.fieldpath
|
_hash
| null |
@staticmethod
def _hash(msg: str) -> str:
    """Return a short deterministic identifier for *msg*.

    The result is the hex digest of an 8-byte blake2b hash, prefixed
    with ``"x"`` so it forms a valid identifier/alias.
    """
    digest = blake2b(msg.encode("UTF-8"), digest_size=8)
    return "x" + digest.hexdigest()
|
(msg: str) -> str
|
708,996
|
subgrounds.subgraph.fieldpath
|
_merge
|
Returns a Selection tree containing all selection paths in `fpaths`.
Note: Assumes that all fieldpaths in `fpaths` belong to the same subgraph
Args:
fpaths: The fieldpaths whose selections should be merged.
Returns:
The merged list of selections.
|
@staticmethod
def _merge(fpaths: list[FieldPath]) -> list[Selection]:
    """Returns a Selection tree containing all selection paths in `fpaths`.

    Note: Assumes that all fieldpaths in `fpaths` belong to the same subgraph.
    Args:
        fpaths: The fieldpaths whose selections should be merged.
    Returns:
        The merged list of selections.
    """
    # Fold each fieldpath's selection into a single Query accumulator.
    query = Query()
    for fpath in fpaths:
        query = Query.add(query, FieldPath._selection(fpath))
    return query.selection
|
(fpaths: list[subgrounds.subgraph.fieldpath.FieldPath]) -> list[subgrounds.query.Selection]
|
708,997
|
subgrounds.subgraph.fieldpath
|
_name
|
Generates the name of the current :class:`FieldPath` using the names of
the fields it selects. If :attr:`use_aliases` is True, then if a field has
an automatically generated alias, the alias will be used.
Args:
use_aliases: Flag indicating whether or not to use the fields' automatically
generated alias (if present). Defaults to False.
Returns:
The generated name of the current :class:`FieldPath`.
|
def _name(self, use_aliases: bool = False) -> str:
    """Generate a name for this :class:`FieldPath` from its field names.

    Args:
        use_aliases: Whether to use a field's automatically generated
            alias (if present) instead of its name. Defaults to False.
    Returns:
        The underscore-joined name of the current :class:`FieldPath`.
    """
    parts = self._name_path(use_aliases=use_aliases)
    return "_".join(parts)
|
(self, use_aliases: bool = False) -> str
|
708,998
|
subgrounds.subgraph.fieldpath
|
_name_path
|
Returns a list of strings corresponding to the names of all fields
selected in the current :class:`FieldPath`. If :attr:`use_aliases` is True,
then if a field has an automatically generated alias, the alias will be
returned.
Args:
use_aliases: Flag indicating whether or not to use the fields' automatically
generated alias (if present). Defaults to False.
Returns:
List of field names selected in the current :class:`FieldPath`
|
def _name_path(self, use_aliases: bool = False) -> list[str]:
    """Return the names of all fields selected in this :class:`FieldPath`.

    Args:
        use_aliases: Whether to use a field's automatically generated
            alias (if present) instead of its name. Defaults to False.
    Returns:
        List of field names selected in the current :class:`FieldPath`.
    """
    def name_of(ele: tuple[dict[str, Any] | None, TypeMeta.FieldMeta]) -> str:
        args, fmeta = ele
        # A non-empty argument dict means the field carries a hashed alias.
        if use_aliases and args:
            return FieldPath._hash(fmeta.name + str(args))
        return fmeta.name

    return [name_of(ele) for ele in self._path]
|
(self, use_aliases: bool = False) -> list[str]
|
708,999
|
subgrounds.subgraph.fieldpath
|
_select
|
Returns a new FieldPath corresponding to the FieldPath `self` extended with
an additional selection on the field named `name`.
Args:
name: The name of the field to expand on the leaf of `fpath`
Raises:
TypeError: If the current leaf type is a scalar or enum.
TypeError: If ``name`` is not a valid field of the leaf object type.
TypeError: If the current type is not an object or interface type.
Returns:
A new FieldPath containing `fpath` extended with the field named `name`
|
def _select(self, name: str) -> FieldPath:
    """Returns a new FieldPath corresponding to the FieldPath `self` extended with
    an additional selection on the field named `name`.
    Args:
        name: The name of the field to expand on the leaf of `fpath`
    Raises:
        TypeError: If the current leaf is a scalar or enum (nothing to
            select on).
        TypeError: If ``name`` is not a valid field of the leaf object type.
        TypeError: If the current type is not an object or interface type.
    Returns:
        A new FieldPath containing `fpath` extended with the field named `name`
    """
    match self._schema.type_of_typeref(self._type):
        # If the FieldPath fpath
        case TypeMeta.EnumMeta() | TypeMeta.ScalarMeta():
            raise TypeError(
                f"FieldPath: path {self} ends with a scalar field!"
                f" cannot select field {name}"
            )
        case TypeMeta.ObjectMeta() | TypeMeta.InterfaceMeta() as obj:
            field = obj.field(name)
            # Only fields resolving to a known named type can be selected.
            match self._schema.type_of_typeref(field.type_):
                case (
                    TypeMeta.ObjectMeta()
                    | TypeMeta.InterfaceMeta()
                    | TypeMeta.EnumMeta()
                    | TypeMeta.ScalarMeta()
                ):
                    # Copy current path and append newly selected field
                    path = self._path.copy()
                    path.append((None, field))
                    # Return new FieldPath
                    return FieldPath(
                        subgraph=self._subgraph,
                        root_type=self._root_type,
                        type_=field.type_,
                        path=path,
                    )
                case _:
                    raise TypeError(
                        f"FieldPath: field {name} is not a valid field for object"
                        f" {self._type.name} at path {self}"
                    )
        case _:
            raise TypeError(
                f"FieldPath: Unexpected type {self._type.name}"
                f" when selection {name} on {self}"
            )
|
(self, name: str) -> subgrounds.subgraph.fieldpath.FieldPath
|
709,000
|
subgrounds.subgraph.fieldpath
|
_selection
|
Returns a selection or list of selections corresponding to the current
:class:`FieldPath`.
Returns:
Selection | list[Selection]: _description_
|
def _selection(self) -> Selection | list[Selection]:
    """Returns a selection or list of selections corresponding to the current
    :class:`FieldPath`.
    Returns:
        The nested :class:`Selection` tree for this path.
    """
    def f(
        path: list[tuple[dict[str, Any] | None, TypeMeta.FieldMeta]]
    ) -> list[Selection]:
        # Recursively wrap each (arguments, field meta) pair into a
        # Selection node, nesting the remainder of the path below it.
        match path:
            case [
                (args, TypeMeta.FieldMeta() as fmeta),
                *rest,
            ] if args == {} or args is None:
                # No arguments: plain selection, no alias required.
                return [Selection(fmeta, selection=f(rest))]
            case [(args, TypeMeta.FieldMeta() as fmeta), *rest]:
                # With arguments: alias the selection by a hash of the
                # field name and its arguments to keep keys unique.
                return [
                    Selection(
                        fmeta,
                        # TODO: Revisit this
                        alias=FieldPath._hash(fmeta.name + str(args)),
                        arguments=arguments_of_field_args(
                            self._subgraph._schema, fmeta, args
                        ),
                        selection=f(rest),
                    )
                ]
            case []:
                return []
        assert False  # Suppress mypy missing return statement warning
    # NOTE(review): despite the annotation, this returns the single root
    # Selection — f always yields a one-element list at the top level.
    return f(self._path)[0]
|
(self) -> subgrounds.query.Selection | list[subgrounds.query.Selection]
|
709,001
|
subgrounds.subgraph.fieldpath
|
_set_arguments
|
Set the arguments to the leaf of the current :class:`FieldPath`. The
method returns the :attr:`self`.
Args:
args: _description_
selection: _description_. Defaults to [].
Returns:
_description_
|
def _set_arguments(
self, args: dict[str, Any], selection: list[FieldPath] = []
) -> FieldPath | list[FieldPath]:
"""Set the arguments to the leaf of the current :class:`FieldPath`. The
method returns the :attr:`self`.
Args:
args: _description_
selection: _description_. Defaults to [].
Returns:
_description_
"""
def fmt_arg(name, raw_arg):
match (name, raw_arg):
case ("where", [Filter(), *_] as filters):
return Filter.to_dict(filters)
case ("orderBy", FieldPath() as fpath):
if paths := fpath._name_path():
return "__".join(paths)
raise Exception(
f"Cannot use empty paths as orderBy argument {fpath}"
)
case _:
return raw_arg
match self._leaf:
case TypeMeta.FieldMeta():
args = {key: fmt_arg(key, val) for key, val in args.items()}
self._path[-1] = (args, self._path[-1][1])
if len(selection) > 0:
return list(selection | map(partial(FieldPath._extend, self)))
else:
return self
case _:
raise TypeError(f"Unexpected type for FieldPath {self}")
|
(self, args: dict[str, typing.Any], selection: list[subgrounds.subgraph.fieldpath.FieldPath] = []) -> subgrounds.subgraph.fieldpath.FieldPath | list[subgrounds.subgraph.fieldpath.FieldPath]
|
709,002
|
subgrounds.subgraph.subgraph
|
Subgraph
|
Subgraph(url: 'str', schema: 'SchemaMeta', transforms: 'list[DocumentTransform]' = [<subgrounds.transform.transforms.TypeTransform object at 0x7f8aea508400>, <subgrounds.transform.transforms.TypeTransform object at 0x7f8aea5091e0>], is_subgraph: 'bool' = True) -> 'None'
|
class Subgraph:
    """Programmatic handle on a GraphQL endpoint/subgraph.

    Entity types found in the schema are attached as attributes at
    construction time, wrapped as :class:`Object` instances.

    NOTE(review): in the source module this class appears decorated with
    ``@dataclass`` — this cell may have dropped the decorator; confirm
    against the module definition.
    """

    # Class-level field declarations; the explicit __init__ below performs
    # the actual initialization.
    _url: str
    _schema: SchemaMeta
    _transforms: list[DocumentTransform] = field(default_factory=list)
    _is_subgraph: bool = True

    def __init__(
        self,
        url: str,
        schema: SchemaMeta,
        transforms: list[DocumentTransform] = DEFAULT_SUBGRAPH_TRANSFORMS,
        is_subgraph: bool = True,
    ) -> None:
        """Store endpoint/schema state and attach schema objects as attributes.

        Args:
            url: GraphQL endpoint URL.
            schema: Parsed schema metadata.
            transforms: Document transforms applied to requests.
                NOTE(review): the default is a shared module-level list;
                it is only re-bound (not mutated) below, but verify other
                call sites before mutating it.
            is_subgraph: Whether the endpoint is a Graph-protocol subgraph.
        """
        self._url = url
        self._schema = schema
        self._transforms = transforms
        self._is_subgraph = is_subgraph
        # Add objects as attributes
        for key, obj in self._schema.type_map.items():
            match obj:
                case TypeMeta.ObjectMeta() | TypeMeta.InterfaceMeta():
                    super().__setattr__(key, Object(self, obj))
                case _:
                    pass

    def _add_synthetic_field(
        self,
        object_: TypeMeta.ObjectMeta | TypeMeta.InterfaceMeta,
        name: str,
        sfield: SyntheticField,
    ) -> None:
        """Register ``sfield`` as field ``name`` on ``object_``.

        Appends the field's metadata to the (shared) schema object in place
        and prepends a :class:`LocalSyntheticField` transform that computes
        the field locally from its dependencies.
        """
        # NOTE(review): keyword is `type=` here while accesses elsewhere use
        # `.type_` — presumably the dataclass maps it; confirm FieldMeta.
        fmeta = TypeMeta.FieldMeta(
            name=name, description="", args=[], type=sfield._type
        )
        # Mutates the schema's object metadata in place.
        object_.fields.append(fmeta)
        sfield_fpath = FieldPath(
            self,
            TypeRef.Named(name=object_.name, kind="OBJECT"),
            sfield._type,
            [(None, fmeta)],
        )
        logger.debug(
            "Subgraph: Adding SyntheticField at FieldPath"
            f" {sfield_fpath._root_type.name}.{sfield_fpath._name_path()}"
        )
        transform = LocalSyntheticField(
            self,
            fmeta,
            object_,
            sfield._f,
            sfield._default,
            list(sfield._deps | map(FieldPath._selection)),
        )
        # Prepend so the synthetic field is resolved before later transforms.
        self._transforms = [transform, *self._transforms]
(url: 'str', schema: 'SchemaMeta', transforms: 'list[DocumentTransform]' = [<subgrounds.transform.transforms.TypeTransform object at 0x7f8aea508400>, <subgrounds.transform.transforms.TypeTransform object at 0x7f8aea5091e0>], is_subgraph: 'bool' = True) -> 'None'
|
709,003
|
subgrounds.subgraph.subgraph
|
__eq__
| null |
""" Subgraph module that defines various classes to manipulate requests and
subgraphs.
This module is the glue that connects the lower level modules (i.e.:
:module:`query`, :module:`schema`, :module:`transform`, :module:`pagination`) to
the higher toplevel modules (i.e.: :module:`subgrounds`).
"""
from __future__ import annotations
import logging
import warnings
from dataclasses import dataclass, field
from pipe import map
from subgrounds.schema import SchemaMeta, TypeMeta, TypeRef
from subgrounds.transform import (
DEFAULT_SUBGRAPH_TRANSFORMS,
DocumentTransform,
LocalSyntheticField,
)
from .fieldpath import FieldPath, SyntheticField
from .object import Object
# Module-level logger and warning configuration for the subgraph module.
logger = logging.getLogger("subgrounds")
warnings.simplefilter("default")


@dataclass
class Subgraph:
    """Programmatic handle on a GraphQL endpoint/subgraph.

    Entity types found in the schema are attached as attributes at
    construction time, wrapped as :class:`Object` instances.
    """

    _url: str
    _schema: SchemaMeta
    _transforms: list[DocumentTransform] = field(default_factory=list)
    _is_subgraph: bool = True

    # Since __init__ is defined in the class body, @dataclass does not
    # generate one (dataclasses never overwrite user-defined methods).
    def __init__(
        self,
        url: str,
        schema: SchemaMeta,
        transforms: list[DocumentTransform] = DEFAULT_SUBGRAPH_TRANSFORMS,
        is_subgraph: bool = True,
    ) -> None:
        """Store endpoint/schema state and attach schema objects as attributes.

        Args:
            url: GraphQL endpoint URL.
            schema: Parsed schema metadata.
            transforms: Document transforms applied to requests.
                NOTE(review): the default is a shared module-level list;
                it is only re-bound (not mutated) below, but verify other
                call sites before mutating it.
            is_subgraph: Whether the endpoint is a Graph-protocol subgraph.
        """
        self._url = url
        self._schema = schema
        self._transforms = transforms
        self._is_subgraph = is_subgraph
        # Add objects as attributes
        for key, obj in self._schema.type_map.items():
            match obj:
                case TypeMeta.ObjectMeta() | TypeMeta.InterfaceMeta():
                    super().__setattr__(key, Object(self, obj))
                case _:
                    pass

    def _add_synthetic_field(
        self,
        object_: TypeMeta.ObjectMeta | TypeMeta.InterfaceMeta,
        name: str,
        sfield: SyntheticField,
    ) -> None:
        """Register ``sfield`` as field ``name`` on ``object_``.

        Appends the field's metadata to the (shared) schema object in place
        and prepends a :class:`LocalSyntheticField` transform that computes
        the field locally from its dependencies.
        """
        # NOTE(review): keyword is `type=` here while accesses elsewhere use
        # `.type_` — presumably the dataclass maps it; confirm FieldMeta.
        fmeta = TypeMeta.FieldMeta(
            name=name, description="", args=[], type=sfield._type
        )
        # Mutates the schema's object metadata in place.
        object_.fields.append(fmeta)
        sfield_fpath = FieldPath(
            self,
            TypeRef.Named(name=object_.name, kind="OBJECT"),
            sfield._type,
            [(None, fmeta)],
        )
        logger.debug(
            "Subgraph: Adding SyntheticField at FieldPath"
            f" {sfield_fpath._root_type.name}.{sfield_fpath._name_path()}"
        )
        transform = LocalSyntheticField(
            self,
            fmeta,
            object_,
            sfield._f,
            sfield._default,
            list(sfield._deps | map(FieldPath._selection)),
        )
        # Prepend so the synthetic field is resolved before later transforms.
        self._transforms = [transform, *self._transforms]
|
(self, other)
|
709,004
|
subgrounds.subgraph.subgraph
|
__init__
| null |
def __init__(
    self,
    url: str,
    schema: SchemaMeta,
    transforms: list[DocumentTransform] = DEFAULT_SUBGRAPH_TRANSFORMS,
    is_subgraph: bool = True,
) -> None:
    """Store endpoint/schema state and attach schema objects as attributes.

    Args:
        url: GraphQL endpoint URL.
        schema: Parsed schema metadata.
        transforms: Document transforms applied to requests. NOTE(review):
            the default is a shared module-level list; it is only re-bound
            (not mutated) in this file, but verify before mutating it.
        is_subgraph: Whether the endpoint is a Graph-protocol subgraph.
    """
    self._url = url
    self._schema = schema
    self._transforms = transforms
    self._is_subgraph = is_subgraph
    # Add objects as attributes
    for key, obj in self._schema.type_map.items():
        match obj:
            case TypeMeta.ObjectMeta() | TypeMeta.InterfaceMeta():
                super().__setattr__(key, Object(self, obj))
            case _:
                pass
|
(self, url: str, schema: subgrounds.schema.SchemaMeta, transforms: list[subgrounds.transform.base.DocumentTransform] = [<subgrounds.transform.transforms.TypeTransform object at 0x7f8aea508400>, <subgrounds.transform.transforms.TypeTransform object at 0x7f8aea5091e0>], is_subgraph: bool = True) -> NoneType
|
709,006
|
subgrounds.subgraph.subgraph
|
_add_synthetic_field
| null |
def _add_synthetic_field(
    self,
    object_: TypeMeta.ObjectMeta | TypeMeta.InterfaceMeta,
    name: str,
    sfield: SyntheticField,
) -> None:
    """Register ``sfield`` as field ``name`` on ``object_``.

    Appends the field's metadata to the (shared) schema object in place and
    prepends a :class:`LocalSyntheticField` transform that computes the
    field locally from its dependency fieldpaths.

    Args:
        object_: Schema object/interface to extend.
        name: Name under which the synthetic field is exposed.
        sfield: The synthetic field definition (function, default, deps).
    """
    # NOTE(review): keyword is `type=` here while accesses elsewhere use
    # `.type_` — presumably the dataclass maps it; confirm FieldMeta.
    fmeta = TypeMeta.FieldMeta(
        name=name, description="", args=[], type=sfield._type
    )
    # Mutates the schema's object metadata in place.
    object_.fields.append(fmeta)
    sfield_fpath = FieldPath(
        self,
        TypeRef.Named(name=object_.name, kind="OBJECT"),
        sfield._type,
        [(None, fmeta)],
    )
    logger.debug(
        "Subgraph: Adding SyntheticField at FieldPath"
        f" {sfield_fpath._root_type.name}.{sfield_fpath._name_path()}"
    )
    transform = LocalSyntheticField(
        self,
        fmeta,
        object_,
        sfield._f,
        sfield._default,
        list(sfield._deps | map(FieldPath._selection)),
    )
    # Prepend so the synthetic field is resolved before later transforms.
    self._transforms = [transform, *self._transforms]
|
(self, object_: subgrounds.schema.TypeMeta.ObjectMeta | subgrounds.schema.TypeMeta.InterfaceMeta, name: str, sfield: subgrounds.subgraph.fieldpath.SyntheticField) -> NoneType
|
709,007
|
subgrounds.client.sync
|
Subgrounds
| null |
class Subgrounds(SubgroundsBase):
@cached_property
def _client(self):
    """Lazily-created HTTP client shared across requests (cached)."""
    client = httpx.Client(http2=HTTP2_SUPPORT, timeout=self.timeout)
    return client
def load(
    self,
    url: str,
    save_schema: bool = False,
    cache_dir: str | None = None,
    is_subgraph: bool = True,
) -> Subgraph:
    """Introspect (or load from cache) the schema at ``url`` and build a
    :class:`Subgraph` for it.

    Args:
        url: The GraphQL endpoint URL.
        save_schema: Whether to cache the introspected schema to disk.
        cache_dir: Deprecated; passing a value only emits a warning.
        is_subgraph: Whether the endpoint is a Graph-protocol subgraph.
    Returns:
        A generated class representing the endpoint and its entities.
    """
    if cache_dir is not None:
        # Fixed wording of the user-facing message ("depreciated").
        warnings.warn("This will be deprecated", DeprecationWarning)

    try:
        # _load is a generator: it yields the introspection request (unless
        # the schema is cached) and returns the Subgraph via StopIteration.
        loader = self._load(url, save_schema, is_subgraph)
        url, query = next(loader)  # if this fails, schema is loaded from cache
        data = self._fetch(url, {"query": query})
        loader.send(data)
    except StopIteration as e:
        return e.value

    assert False  # unreachable: _load always terminates via StopIteration
def load_subgraph(
    self, url: str, save_schema: bool = False, cache_dir: str | None = None
) -> Subgraph:
    """Introspect the subgraph at ``url`` and return a generated
    :class:`Subgraph` class representing it and its entities.

    Args:
        url: The url of the API.
        save_schema: Whether or not the schema should be cached to disk.
        cache_dir: Deprecated cache location, forwarded to :meth:`load`.
    Returns:
        Subgraph: A generated class representing the subgraph and its entities.
    """
    return self.load(url, save_schema, cache_dir, is_subgraph=True)
def load_api(
    self, url: str, save_schema: bool = False, cache_dir: str | None = None
) -> Subgraph:
    """Introspect the plain GraphQL API at ``url`` and return a generated
    class representing the endpoint and its entities.

    Args:
        url: The url of the API.
        save_schema: Whether or not the schema should be saved to disk.
        cache_dir: Deprecated cache location, forwarded to :meth:`load`.
    Returns:
        A generated class representing the endpoint and its entities.
    """
    return self.load(url, save_schema, cache_dir, is_subgraph=False)
def execute(
    self,
    req: DataRequest,
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> DataResponse:
    """Executes a :class:`DataRequest` and returns a :class:`DataResponse`.
    Args:
        req: The :class:`DataRequest` object to be executed.
        pagination_strategy: A Class implementing the :class:`PaginationStrategy`
            ``Protocol``. If ``None``, then automatic pagination is disabled.
            Defaults to :class:`LegacyStrategy`.
    Returns:
        A :class:`DataResponse` object representing the response
    """
    try:
        # _execute drives pagination as a generator: it yields each document
        # to fetch, receives the fetched page via send(), and delivers the
        # final DataResponse through StopIteration.value.
        executor = self._execute(req, pagination_strategy)
        doc = next(executor)
        while True:
            data = self._fetch(
                doc.url, {"query": doc.graphql, "variables": doc.variables}
            )
            doc = executor.send(DocumentResponse(url=doc.url, data=data))
    except StopIteration as e:
        return e.value
def execute_iter(
    self,
    req: DataRequest,
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> Iterator[DocumentResponse]:
    """Same as `execute`, except that an iterator is returned which will iterate
    the data pages.
    Args:
        req: The :class:`DataRequest` object to be executed
        pagination_strategy: A Class implementing the :class:`PaginationStrategy`
            ``Protocol``. If ``None``, then automatic pagination is disabled.
            Defaults to :class:`LegacyStrategy`.
    Returns:
        An iterator over the :class:`DocumentResponse` pages.
    ⚠️ DOES NOT apply global transforms across multiple documents or their pages.
    Since we yield each page as we get it, it's not possible to accurately perform
    the transforms since we don't collect the pages. This means transforms
    expecting multiple documents or pages of documents will be inaccurate.
    """
    # The generator ends by raising StopIteration; suppress() turns that
    # into a clean end of this iterator.
    with suppress(StopIteration):
        executor = self._execute_iter(req, pagination_strategy)
        while True:
            # Each round trip: get the next document, fetch it, and hand
            # the page back to the executor, which yields the transformed
            # page for us to re-yield.
            doc = cast(Document, next(executor))
            data = self._fetch(
                doc.url, {"query": doc.graphql, "variables": doc.variables}
            )
            yield cast(
                DocumentResponse,
                executor.send(DocumentResponse(url=doc.url, data=data)),
            )
def query_json(
    self,
    fpaths: FieldPath | list[FieldPath],
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> list[dict[str, Any]]:
    """Equivalent to
    ``Subgrounds.execute(Subgrounds.mk_request(fpaths), pagination_strategy)``.
    Args:
        fpaths: One or more :class:`FieldPath` objects that should be
            included in the request.
        pagination_strategy: A Class implementing the :class:`PaginationStrategy`
            ``Protocol``. If ``None``, then automatic pagination is disabled.
            Defaults to :class:`LegacyStrategy`.
    Returns:
        The response data
    """
    expanded = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
    response = self.execute(self.mk_request(expanded), pagination_strategy)
    return [doc.data for doc in response.responses]
def query_json_iter(
    self,
    fpaths: FieldPath | list[FieldPath],
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> Iterator[dict[str, Any]]:
    """Same as `query_json` but yields the response data one page at a time.
    Args:
        fpaths: One or more :class:`FieldPath` objects that should be included
            in the request.
        pagination_strategy: A Class implementing the :class:`PaginationStrategy`
            ``Protocol``. If ``None``, then automatic pagination is disabled.
            Defaults to :class:`LegacyStrategy`.
    Returns:
        An iterator over the response data pages.
    """
    expanded = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
    request = self.mk_request(expanded)
    for resp in self.execute_iter(request, pagination_strategy):
        yield resp.data
def query_df(
    self,
    fpaths: FieldPath | list[FieldPath],
    columns: list[str] | None = None,
    concat: bool = False,
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> pd.DataFrame | list[pd.DataFrame]:
    """Same as :func:`Subgrounds.query` but formats the response data into a
    Pandas DataFrame. If the response data cannot be flattened to a single query
    (e.g.: when querying multiple list fields that return different entities),
    then multiple dataframes are returned
    ``fpaths`` is a list of :class:`FieldPath` objects that indicate which
    data must be queried.
    ``columns`` is an optional argument used to rename the dataframes(s)
    columns. The length of ``columns`` must be the same as the number of columns
    of *all* returned dataframes.
    ``concat`` indicates whether or not the resulting dataframes should be
    concatenated together. The dataframes must have the same number of columns,
    as well as the same column names and types (the names can be set using the
    ``columns`` argument).
    Args:
        fpaths: One or more `FieldPath` objects that should be included in the request
        columns: The column labels. Defaults to None.
        concat: Whether or not to concatenate the resulting dataframes.
        pagination_strategy: A Class implementing the :class:`PaginationStrategy`
            ``Protocol``. If ``None``, then automatic pagination is disabled.
            Defaults to :class:`LegacyStrategy`.
    Returns:
        A :class:`pandas.DataFrame` containing the response data.
    Example:
    .. code-block:: python
        >>> from subgrounds import Subgrounds
        >>> sg = Subgrounds()
        >>> univ3 = sg.load_subgraph(
        ...     'https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v3')

        # Define price SyntheticField
        >>> univ3.Swap.price = abs(univ3.Swap.amount0) / abs(univ3.Swap.amount1)

        # Query last 10 swaps from the ETH/USDC pool
        >>> eth_usdc = univ3.Query.swaps(
        ...     orderBy=univ3.Swap.timestamp,
        ...     orderDirection='desc',
        ...     first=10,
        ...     where=[
        ...         univ3.Swap.pool == '0x8ad599c3a0ff1de082011efddc58f1908eb6e6d8'
        ...     ]
        ... )
        >>> sg.query_df([
        ...     eth_usdc.timestamp,
        ...     eth_usdc.price
        ... ])
            swaps_timestamp  swaps_price
        0        1643213811  2618.886394
        1        1643213792  2618.814281
        2        1643213792  2617.500494
        3        1643213763  2615.458495
        4        1643213763  2615.876574
        5        1643213739  2615.352390
        6        1643213678  2615.205713
        7        1643213370  2614.115746
        8        1643213210  2613.077301
        9        1643213196  2610.686563
    """
    # Expand object-typed leaves into their full fieldpath sets before
    # building the request, then flatten the JSON pages into dataframe(s).
    fpaths = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
    json_data = self.query_json(fpaths, pagination_strategy=pagination_strategy)
    return df_of_json(json_data, fpaths, columns, concat)
def query_df_iter(
    self,
    fpaths: FieldPath | list[FieldPath],
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> Iterator[pd.DataFrame | list[pd.DataFrame]]:
    """Same as `query_df` except it yields one DataFrame (or list of
    DataFrames) per response data page.
    Args:
        fpaths: One or more `FieldPath` objects that should be included in the request
        pagination_strategy: A Class implementing the :class:`PaginationStrategy`
            ``Protocol``. If ``None``, then automatic pagination is disabled.
            Defaults to :class:`LegacyStrategy`.
    Returns:
        An iterator over the response data pages, each as a :class:`pandas.DataFrame`.
    """
    expanded = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
    pages = self.query_json_iter(expanded, pagination_strategy)
    for page in pages:
        yield df_of_json(page, expanded, None, False)
def query(
    self,
    fpaths: FieldPath | list[FieldPath],
    unwrap: bool = True,
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> str | int | float | bool | list | tuple | None:
    """Executes one or multiple ``FieldPath`` objects immediately and returns the
    data (as a tuple if multiple ``FieldPath`` objects are provided).

    Args:
        fpaths: One or more ``FieldPath`` object(s) to query.
        unwrap: Flag indicating whether or not, in the case where the returned
            data is a list of one element, the element itself should be returned
            instead of the list. Defaults to ``True``.
        pagination_strategy: A Class implementing the :class:`PaginationStrategy`
            ``Protocol``. If ``None``, then automatic pagination is disabled.
            Defaults to :class:`LegacyStrategy`.

    Returns:
        The ``FieldPath`` object(s) data

    Example:

    .. code-block:: python

        >>> from subgrounds import Subgrounds
        >>> sg = Subgrounds()
        >>> univ3 = sg.load_subgraph(
        ...     'https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v3')
        >>> univ3.Swap.price = abs(univ3.Swap.amount0) / abs(univ3.Swap.amount1)
        >>> eth_usdc_last = univ3.Query.swaps(
        ...     orderBy=univ3.Swap.timestamp,
        ...     orderDirection='desc',
        ...     first=1,
        ...     where=[
        ...         univ3.Swap.pool == '0x8ad599c3a0ff1de082011efddc58f1908eb6e6d8'
        ...     ]
        ... ).price
        >>> sg.query(eth_usdc_last)
        2628.975030015892
    """
    fpaths = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
    blob = self.query_json(fpaths, pagination_strategy=pagination_strategy)

    def f(fpath: FieldPath) -> Any:
        # Extracted data may be a scalar or a list depending on the field path.
        data = fpath._extract_data(blob)
        # Unwrap single-element lists when requested.
        if unwrap and isinstance(data, list) and len(data) == 1:
            return data[0]
        return data

    data = tuple(fpaths | map(f))
    # A single FieldPath yields its data directly rather than as a 1-tuple.
    if len(data) == 1:
        return data[0]
    return data
def query_iter(
    self,
    fpaths: FieldPath | list[FieldPath],
    unwrap: bool = True,
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> Iterator[str | int | float | bool | list[Any] | tuple | None]:
    """Same as `query` except an iterator over the response data pages is returned.

    Args:
        fpaths: One or more ``FieldPath`` object(s) to query.
        unwrap: Flag indicating whether or not, in the case where
            the returned data is a list of one element, the element itself should
            be returned instead of the list. Defaults to ``True``.
        pagination_strategy: A Class implementing the :class:`PaginationStrategy`
            ``Protocol``. If ``None``, then automatic pagination is disabled.
            Defaults to :class:`LegacyStrategy`.

    Yields:
        The ``FieldPath`` object(s)' data, one page at a time.
    """

    def f(fpath: FieldPath, blob: dict[str, Any]) -> Any:
        data = fpath._extract_data(blob)
        # Unwrap single-element lists when requested.
        if unwrap and isinstance(data, list) and len(data) == 1:
            return data[0]
        return data

    fpaths = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
    for page in self.query_json_iter(fpaths, pagination_strategy):
        # BUGFIX: the original contained this block twice, emitting every
        # page's data twice; each page is now yielded exactly once.
        data = tuple(fpaths | map(functools.partial(f, blob=page)))
        if len(data) == 1:
            yield data[0]
        else:
            yield data
def _fetch(self, url: str, blob: dict[str, Any]) -> dict[str, Any]:
    """POST ``blob`` to ``url`` and return the ``data`` payload of the JSON reply.

    Raises:
        ServerError: If the response body cannot be decoded as JSON.
        GraphQLError: If the decoded response carries no ``data`` key.
    """
    request_headers = default_header(url) | self.headers
    response = self._client.post(url, json=blob, headers=request_headers)
    # Surface HTTP-level failures (4xx/5xx) immediately.
    response.raise_for_status()

    try:
        payload = response.json()
    except JSONDecodeError:
        raise ServerError(
            f"Server ({url}) did not respond with proper JSON"
            f"\nDid you query a proper GraphQL endpoint?"
            f"\n\n{response.content}"
        )

    # GraphQL reports errors in-band: a missing "data" key signals failure.
    data = payload.get("data")
    if data is None:
        raise GraphQLError(payload.get("errors", "Unknown Error(s) Found"))
    return data
def __enter__(self):
    # Delegate context management to the underlying HTTP client so entering
    # this object also opens the client's resources.
    self._client.__enter__()
    return self
def __exit__(self, *args):
    # Close the underlying HTTP client, forwarding any exception info.
    self._client.__exit__(*args)
|
(timeout: int = 30, headers: dict[str, typing.Any] = <factory>, global_transforms: list[subgrounds.transform.base.RequestTransform] = <factory>, subgraphs: dict[str, subgrounds.subgraph.subgraph.Subgraph] = <factory>, schema_cache: pathlib.Path = PosixPath('schemas')) -> None
|
709,008
|
subgrounds.client.sync
|
__enter__
| null |
def __enter__(self):
    # Enter the underlying HTTP client's context so its resources are open
    # for the lifetime of this object's `with` block.
    self._client.__enter__()
    return self
|
(self)
|
709,010
|
subgrounds.client.sync
|
__exit__
| null |
def __exit__(self, *args):
    # Exit the underlying HTTP client's context, passing exception info through.
    self._client.__exit__(*args)
|
(self, *args)
|
709,016
|
subgrounds.client.sync
|
_fetch
| null |
def _fetch(self, url: str, blob: dict[str, Any]) -> dict[str, Any]:
    """POST ``blob`` to ``url`` and return the ``data`` field of the JSON response.

    Raises:
        ServerError: If the response body is not valid JSON.
        GraphQLError: If the decoded response has no ``data`` key.
    """
    resp = self._client.post(
        url, json=blob, headers=default_header(url) | self.headers
    )
    # Surface HTTP-level failures (4xx/5xx) immediately.
    resp.raise_for_status()
    try:
        raw_data = resp.json()
    except JSONDecodeError:
        raise ServerError(
            f"Server ({url}) did not respond with proper JSON"
            f"\nDid you query a proper GraphQL endpoint?"
            f"\n\n{resp.content}"
        )
    # GraphQL reports errors in-band: a missing "data" key signals failure.
    if (data := raw_data.get("data")) is None:
        raise GraphQLError(raw_data.get("errors", "Unknown Error(s) Found"))
    return data
|
(self, url: str, blob: dict[str, typing.Any]) -> dict[str, typing.Any]
|
709,019
|
subgrounds.client.sync
|
execute
|
Executes a :class:`DataRequest` and returns a :class:`DataResponse`.
Args:
req: The :class:`DataRequest` object to be executed.
pagination_strategy: A Class implementing the :class:`PaginationStrategy`
``Protocol``. If ``None``, then automatic pagination is disabled.
Defaults to :class:`LegacyStrategy`.
Returns:
A :class:`DataResponse` object representing the response
|
def execute(
    self,
    req: DataRequest,
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> DataResponse:
    """Executes a :class:`DataRequest` and returns a :class:`DataResponse`.

    Args:
        req: The :class:`DataRequest` object to be executed.
        pagination_strategy: A Class implementing the :class:`PaginationStrategy`
            ``Protocol``. If ``None``, then automatic pagination is disabled.
            Defaults to :class:`LegacyStrategy`.

    Returns:
        A :class:`DataResponse` object representing the response
    """
    try:
        # `_execute` is a generator-based state machine: it yields each
        # document to fetch and expects the matching DocumentResponse to be
        # sent back before yielding the next one.
        executor = self._execute(req, pagination_strategy)
        doc = next(executor)
        while True:
            data = self._fetch(
                doc.url, {"query": doc.graphql, "variables": doc.variables}
            )
            doc = executor.send(DocumentResponse(url=doc.url, data=data))
    except StopIteration as e:
        # The generator's return value (the final DataResponse) travels on
        # StopIteration.value.
        return e.value
|
(self, req: subgrounds.query.DataRequest, pagination_strategy: Optional[Type[subgrounds.pagination.strategies.PaginationStrategy]] = <class 'subgrounds.pagination.strategies.LegacyStrategy'>) -> subgrounds.query.DataResponse
|
709,020
|
subgrounds.client.sync
|
execute_iter
|
Same as `execute`, except that an iterator is returned which will iterate
the data pages.
Args:
req: The :class:`DataRequest` object to be executed
pagination_strategy: A Class implementing the :class:`PaginationStrategy`
``Protocol``. If ``None``, then automatic pagination is disabled.
Defaults to :class:`LegacyStrategy`.
Returns:
An iterator over the :class:`DocumentResponse` pages.
⚠️ DOES NOT apply global transforms across multiple documents or their pages.
Since we yield each page as we get it, it's not possible to accurately perform
the transforms since we don't collect the pages. This means transforms
expecting multiple documents or pages of documents will be inaccurate.
|
def execute_iter(
    self,
    req: DataRequest,
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> Iterator[DocumentResponse]:
    """Same as `execute`, except that an iterator is returned which will iterate
    the data pages.

    Args:
        req: The :class:`DataRequest` object to be executed
        pagination_strategy: A Class implementing the :class:`PaginationStrategy`
            ``Protocol``. If ``None``, then automatic pagination is disabled.
            Defaults to :class:`LegacyStrategy`.

    Returns:
        An iterator over the :class:`DocumentResponse` pages.

    ⚠️ DOES NOT apply global transforms across multiple documents or their pages.
    Since we yield each page as we get it, it's not possible to accurately perform
    the transforms since we don't collect the pages. This means transforms
    expecting multiple documents or pages of documents will be inaccurate.
    """
    # `_execute_iter` drives the same yield/send protocol as `_execute`;
    # exhausting it raises StopIteration, which simply ends this iterator.
    with suppress(StopIteration):
        executor = self._execute_iter(req, pagination_strategy)
        while True:
            doc = cast(Document, next(executor))
            data = self._fetch(
                doc.url, {"query": doc.graphql, "variables": doc.variables}
            )
            # Send the response back in; the generator yields the
            # (transformed) page, which we forward to the caller.
            yield cast(
                DocumentResponse,
                executor.send(DocumentResponse(url=doc.url, data=data)),
            )
|
(self, req: subgrounds.query.DataRequest, pagination_strategy: Optional[Type[subgrounds.pagination.strategies.PaginationStrategy]] = <class 'subgrounds.pagination.strategies.LegacyStrategy'>) -> collections.abc.Iterator[subgrounds.query.DocumentResponse]
|
709,022
|
subgrounds.client.sync
|
load
| null |
def load(
    self,
    url: str,
    save_schema: bool = False,
    cache_dir: str | None = None,
    is_subgraph: bool = True,
) -> Subgraph:
    """Introspect (or load from cache) the schema at ``url`` and return the
    generated :class:`Subgraph`.

    Args:
        url: The url of the API.
        save_schema: Flag indicating whether or not the schema should be
            cached to disk.
        cache_dir: Deprecated; passing a value only emits a warning.
        is_subgraph: Whether the endpoint is a subgraph (``True``) or a
            plain GraphQL API (``False``).

    Returns:
        A generated class representing the endpoint and its entities.
    """
    if cache_dir is not None:
        # BUGFIX: corrected the warning message typo ("depreciated").
        warnings.warn("This will be deprecated", DeprecationWarning)
    try:
        # `_load` is a generator: it yields the introspection request only
        # when the schema is not cached, and returns the Subgraph via
        # StopIteration.value once it finishes.
        loader = self._load(url, save_schema, is_subgraph)
        url, query = next(loader)  # if this fails, schema is loaded from cache
        data = self._fetch(url, {"query": query})
        loader.send(data)
    except StopIteration as e:
        return e.value

    # Unreachable: `loader.send` must exhaust the generator and raise
    # StopIteration, which is handled above.
    assert False
|
(self, url: str, save_schema: bool = False, cache_dir: Optional[str] = None, is_subgraph: bool = True) -> subgrounds.subgraph.subgraph.Subgraph
|
709,023
|
subgrounds.client.sync
|
load_api
|
Performs introspection on the provided GraphQL API ``url`` to get the
schema, stores the schema if ``save_schema`` is ``True`` and returns a
generated class representing the GraphQL endpoint with all its entities.
Args:
url: The url of the API.
save_schema: Flag indicating whether or not the schema should be saved
to disk.
Returns:
A generated class representing the subgraph and its entities
|
def load_api(
    self, url: str, save_schema: bool = False, cache_dir: str | None = None
) -> Subgraph:
    """Introspect the GraphQL API at ``url``, optionally persist the schema,
    and return a generated class exposing the endpoint's entities.

    Args:
        url: The url of the API.
        save_schema: Flag indicating whether or not the schema should be
            saved to disk.
        cache_dir: Deprecated; forwarded to :meth:`load` unchanged.

    Returns:
        A generated class representing the API and its entities.
    """
    # Thin wrapper over `load` with subgraph-specific handling disabled.
    return self.load(
        url,
        save_schema=save_schema,
        cache_dir=cache_dir,
        is_subgraph=False,
    )
|
(self, url: str, save_schema: bool = False, cache_dir: Optional[str] = None) -> subgrounds.subgraph.subgraph.Subgraph
|
709,024
|
subgrounds.client.sync
|
load_subgraph
|
Performs introspection on the provided GraphQL API ``url`` to get the
schema, stores the schema if ``save_schema`` is ``True`` and returns a
generated class representing the subgraph with all its entities.
Args:
url: The url of the API.
save_schema: Flag indicating whether or not the schema should be cached to
disk.
Returns:
Subgraph: A generated class representing the subgraph and its entities
|
def load_subgraph(
    self, url: str, save_schema: bool = False, cache_dir: str | None = None
) -> Subgraph:
    """Introspect the subgraph at ``url``, optionally persist the schema,
    and return a generated class exposing the subgraph's entities.

    Args:
        url: The url of the API.
        save_schema: Flag indicating whether or not the schema should be
            cached to disk.
        cache_dir: Deprecated; forwarded to :meth:`load` unchanged.

    Returns:
        Subgraph: A generated class representing the subgraph and its entities.
    """
    # Thin wrapper over `load` with subgraph-specific handling enabled.
    return self.load(
        url,
        save_schema=save_schema,
        cache_dir=cache_dir,
        is_subgraph=True,
    )
|
(self, url: str, save_schema: bool = False, cache_dir: Optional[str] = None) -> subgrounds.subgraph.subgraph.Subgraph
|
709,027
|
subgrounds.client.sync
|
query
|
Executes one or multiple ``FieldPath`` objects immediately and returns the
data (as a tuple if multiple ``FieldPath`` objects are provided).
Args:
fpaths: One or more ``FieldPath`` object(s) to query.
unwrap: Flag indicating whether or not, in the case where the returned data
is a list of one element, the element itself should be returned instead of
the list. Defaults to ``True``.
pagination_strategy: A Class implementing the :class:`PaginationStrategy`
``Protocol``. If ``None``, then automatic pagination is disabled.
Defaults to :class:`LegacyStrategy`.
Returns:
The ``FieldPath`` object(s) data
Example:
.. code-block:: python
>>> from subgrounds import Subgrounds
>>> sg = Subgrounds()
>>> univ3 = sg.load_subgraph(
... 'https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v3')
# Define price SyntheticField
>>> univ3.Swap.price = abs(univ3.Swap.amount0) / abs(univ3.Swap.amount1)
# Construct FieldPath to get price of last swap on ETH/USDC pool
>>> eth_usdc_last = univ3.Query.swaps(
... orderBy=univ3.Swap.timestamp,
... orderDirection='desc',
... first=1,
... where=[
... univ3.Swap.pool == '0x8ad599c3a0ff1de082011efddc58f1908eb6e6d8'
... ]
... ).price
# Query last price FieldPath
>>> sg.query(eth_usdc_last)
2628.975030015892
|
def query(
    self,
    fpaths: FieldPath | list[FieldPath],
    unwrap: bool = True,
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> str | int | float | bool | list | tuple | None:
    """Executes one or multiple ``FieldPath`` objects immediately and returns the
    data (as a tuple if multiple ``FieldPath`` objects are provided).

    Args:
        fpaths: One or more ``FieldPath`` object(s) to query.
        unwrap: Flag indicating whether or not, in the case where the returned
            data is a list of one element, the element itself should be returned
            instead of the list. Defaults to ``True``.
        pagination_strategy: A Class implementing the :class:`PaginationStrategy`
            ``Protocol``. If ``None``, then automatic pagination is disabled.
            Defaults to :class:`LegacyStrategy`.

    Returns:
        The ``FieldPath`` object(s) data

    Example:

    .. code-block:: python

        >>> from subgrounds import Subgrounds
        >>> sg = Subgrounds()
        >>> univ3 = sg.load_subgraph(
        ...     'https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v3')
        >>> univ3.Swap.price = abs(univ3.Swap.amount0) / abs(univ3.Swap.amount1)
        >>> eth_usdc_last = univ3.Query.swaps(
        ...     orderBy=univ3.Swap.timestamp,
        ...     orderDirection='desc',
        ...     first=1,
        ...     where=[
        ...         univ3.Swap.pool == '0x8ad599c3a0ff1de082011efddc58f1908eb6e6d8'
        ...     ]
        ... ).price
        >>> sg.query(eth_usdc_last)
        2628.975030015892
    """
    fpaths = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
    blob = self.query_json(fpaths, pagination_strategy=pagination_strategy)

    def f(fpath: FieldPath) -> Any:
        # Extracted data may be a scalar or a list depending on the field path.
        data = fpath._extract_data(blob)
        # Unwrap single-element lists when requested.
        if unwrap and isinstance(data, list) and len(data) == 1:
            return data[0]
        return data

    data = tuple(fpaths | map(f))
    # A single FieldPath yields its data directly rather than as a 1-tuple.
    if len(data) == 1:
        return data[0]
    return data
|
(self, fpaths: subgrounds.subgraph.fieldpath.FieldPath | list[subgrounds.subgraph.fieldpath.FieldPath], unwrap: bool = True, pagination_strategy: Optional[Type[subgrounds.pagination.strategies.PaginationStrategy]] = <class 'subgrounds.pagination.strategies.LegacyStrategy'>) -> str | int | float | bool | list | tuple | None
|
709,028
|
subgrounds.client.sync
|
query_df
|
Same as :func:`Subgrounds.query` but formats the response data into a
Pandas DataFrame. If the response data cannot be flattened to a single query
(e.g.: when querying multiple list fields that return different entities),
then multiple dataframes are returned
``fpaths`` is a list of :class:`FieldPath` objects that indicate which
data must be queried.
``columns`` is an optional argument used to rename the dataframes(s)
columns. The length of ``columns`` must be the same as the number of columns
of *all* returned dataframes.
``concat`` indicates whether or not the resulting dataframes should be
concatenated together. The dataframes must have the same number of columns,
as well as the same column names and types (the names can be set using the
``columns`` argument).
Args:
fpaths: One or more `FieldPath` objects that should be included in the request
columns: The column labels. Defaults to None.
merge: Whether or not to merge resulting dataframes.
pagination_strategy: A Class implementing the :class:`PaginationStrategy`
``Protocol``. If ``None``, then automatic pagination is disabled.
Defaults to :class:`LegacyStrategy`.
Returns:
A :class:`pandas.DataFrame` containing the response data.
Example:
.. code-block:: python
>>> from subgrounds import Subgrounds
>>> sg = Subgrounds()
>>> univ3 = sg.load_subgraph(
... 'https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v3')
# Define price SyntheticField
>>> univ3.Swap.price = abs(univ3.Swap.amount0) / abs(univ3.Swap.amount1)
# Query last 10 swaps from the ETH/USDC pool
>>> eth_usdc = univ3.Query.swaps(
... orderBy=univ3.Swap.timestamp,
... orderDirection='desc',
... first=10,
... where=[
... univ3.Swap.pool == '0x8ad599c3a0ff1de082011efddc58f1908eb6e6d8'
... ]
... )
>>> sg.query_df([
... eth_usdc.timestamp,
... eth_usdc.price
... ])
swaps_timestamp swaps_price
0 1643213811 2618.886394
1 1643213792 2618.814281
2 1643213792 2617.500494
3 1643213763 2615.458495
4 1643213763 2615.876574
5 1643213739 2615.352390
6 1643213678 2615.205713
7 1643213370 2614.115746
8 1643213210 2613.077301
9 1643213196 2610.686563
|
def query_df(
    self,
    fpaths: FieldPath | list[FieldPath],
    columns: list[str] | None = None,
    concat: bool = False,
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> pd.DataFrame | list[pd.DataFrame]:
    """Same as :func:`Subgrounds.query` but formats the response data into a
    Pandas DataFrame. If the response data cannot be flattened to a single
    table (e.g.: when querying multiple list fields that return different
    entities), then multiple DataFrames are returned.

    Args:
        fpaths: One or more ``FieldPath`` objects to include in the request.
        columns: Optional labels used to rename the resulting DataFrame(s)'
            columns; its length must match the number of columns of *all*
            returned DataFrames. Defaults to ``None``.
        concat: Whether the resulting DataFrames should be concatenated
            together; they must share column count, names and types (names
            can be set via ``columns``). Defaults to ``False``.
        pagination_strategy: A class implementing the
            :class:`PaginationStrategy` ``Protocol``. If ``None``, automatic
            pagination is disabled. Defaults to :class:`LegacyStrategy`.

    Returns:
        A :class:`pandas.DataFrame` (or list thereof) containing the
        response data.

    Example:

    .. code-block:: python

        >>> from subgrounds import Subgrounds
        >>> sg = Subgrounds()
        >>> univ3 = sg.load_subgraph(
        ...     'https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v3')
        >>> univ3.Swap.price = abs(univ3.Swap.amount0) / abs(univ3.Swap.amount1)
        >>> eth_usdc = univ3.Query.swaps(
        ...     orderBy=univ3.Swap.timestamp,
        ...     orderDirection='desc',
        ...     first=10,
        ...     where=[
        ...         univ3.Swap.pool == '0x8ad599c3a0ff1de082011efddc58f1908eb6e6d8'
        ...     ]
        ... )
        >>> sg.query_df([eth_usdc.timestamp, eth_usdc.price])
           swaps_timestamp  swaps_price
        0       1643213811  2618.886394
        1       1643213792  2618.814281
        ...
    """
    # Expand auto-selected field paths and flatten the argument into a list.
    selected = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
    raw_pages = self.query_json(selected, pagination_strategy=pagination_strategy)
    return df_of_json(raw_pages, selected, columns, concat)
|
(self, fpaths: subgrounds.subgraph.fieldpath.FieldPath | list[subgrounds.subgraph.fieldpath.FieldPath], columns: Optional[list[str]] = None, concat: bool = False, pagination_strategy: Optional[Type[subgrounds.pagination.strategies.PaginationStrategy]] = <class 'subgrounds.pagination.strategies.LegacyStrategy'>) -> pandas.core.frame.DataFrame | list[pandas.core.frame.DataFrame]
|
709,029
|
subgrounds.client.sync
|
query_df_iter
|
Same as `query_df` except returns an iterator over the response data pages
Args:
fpaths: One or more `FieldPath` objects that should be included in the request
columns: The column labels. Defaults to None.
merge: Whether or not to merge resulting dataframes.
pagination_strategy: A Class implementing the :class:`PaginationStrategy`
``Protocol``. If ``None``, then automatic pagination is disabled.
Defaults to :class:`LegacyStrategy`.
Returns:
An iterator over the response data pages, each as a :class:`pandas.DataFrame`.
|
def query_df_iter(
    self,
    fpaths: FieldPath | list[FieldPath],
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> Iterator[pd.DataFrame | list[pd.DataFrame]]:
    """Same as `query_df`, except each response data page is yielded as its
    own DataFrame (or list of DataFrames) as it arrives.

    Args:
        fpaths: One or more ``FieldPath`` objects to include in the request.
        pagination_strategy: A class implementing the
            :class:`PaginationStrategy` ``Protocol``. If ``None``, automatic
            pagination is disabled. Defaults to :class:`LegacyStrategy`.

    Yields:
        The response data pages, each as a :class:`pandas.DataFrame`.
    """
    # Flatten the argument and expand any field paths requiring auto-selection.
    selected = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
    pages = self.query_json_iter(selected, pagination_strategy)
    for page in pages:
        yield df_of_json(page, selected, None, False)
|
(self, fpaths: subgrounds.subgraph.fieldpath.FieldPath | list[subgrounds.subgraph.fieldpath.FieldPath], pagination_strategy: Optional[Type[subgrounds.pagination.strategies.PaginationStrategy]] = <class 'subgrounds.pagination.strategies.LegacyStrategy'>) -> collections.abc.Iterator[pandas.core.frame.DataFrame | list[pandas.core.frame.DataFrame]]
|
709,030
|
subgrounds.client.sync
|
query_iter
|
Same as `query` except an iterator over the response data pages is returned.
Args:
fpath: One or more ``FieldPath`` object(s) to query.
unwrap: Flag indicating whether or not, in the case where
the returned data is a list of one element, the element itself should be
returned instead of the list. Defaults to ``True``.
pagination_strategy: A Class implementing the :class:`PaginationStrategy`
``Protocol``. If ``None``, then automatic pagination is disabled.
Defaults to :class:`LegacyStrategy`.
Returns:
An iterator over the ``FieldPath`` object(s)' data pages
|
def query_iter(
    self,
    fpaths: FieldPath | list[FieldPath],
    unwrap: bool = True,
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> Iterator[str | int | float | bool | list[Any] | tuple | None]:
    """Same as `query` except an iterator over the response data pages is returned.

    Args:
        fpaths: One or more ``FieldPath`` object(s) to query.
        unwrap: Flag indicating whether or not, in the case where
            the returned data is a list of one element, the element itself should
            be returned instead of the list. Defaults to ``True``.
        pagination_strategy: A Class implementing the :class:`PaginationStrategy`
            ``Protocol``. If ``None``, then automatic pagination is disabled.
            Defaults to :class:`LegacyStrategy`.

    Yields:
        The ``FieldPath`` object(s)' data, one page at a time.
    """

    def f(fpath: FieldPath, blob: dict[str, Any]) -> Any:
        data = fpath._extract_data(blob)
        # Unwrap single-element lists when requested.
        if unwrap and isinstance(data, list) and len(data) == 1:
            return data[0]
        return data

    fpaths = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
    for page in self.query_json_iter(fpaths, pagination_strategy):
        # BUGFIX: the original contained this block twice, emitting every
        # page's data twice; each page is now yielded exactly once.
        data = tuple(fpaths | map(functools.partial(f, blob=page)))
        if len(data) == 1:
            yield data[0]
        else:
            yield data
|
(self, fpaths: subgrounds.subgraph.fieldpath.FieldPath | list[subgrounds.subgraph.fieldpath.FieldPath], unwrap: bool = True, pagination_strategy: Optional[Type[subgrounds.pagination.strategies.PaginationStrategy]] = <class 'subgrounds.pagination.strategies.LegacyStrategy'>) -> collections.abc.Iterator[str | int | float | bool | list[typing.Any] | tuple | None]
|
709,031
|
subgrounds.client.sync
|
query_json
|
Equivalent to
``Subgrounds.execute(Subgrounds.mk_request(fpaths), pagination_strategy)``.
Args:
fpaths: One or more :class:`FieldPath` objects
that should be included in the request.
pagination_strategy: A Class implementing the :class:`PaginationStrategy`
``Protocol``. If ``None``, then automatic pagination is disabled.
Defaults to :class:`LegacyStrategy`.
Returns:
The response data
|
def query_json(
    self,
    fpaths: FieldPath | list[FieldPath],
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> list[dict[str, Any]]:
    """Equivalent to
    ``Subgrounds.execute(Subgrounds.mk_request(fpaths), pagination_strategy)``.

    Args:
        fpaths: One or more :class:`FieldPath` objects
            that should be included in the request.
        pagination_strategy: A class implementing the
            :class:`PaginationStrategy` ``Protocol``. If ``None``, automatic
            pagination is disabled. Defaults to :class:`LegacyStrategy`.

    Returns:
        The response data, one dict per document response.
    """
    # Expand auto-selected field paths and flatten the argument into a list.
    selected = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
    result = self.execute(self.mk_request(selected), pagination_strategy)
    return [doc.data for doc in result.responses]
|
(self, fpaths: subgrounds.subgraph.fieldpath.FieldPath | list[subgrounds.subgraph.fieldpath.FieldPath], pagination_strategy: Optional[Type[subgrounds.pagination.strategies.PaginationStrategy]] = <class 'subgrounds.pagination.strategies.LegacyStrategy'>) -> list[dict[str, typing.Any]]
|
709,032
|
subgrounds.client.sync
|
query_json_iter
|
Same as `query_json` returns an iterator over the response data pages.
Args:
fpaths: One or more :class:`FieldPath` objects that should be included
in the request.
pagination_strategy: A Class implementing the :class:`PaginationStrategy`
``Protocol``. If ``None``, then automatic pagination is disabled.
Defaults to :class:`LegacyStrategy`.
Returns:
list[dict[str, Any]]: The response data
|
def query_json_iter(
    self,
    fpaths: FieldPath | list[FieldPath],
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> Iterator[dict[str, Any]]:
    """Same as `query_json` except an iterator over the response data pages
    is returned.

    Args:
        fpaths: One or more :class:`FieldPath` objects that should be
            included in the request.
        pagination_strategy: A class implementing the
            :class:`PaginationStrategy` ``Protocol``. If ``None``, automatic
            pagination is disabled. Defaults to :class:`LegacyStrategy`.

    Yields:
        The response data pages, one dict per document response.
    """
    # Expand auto-selected field paths and flatten the argument into a list.
    selected = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
    request = self.mk_request(selected)
    for response in self.execute_iter(request, pagination_strategy):
        yield response.data
|
(self, fpaths: subgrounds.subgraph.fieldpath.FieldPath | list[subgrounds.subgraph.fieldpath.FieldPath], pagination_strategy: Optional[Type[subgrounds.pagination.strategies.PaginationStrategy]] = <class 'subgrounds.pagination.strategies.LegacyStrategy'>) -> collections.abc.Iterator[dict[str, typing.Any]]
|
709,033
|
subgrounds.subgraph.fieldpath
|
SyntheticField
|
SyntheticField(f: 'Callable', type_: 'TypeRef.T', deps: 'list[FieldPath | SyntheticField] | FieldPath | SyntheticField', default: 'Any' = None) -> 'None'
|
class SyntheticField(FieldOperatorMixin):
STRING: ClassVar[TypeRef.Named] = TypeRef.Named(name="String", kind="SCALAR")
INT: ClassVar[TypeRef.Named] = TypeRef.Named(name="Int", kind="SCALAR")
FLOAT: ClassVar[TypeRef.Named] = TypeRef.Named(name="Float", kind="SCALAR")
BOOL: ClassVar[TypeRef.Named] = TypeRef.Named(name="Boolean", kind="SCALAR")
_counter: ClassVar[int] = 0
_f: Callable
_type: TypeRef.T
_default: Any
_deps: list[FieldPath]
def __init__(
self,
f: Callable,
type_: TypeRef.T,
deps: list[FieldPath | SyntheticField] | FieldPath | SyntheticField,
default: Any = None,
) -> None:
deps = list([deps] | traverse)
def mk_deps(
deps: list[FieldPath | SyntheticField],
f: Callable,
acc: list[tuple[Callable | None, int]] = [],
) -> tuple[Callable, list[FieldPath]]:
"""If all dependencies are field paths, then this function does nothing.
If the dependencies contain one or more other synthetic fields, as is the
case when chaining binary operators, then the synthetic field tree is
flattened to a single synthetic field containing all leaf dependencies.
Args:
deps: Initial dependencies for synthetic field
f: Function to apply to the values of those dependencies
acc: Accumulator. Defaults to [].
Returns:
A tuple containing the potentially modified function and dependency list.
"""
match deps:
case []:
def new_f(*args):
new_args = []
_counter = 0
for f_, deps in acc:
match (f_, deps):
case (None, FieldPath()):
new_args.append(args[_counter])
_counter += 1
case (
None,
int() | float() | str() | bool() as constant,
):
new_args.append(constant)
case (f_, list() as deps):
new_args.append(
f_(*args[_counter : _counter + len(deps)])
)
_counter += len(deps)
return f(*new_args)
new_deps = []
for _, deps in acc:
match deps:
case FieldPath() as dep:
new_deps.append(dep)
case int() | float() | str() | bool():
pass
case list() as deps:
new_deps = new_deps + deps
return (new_f, new_deps)
case [SyntheticField(_f=inner_f, _deps=inner_deps), *rest]:
acc.append((inner_f, inner_deps))
return mk_deps(rest, f, acc)
case [FieldPath() as dep, *rest]:
acc.append((None, dep))
return mk_deps(rest, f, acc)
case [int() | float() | str() | bool() as constant, *rest]:
acc.append((None, constant))
return mk_deps(rest, f, acc)
case _ as deps:
raise TypeError(f"mk_deps: unexpected argument {deps}")
(f, deps) = mk_deps(deps, f)
self._f = f
self._type = type_
self._default = (
default if default is not None else SyntheticField.default_of_type(type_)
)
self._deps = deps
SyntheticField._counter += 1
@staticmethod
def default_of_type(type_: TypeRef.T):
match type_.name:
case "String":
return ""
case "Int":
return 0
case "Float":
return 0.0
case "Boolean":
return False
case _:
return 0
@classmethod
def constant(cls, value: str | int | float | bool) -> SyntheticField:
"""Returns a constant ``SyntheticField`` with value ``value``.
Useful for injecting additional static data to a schema or merging entities.
Args:
value: The constant field's value
Returns:
The constant ``SyntheticField``
Example:
.. code-block:: python
>>> from subgrounds import Subgrounds, SyntheticField
>>> sg = Subgrounds()
>>> univ3 = sg.load_subgraph(
... "https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v3"
... )
# Create constant SyntheticFields
>>> univ3.Mint.tx_type = SyntheticField.constant('MINT')
>>> univ3.Burn.tx_type = SyntheticField.constant('BURN')
# Last 10 mints and burns
>>> mints = univ3.Query.mints(
... first=10,
... orderBy=univ3.Mint.timestamp,
... orderDirection='desc'
... )
>>> burns = univ3.Query.burns(
... first=10,
... orderBy=univ3.Burn.timestamp,
... orderDirection='desc'
... )
# Query mints and burns. Notice that we merge the two entity tables by
# setting `concat=True` and overwriting the column names (columns must
# match the `FieldPaths`)
>>> df = sg.query_df(
... [
... mints.transaction.id,
... mints.timestamp,
... mints.tx_type,
... mints.origin,
... mints.amountUSD,
... burns.transaction.id,
... burns.timestamp,
... burns.tx_type,
... burns.origin,
... burns.amountUSD,
... ],
... columns=['tx_hash', 'timestamp', 'tx_type', 'origin', 'amount_USD'],
... concat=True
.. )
# Sort the DataFrame (output is truncated)
>>> df.sort_values(by=['timestamp'], ascending=False)
tx_hash timestamp tx_type origin amount_USD
0xcbe1... 1656016553 MINT 0x3435.... 7.879784e+03
0xddda... 1656016284 MINT 0xc747.... 5.110840e+04
0xa767... 1656016284 BURN 0xd40d.... 2.804573e+05
0xc132... 1656015853 MINT 0xc747.... 5.122569e+04
0x1444... 1656015773 MINT 0xd11a.... 8.897983e+03
0x3315... 1656015693 MINT 0xb7dd.... 0.000000e+00
0xcc71... 1656015278 BURN 0xa7c4.... 1.254942e+06
0x7bbf... 1656015111 MINT 0xac56.... 3.432075e+04
0xea21... 1656014785 BURN 0x0709.... 2.059106e+04
0x3bd3... 1656014120 MINT 0x5099.... 2.517578e+03
0x1ea5... 1656014018 BURN 0x5099.... 0.000000e+00
0xb9d3... 1656013998 MINT 0x22df.... 8.365750e+04
0xc5e3... 1656013946 BURN 0xac56.... 3.363809e+04
0x7c73... 1656013913 MINT 0x4ce6.... 4.837287e+04
0x95cf... 1656013728 BURN 0x4ce6.... 5.110010e+04
0x76dd... 1656013599 MINT 0x234a.... 5.363896e+03
0x47e5... 1656013580 BURN 0xaf0f.... 0.000000e+00
0xe20e... 1656013455 BURN 0xaf0f.... 0.000000e+00
0xac3e... 1656013237 BURN 0x665d.... 2.254100e+05
0x01c3... 1656013034 BURN 0x0084.... 0.000000e+00
"""
match value:
case str():
return cls(lambda: value, cls.STRING, [])
case int():
return cls(lambda: value, cls.INT, [])
case float():
return cls(lambda: value, cls.FLOAT, [])
case bool():
return cls(lambda: value, cls.BOOL, [])
@classmethod
def datetime_of_timestamp(cls, timestamp: FieldPath | SyntheticField) -> Self:
    """Build a ``SyntheticField`` rendering ``timestamp`` as a readable string.

    Args:
        timestamp: A ``FieldPath`` (or ``SyntheticField``) pointing at a
            Unix-timestamp field.

    Returns:
        A string ``SyntheticField`` formatted like ``2021-06-30 22:15:10``
        (local time, exactly as ``str(datetime.fromtimestamp(...))`` produces).
    """

    def _format_ts(ts):
        # Interpreted in the local timezone, matching the original behavior.
        return str(datetime.fromtimestamp(ts))

    return SyntheticField(_format_ts, SyntheticField.STRING, timestamp)
@classmethod
def map(
    cls,
    dict: dict[Any, Any],
    type_: TypeRef.T,
    fpath: FieldPath | SyntheticField,
    default: Any | None = None,
) -> Self:
    """Returns a SyntheticField that will map the values of ``fpath`` using the
    key value pairs in ``dict``. If a value is not in the dictionary, then
    ``default`` will be used instead.

    Args:
        dict: The dictionary containing the key value pairs used to map
            ``fpath``'s values. (NOTE: the parameter shadows the builtin
            ``dict``; the name is kept for backward compatibility with
            keyword callers.)
        type_: The type of the resulting field
        fpath: The FieldPath whose values will be mapped using the dictionary
        default: Default value used when a value is not in the provided dictionary

    Returns:
        A map SyntheticField

    Example:

    .. code-block:: python

        >>> univ3.Pool.symbol = SyntheticField.map(
        ...   pooladdr_symbol_map,
        ...   SyntheticField.STRING,
        ...   univ3.Pool.id,
        ...   'UNKNOWN'
        ... )
    """
    # `dict.get` performs a single lookup instead of the original
    # `dict[value] if value in dict else default` double lookup; behavior
    # is otherwise identical.
    return SyntheticField(
        lambda value: dict.get(value, default), type_, fpath
    )
|
(f: 'Callable', type_: 'TypeRef.T', deps: 'list[FieldPath | SyntheticField] | FieldPath | SyntheticField', default: 'Any' = None) -> 'None'
|
709,036
|
subgrounds.subgraph.fieldpath
|
__eq__
| null |
from __future__ import annotations
import logging
import operator
import warnings
from dataclasses import dataclass
from datetime import datetime
from functools import partial, reduce
from hashlib import blake2b
from typing import TYPE_CHECKING, Any, Callable, ClassVar
from pipe import map, traverse
from typing_extensions import Self # 3.10 support
from subgrounds.query import Query, Selection, arguments_of_field_args
from subgrounds.schema import SchemaMeta, TypeMeta, TypeRef
from subgrounds.utils import extract_data
from .filter import Filter
if TYPE_CHECKING:
from .subgraph import Subgraph
# Package-wide logger shared by the subgrounds modules.
logger = logging.getLogger("subgrounds")
# Show DeprecationWarning and friends once per location (Python's "default"
# warning filter) so deprecation notices emitted below are visible to users.
warnings.simplefilter("default")
# Depth up to which FieldPath attributes are eagerly generated; deeper
# paths are created lazily on attribute access (see FieldPath.__init__).
FPATH_DEPTH_LIMIT: int = 4
def typeref_of_binary_op(
op: str, t1: TypeRef.T, t2: int | float | str | bool | FieldPath | SyntheticField
):
def f_typeref(t1, t2):
match (op, TypeRef.root_type_name(t1), TypeRef.root_type_name(t2)):
case ("add", "String" | "Bytes", "String" | "Bytes"):
return TypeRef.Named(name="String", kind="SCALAR")
case (
"add" | "sub" | "mul" | "div" | "pow" | "mod",
"BigInt" | "Int",
"BigInt" | "Int",
):
return TypeRef.Named(name="Int", kind="SCALAR")
case (
"add" | "sub" | "mul" | "div" | "pow",
"BigInt" | "Int",
"BigDecimal" | "Float",
):
return TypeRef.Named(name="Float", kind="SCALAR")
case (
"add" | "sub" | "mul" | "div" | "pow",
"BigDecimal" | "Float",
"BigInt" | "Int" | "BigDecimal" | "Float",
):
return TypeRef.Named(name="Float", kind="SCALAR")
case _ as args:
raise Exception(
f"typeref_of_binary_op: f_typeref: unhandled arguments {args}"
)
def f_const(t1, const):
match (op, TypeRef.root_type_name(t1), const):
case ("add", "String" | "Bytes", str()):
return TypeRef.Named(name="String", kind="SCALAR")
case (
"add" | "sub" | "mul" | "div" | "pow" | "mod",
"BigInt" | "Int",
int(),
):
return TypeRef.Named(name="Int", kind="SCALAR")
case ("add" | "sub" | "mul" | "div" | "pow", "BigInt" | "Int", float()):
return TypeRef.Named(name="Float", kind="SCALAR")
case (
"add" | "sub" | "mul" | "div" | "pow",
"BigDecimal" | "Float",
int() | float(),
):
return TypeRef.Named(name="Float", kind="SCALAR")
case _ as args:
raise Exception(
f"typeref_of_binary_op: f_typeref: unhandled arguments {args}"
)
match t2:
case int() | float() | str() | bool() as constant:
return f_const(t1, constant)
case FieldPath() | SyntheticField() as field:
return f_typeref(t1, field._type)
|
(self, other)
|
709,038
|
subgrounds.subgraph.fieldpath
|
__init__
| null |
def __init__(
self,
f: Callable,
type_: TypeRef.T,
deps: list[FieldPath | SyntheticField] | FieldPath | SyntheticField,
default: Any = None,
) -> None:
deps = list([deps] | traverse)
def mk_deps(
deps: list[FieldPath | SyntheticField],
f: Callable,
acc: list[tuple[Callable | None, int]] = [],
) -> tuple[Callable, list[FieldPath]]:
"""If all dependencies are field paths, then this function does nothing.
If the dependencies contain one or more other synthetic fields, as is the
case when chaining binary operators, then the synthetic field tree is
flattened to a single synthetic field containing all leaf dependencies.
Args:
deps: Initial dependencies for synthetic field
f: Function to apply to the values of those dependencies
acc: Accumulator. Defaults to [].
Returns:
A tuple containing the potentially modified function and dependency list.
"""
match deps:
case []:
def new_f(*args):
new_args = []
_counter = 0
for f_, deps in acc:
match (f_, deps):
case (None, FieldPath()):
new_args.append(args[_counter])
_counter += 1
case (
None,
int() | float() | str() | bool() as constant,
):
new_args.append(constant)
case (f_, list() as deps):
new_args.append(
f_(*args[_counter : _counter + len(deps)])
)
_counter += len(deps)
return f(*new_args)
new_deps = []
for _, deps in acc:
match deps:
case FieldPath() as dep:
new_deps.append(dep)
case int() | float() | str() | bool():
pass
case list() as deps:
new_deps = new_deps + deps
return (new_f, new_deps)
case [SyntheticField(_f=inner_f, _deps=inner_deps), *rest]:
acc.append((inner_f, inner_deps))
return mk_deps(rest, f, acc)
case [FieldPath() as dep, *rest]:
acc.append((None, dep))
return mk_deps(rest, f, acc)
case [int() | float() | str() | bool() as constant, *rest]:
acc.append((None, constant))
return mk_deps(rest, f, acc)
case _ as deps:
raise TypeError(f"mk_deps: unexpected argument {deps}")
(f, deps) = mk_deps(deps, f)
self._f = f
self._type = type_
self._default = (
default if default is not None else SyntheticField.default_of_type(type_)
)
self._deps = deps
SyntheticField._counter += 1
|
(self, f: Callable, type_: subgrounds.schema.TypeRef.T, deps: list[subgrounds.subgraph.fieldpath.FieldPath | subgrounds.subgraph.fieldpath.SyntheticField] | subgrounds.subgraph.fieldpath.FieldPath | subgrounds.subgraph.fieldpath.SyntheticField, default: Optional[Any] = None) -> NoneType
|
709,044
|
subgrounds.subgraph.fieldpath
|
__repr__
| null |
def fieldpaths_of_object(
subgraph: Subgraph, object_: TypeMeta.ObjectMeta | TypeMeta.InterfaceMeta
):
"""Returns generator of FieldPath objects that selects all non-list fields of
GraphQL Object of Interface :attr:`object_`.
Args:
schema: _description_
object_: _description_
Yields:
_description_
"""
for fmeta in object_.fields:
if not fmeta.type_.is_list and len(fmeta.arguments) == 0:
match subgraph._schema.type_of_typeref(fmeta.type_):
case TypeMeta.ObjectMeta() | TypeMeta.InterfaceMeta():
yield subgraph.__getattribute__(object_.name).__getattribute__(
fmeta.name
).id
case _:
yield subgraph.__getattribute__(object_.name).__getattribute__(
fmeta.name
)
|
(self)
|
709,053
|
subgrounds.subgraph.fieldpath
|
default_of_type
| null |
@staticmethod
def default_of_type(type_: TypeRef.T):
match type_.name:
case "String":
return ""
case "Int":
return 0
case "Float":
return 0.0
case "Boolean":
return False
case _:
return 0
|
(type_: subgrounds.schema.TypeRef.T)
|
709,130
|
blabber
|
StarterGenerator
| null |
class StarterGenerator:
    """Serves conversation starters in random order without repeats.

    Every ``*.txt`` file in the package's ``data`` directory is read once;
    starters are handed out from a shuffled queue that is re-shuffled
    whenever it runs dry.
    """

    def __init__(self):
        data_dir = Path(__file__).parent / "data"
        pool = set()
        for topic_path in data_dir.glob("*.txt"):
            with open(topic_path, encoding="utf-8") as topic_file:
                for line in topic_file:
                    pool.add(line.strip())
        self.original_starters = pool
        self._shuffle_starters()

    def _shuffle_starters(self):
        """Rebuild the queue as a fresh random permutation of all starters."""
        everything = list(self.original_starters)
        self.starters = random.sample(everything, len(everything))

    def starter(self):
        """Pop and return one starter, re-shuffling when the queue is empty."""
        if not self.starters:
            self._shuffle_starters()
        return self.starters.pop()
|
()
|
709,131
|
blabber
|
__init__
| null |
def __init__(self):
    """Collect every starter line from the packaged data files, then shuffle."""
    data_dir = Path(__file__).parent / "data"
    collected = set()
    for topic_path in data_dir.glob("*.txt"):
        with open(topic_path, encoding="utf-8") as topic_file:
            collected.update(line.strip() for line in topic_file)
    self.original_starters = collected
    self._shuffle_starters()
|
(self)
|
709,132
|
blabber
|
_shuffle_starters
|
Resets the generator's internal starter queue.
|
def _shuffle_starters(self):
"""Resets the generator's internal starter queue."""
self.starters = random.sample(
list(self.original_starters), len(self.original_starters)
)
|
(self)
|
709,133
|
blabber
|
starter
|
Gets a fresh starter from the queue.
|
def starter(self):
"""Gets a fresh starter from the queue."""
# If the queue is out of starters, re-shuffle
if len(self.starters) == 0:
self._shuffle_starters()
# Get a random starter while also removing it from the queue
return self.starters.pop()
|
(self)
|
709,135
|
bzutech.bzutechapi.BzuTechAPI
|
BzuTech
| null |
class BzuTech:
    """Async client for the BzuTech backend REST API.

    Construct with credentials, then ``await start()`` to authenticate and
    load the account's contract and devices.
    """

    def __init__(self, email: str, password: str) -> None:
        self.email = email
        self.password = password
        self._token = None        # access token, set by _async_auth
        self._operatorid = None   # operator id returned at login
        self._contratoid = None   # contract id, set by _async_set_contrato
        self.dispositivos = None  # dict of chipid (str) -> Device
        self.httpheaders = {}     # auth headers reused on every request

    async def start(self) -> bool:
        """Authenticate and populate contract/device state.

        Returns:
            True on success, False when authentication failed.
        """
        if not await self._async_auth():
            return False
        self.httpheaders["Authorization"] = "Bearer " + self._token
        contract = await self._async_set_contrato()
        got_devices = await self._async_set_dispositivos()
        return contract and got_devices

    async def _async_auth(self) -> bool:
        """Log in and store the token/operator id; False on bad credentials."""
        url = "https://back-prd.bzutech.com.br/auth/login/"
        data = {"operador_email": self.email, "password": self.password}
        try:
            # `async with` guarantees the session is closed even on errors;
            # the original closed it manually and leaked it on exceptions.
            async with ClientSession() as client:
                async with client.post(url=url, data=data) as resp:
                    resposta = await resp.json()
            self._token = resposta["tokens"]["access"]
            self._operatorid = resposta["id"]
            return True
        except KeyError:
            # Error payloads lack the "tokens" key -> treat as auth failure.
            return False

    async def _async_set_contrato(self) -> bool:
        """Fetch the first company's contract id for this operator."""
        url = "https://back-prd.bzutech.com.br/operador/navbar/" + str(self._operatorid)
        async with ClientSession() as client:
            async with client.get(url=url, headers=self.httpheaders) as resp:
                resposta = await resp.json()
        self._contratoid = resposta["empresas"][0]["contratos_id"]
        return True

    async def _async_set_dispositivos(self) -> bool:
        """Build the chipid -> Device map for all active devices."""
        url = "https://back-prd.bzutech.com.br/dispositivos/listar/" + str(
            self._contratoid
        )
        async with ClientSession() as client:
            async with client.get(url=url, headers=self.httpheaders) as resp:
                resposta = await resp.json()
        dispositivos = {}
        for disp in resposta:
            if disp["status_dispositivo"] != 1:
                continue
            chipid = int(disp["boot_chip_id"])
            # Fall back to the device number when no name was configured.
            dispname = (
                disp["dispnum"] if disp["dispname"] is None else disp["dispname"]
            )
            dispositivo = Device(chipid, self.httpheaders, dispname)
            await dispositivo.initialize()
            dispositivos[str(chipid)] = dispositivo
        self.dispositivos = dispositivos
        return True

    def get_token(self) -> str:
        """Return the current access token (None before start())."""
        return self._token

    def get_device_names(self) -> list:
        """Return the chip ids (strings) of all known devices."""
        return list(self.dispositivos)

    def get_sensors(self, chipid: str) -> list:
        """Return sensor names of device *chipid*, or ["-1"] if unknown."""
        if chipid in self.get_device_names():
            return list(self.dispositivos[chipid].get_sensor_names())
        return ["-1"]

    def get_sensors_on(self, chipid: str, port: int) -> list:
        """Return sensor names on *port* of *chipid*, or ["-1"] if unknown."""
        if chipid in self.get_device_names():
            return self.dispositivos[chipid].get_sensor_names_on(port)
        return ["-1"]

    def get_endpoint_on(self, chipid: str, port: int) -> str:
        """Return the endpoint model on *port* of *chipid*, or "-1" if unknown."""
        if chipid in self.get_device_names():
            return self.dispositivos[chipid].get_ep_on(port)
        return "-1"

    def get_reading(self, chipid: str, sensorname: str):
        """Fetch the latest reading of a sensor by (upper-cased) name."""
        return self.dispositivos[chipid].get_readings(sensorname.upper())

    async def send_reading(self, sensoref: str, DeviceID: str, reading: float, date: str):
        """POST a single measurement; returns (payload, response body)."""
        url = "https://back-dev.bzutech.com.br/logs/home_assistence/"
        # Build the payload as a dict instead of hand-concatenating a JSON
        # string and parsing it back; the "data" field keeps the backend's
        # stringified-list format.
        data = {
            "bci": DeviceID,
            "date": date,
            "data": "[{'ref':'" + sensoref + "','med':" + str(reading) + "}]",
        }
        print(data)
        async with ClientSession() as client:
            async with client.post(url, json=data) as resp:
                # Fixed: resp.text is a coroutine method; the original printed
                # and returned the unbound method object instead of the body.
                body = await resp.text()
        print(body)
        return (data, body)

    def get_apiversion(self):
        """Return this client library's API version string."""
        return "2.3.3"
|
(email: str, password: str) -> None
|
709,136
|
bzutech.bzutechapi.BzuTechAPI
|
__init__
| null |
def __init__(self, email: str, password: str) -> None:
self.email = email
self.password = password
self._token = None
self._operatorid = None
self._contratoid = None
self.dispositivos = None
self.httpheaders = {}
|
(self, email: str, password: str) -> NoneType
|
709,138
|
bzutech.bzutechapi.BzuTechAPI
|
_async_set_contrato
|
def __init__(self, email: str, password: str) -> None:
self.email = email
self.password = password
self._token = None
self._operatorid = None
self._contratoid = None
self.dispositivos = None
self.httpheaders = {}
|
(self) -> bool
|
|
709,140
|
bzutech.bzutechapi.BzuTechAPI
|
get_apiversion
| null |
def get_apiversion(self):
    """Return this client library's API version string."""
    return "2.3.3"
|
(self)
|
709,141
|
bzutech.bzutechapi.BzuTechAPI
|
get_device_names
| null |
def get_device_names(self) -> list:
return list(self.dispositivos.keys())
|
(self) -> list
|
709,142
|
bzutech.bzutechapi.BzuTechAPI
|
get_endpoint_on
| null |
def get_endpoint_on(self, chipid: str, port: int) -> str:
if(chipid in self.get_device_names()):
return self.dispositivos[chipid].get_ep_on(port)
return "-1"
|
(self, chipid: str, port: int) -> str
|
709,143
|
bzutech.bzutechapi.BzuTechAPI
|
get_reading
| null |
def get_reading(self, chipid: str, sensorname:str):
return self.dispositivos[chipid].get_readings(sensorname.upper())
|
(self, chipid: str, sensorname: str)
|
709,144
|
bzutech.bzutechapi.BzuTechAPI
|
get_sensors
| null |
def get_sensors(self, chipid: str) -> list:
if chipid in self.get_device_names():
return list(self.dispositivos[chipid].get_sensor_names())
return ["-1"]
|
(self, chipid: str) -> list
|
709,145
|
bzutech.bzutechapi.BzuTechAPI
|
get_sensors_on
| null |
def get_sensors_on(self, chipid: str, port: int) -> list:
if chipid in self.get_device_names():
list = self.dispositivos[chipid].get_sensor_names_on(port)
return list
return ["-1"]
|
(self, chipid: str, port: int) -> list
|
709,146
|
bzutech.bzutechapi.BzuTechAPI
|
get_token
| null |
def get_token(self) -> str:
    """Return the current access token (None before authentication)."""
    return self._token
|
(self) -> str
|
709,149
|
bzutech.device.Device
|
Device
| null |
class Device:
    """A BzuTech device (gateway) and its attached sensors."""

    # Sensor-name marker -> endpoint model, checked in this priority order.
    _ENDPOINT_BY_MARKER = (
        ("ADS7878", "EP400"),
        ("RTZSBZ", "EP300"),
        ("SGP30", "EP200"),
        ("DOOR", "EP121"),
        ("SHT30", "EP111"),
    )

    def __init__(self, chipid: int, token: dict, dispname: str = ""):
        self.chipid = chipid
        self.dispname = dispname
        self.httpheaders = token  # auth headers forwarded to sensor requests
        self.sensores = {}  # alias -> Sensor, filled by initialize()

    async def _async_set_sensores(self):
        """Fetch the device's channels; build a Sensor for each channel that
        has reported at least one measurement."""
        url = "https://back-prd.bzutech.com.br/dispositivos/canais-list/" + str(
            self.chipid
        )
        # `async with` closes the session even on errors (the original
        # closed it manually and leaked it on exceptions).
        async with ClientSession() as client:
            async with client.get(url, headers=self.httpheaders) as resp:
                resposta = await resp.json()
        return [
            Sensor(
                self.chipid,
                canal["sensor_nome"].upper(),
                canal["apelido_canal"],
                self.httpheaders,
            )
            for canal in resposta
            if canal["ultima_medicao_sensor"] is not None
        ]

    async def initialize(self):
        """Load the sensor list and index it by sensor alias."""
        sensores = await self._async_set_sensores()
        self.sensores = {sensor.apelido: sensor for sensor in sensores}

    def get_sensor_names(self):
        """Return all sensor aliases known for this device."""
        return list(self.sensores)

    def get_sensor_names_on(self, port: str):
        """Return sensor aliases whose last character matches *port*."""
        return [name for name in self.get_sensor_names() if name[-1] == port]

    def get_ep_on(self, port: int):
        """Return the endpoint model of the sensors on *port*.

        Returns None (as the original did implicitly) when no known sensor
        family is present on the port.
        """
        for sensor in self.get_sensor_names_on(str(port)):
            for marker, endpoint in self._ENDPOINT_BY_MARKER:
                if marker in sensor:
                    return endpoint
        return None

    async def get_readings(self, nome_sensor):
        """Fetch the latest reading of the named sensor."""
        return await self.sensores[nome_sensor].get_leitura()

    def get_chipid(self):
        """Return the device chip id."""
        return self.chipid
|
(chipid: int, token: dict, dispname: str = '')
|
709,150
|
bzutech.device.Device
|
__init__
| null |
def __init__(self, chipid: int, token: dict, dispname: str = "" ):
self.chipid = chipid
self.dispname = dispname
self.httpheaders = token
self.sensores = {}
|
(self, chipid: int, token: dict, dispname: str = '')
|
709,152
|
bzutech.device.Device
|
get_chipid
| null |
def get_chipid(self):
    """Return the device chip id."""
    return self.chipid
|
(self)
|
709,153
|
bzutech.device.Device
|
get_ep_on
| null |
def get_ep_on(self, port:int):
sensors = self.get_sensor_names_on(str(port))
for sensor in sensors:
if "ADS7878" in sensor:
return "EP400"
if "RTZSBZ" in sensor:
return "EP300"
if "SGP30" in sensor:
return "EP200"
if "DOOR" in sensor:
return "EP121"
if "SHT30" in sensor:
return "EP111"
|
(self, port: int)
|
709,155
|
bzutech.device.Device
|
get_sensor_names
| null |
def get_sensor_names(self):
return list(self.sensores.keys())
|
(self)
|
709,156
|
bzutech.device.Device
|
get_sensor_names_on
| null |
def get_sensor_names_on(self, port: str):
sensores = []
for sensor in self.get_sensor_names():
if sensor[-1] == port:
sensores.append(sensor)
return sensores
|
(self, port: str)
|
709,158
|
bzutech.sensor.Sensor
|
Sensor
| null |
class Sensor:
    """One measurement channel of a BzuTech device.

    Resolves a channel name such as ``"SHT30-TMP-1"`` (model-measurement-port)
    to a human-readable type and to the backend's numeric sensor reference.
    """

    # Measurement suffix -> human-readable type.
    # NOTE(review): the original dict literal listed "VOC" twice ("Gases
    # Volateis", then "Voltagem") and "C01" twice; Python keeps the last
    # value per key, so the effective mapping below is unchanged.
    _TIPOS_SENSORES = {
        "TMP": "Temperatura",
        "HUM": "Humidade",
        "VOT": "Voltagem",
        "CO2": "CO2",
        "CUR": "Corrente",
        "LUM": "Luminosidade",
        "PIR": "",
        "DOOR": "Porta",
        "DOR": "DOR",
        "M10": "M10",
        "M25": "M25",
        "M40": "M40",
        "SND": "SOM",
        "M01": "M01",
        "C01": "C01",
        "VOC": "Voltagem",
        "DOS": "DOS",
        "VOA": "Voltagem",
        "VOB": "Voltagem",
        "CRA": "Corrente",
        "CRB": "Corrente",
        "CRC": "Corrente",
        "VRA": "Voltagem",
        "VRB": "Voltagem",
        "VRC": "Voltagem",
        "C05": "C05",
        "C25": "C25",
        "C40": "C40",
        "C10": "C10",
        "UPT": "UPTIME",
        "DBM": "WIFI",
        "BAT": "BATTERY",
        "MEM": "MEMORY",
    }

    # "model-measurement" prefix -> backend sensor code.
    _COD_SENSORES = {
        "AHT10-TMP": "01",
        "SCT013-CUR": "02",
        "SHT30-TMP": "03",
        "SR602-DOOR": "04",
        "TSL2561-LUM": "05",
        "ZMPT-CUR": "06",
        "AHT10-HUM": "07",
        "SHT30-HUM": "08",
        "ZMPT-VOT": "09",
        "SR602-PIR": "10",
        "BH1750-LUM": "11",
        "SCHNE-VOT": "12",
        "S16L201D-PIR": "13",
        "SCHNE-CUR": "14",
        "SPS30-M01": "64",
        "SPS30-M25": "66",
        "SPS30-M40": "68",
        "SPS30-M10": "70",
        "SHT20-TMP": "20",
        "DOOR-DOR": "23",
        "AHT20-TMP": "22",
        "SHT26-HUM": "85",
        "SHT24-HUM": "84",
        "SHT25-TMP": "83",
        "RTZSBZ-SND": "82",
        "SGP30-VOC": "60",
        "SGP30-CO2": "62",
        "SPS30-C01": "87",
        "DOOR-DOS": "86",
        "SPS30-C05": "100",
        "SPS30-C10": "99",
        "SPS30-C25": "97",
        "SPS30-C40": "98",
        "ADS7878-VOA": "88",
        "ADS7878-VOB": "89",
        "ADS7878-VOC": "90",
        "ADS7878-CRA": "91",
        "ADS7878-CRB": "92",
        "ADS7878-CRC": "93",
        "ADS7878-VRA": "94",
        "ADS7878-VRB": "95",
        "ADS7878-VRC": "96",
        "GATEWAY-DBM": "101",
        "GATEWAY-MEM": "102",
        "GATEWAY-BAT": "103",
        "GATEWAY-UPT": "104",
    }

    def __init__(self, chipid: int, canal: str, apelido: str, token: dict) -> None:
        # Left-pad the chip id with the "11000" prefix up to 8 characters.
        self.chipid = "11000"[0 : 8 - len(str(chipid))] + str(chipid)
        # NOTE(review): the original stored `canal` here and never used the
        # `apelido` argument; preserved as-is to avoid changing behavior.
        self.apelido = canal
        self.httpheaders = token
        self.porta = canal[-1]  # last character of the channel name is the port
        self.last_reading = 0
        try:
            self.tipo = self._TIPOS_SENSORES[canal.split("-")[1]]
            self.sensorref = canal[-1] + "00" + self._COD_SENSORES[canal[0:-2]]
        except KeyError:
            # Unknown model/measurement: fall back to the generic PIR sensor.
            self.tipo = self._TIPOS_SENSORES["PIR"]
            self.sensorref = canal[-1] + "00" + self._COD_SENSORES["SR602-PIR"]

    async def get_leitura(self):
        """Fetch this sensor's latest measurement (scaled by 1e-6).

        When the backend response has no "medicao" key, the previously
        cached value is returned unchanged.
        """
        url = (
            "https://back-prd.bzutech.com.br/logs/ultima_medicao/"
            + self.chipid
            + "/"
            + self.sensorref
        )
        # `async with` closes the session even on errors (the original
        # closed it manually and leaked it on exceptions).
        async with ClientSession() as client:
            async with client.get(url, headers=self.httpheaders) as resp:
                resposta = await resp.json()
        if "medicao" in resposta:
            self.last_reading = int(resposta["medicao"]) / 1000000
        print(resposta)
        return self.last_reading
|
(chipid: int, canal: str, apelido: str, token: dict) -> None
|
709,159
|
bzutech.sensor.Sensor
|
__init__
| null |
def __init__(self, chipid: int, canal: str, apelido: str, token:dict) -> None:
self.chipid = "11000"[0 : 8 - len(str(chipid))] + str(chipid)
self.apelido = canal
self.httpheaders = token
self.porta = canal[-1]
self.last_reading = 0
tipos_sensores = {
"TMP": "Temperatura",
"HUM": "Humidade",
"VOT": "Voltagem",
"CO2": "CO2",
"CUR": "Corrente",
"LUM": "Luminosidade",
"PIR": "",
"DOOR": "Porta",
"DOR": "DOR",
"M10": "M10",
"M25": "M25",
"M40": "M40",
"SND": "SOM",
"M01": "M01",
"C01": "C01",
"VOC": "Gases Volateis",
"DOS": "DOS",
"VOA": "Voltagem",
"VOB": "Voltagem",
"VOC": "Voltagem",
"CRA": "Corrente",
"CRB": "Corrente",
"CRC": "Corrente",
"VRA": "Voltagem",
"VRB": "Voltagem",
"VRC": "Voltagem",
"C05": "C05",
"C01": "C01",
"C25": "C25",
"C40": "C40",
"C10": "C10",
"UPT": "UPTIME",
"DBM": "WIFI",
"BAT": "BATTERY",
"MEM": "MEMORY"
}
cod_sensores = {
"AHT10-TMP": "01",
"SCT013-CUR": "02",
"SHT30-TMP": "03",
"SR602-DOOR": "04",
"TSL2561-LUM": "05",
"ZMPT-CUR": "06",
"AHT10-HUM": "07",
"SHT30-HUM": "08",
"ZMPT-VOT": "09",
"SR602-PIR": "10",
"BH1750-LUM": "11",
"SCHNE-VOT": "12",
"S16L201D-PIR": "13",
"SCHNE-CUR": "14",
"SPS30-M01": "64",
"SPS30-M25": "66",
"SPS30-M40": "68",
"SPS30-M10": "70",
"SHT20-TMP": "20",
"DOOR-DOR": "23",
"AHT20-TMP": "22",
"SHT26-HUM": "85",
"SHT24-HUM": "84",
"SHT25-TMP": "83",
"RTZSBZ-SND": "82",
"SGP30-VOC": "60",
"SGP30-CO2": "62",
"SPS30-C01": "87",
"DOOR-DOS": "86",
"SPS30-C05": "100",
"SPS30-C10": "99",
"SPS30-C25": "97",
"SPS30-C40": "98",
"ADS7878-VOA": "88",
"ADS7878-VOB": "89",
"ADS7878-VOC": "90",
"ADS7878-CRA": "91",
"ADS7878-CRB": "92",
"ADS7878-CRC": "93",
"ADS7878-VRA": "94",
"ADS7878-VRB": "95",
"ADS7878-VRC": "96",
"GATEWAY-DBM": "101",
"GATEWAY-MEM": "102",
"GATEWAY-BAT": "103",
"GATEWAY-UPT": "104"
}
try:
self.tipo = tipos_sensores[canal.split("-")[1]]
self.sensorref = canal[-1] + "00" + cod_sensores[canal[0:-2]]
except KeyError:
self.tipo = tipos_sensores["PIR"]
self.sensorref = canal[-1] + "00" + cod_sensores["SR602-PIR"]
|
(self, chipid: int, canal: str, apelido: str, token: dict) -> NoneType
|
709,165
|
click.decorators
|
decorator
| null |
def option(
    *param_decls: str, cls: t.Optional[t.Type[Option]] = None, **attrs: t.Any
) -> t.Callable[[FC], FC]:
    """Attaches an option to the command. All positional arguments are
    passed as parameter declarations to :class:`Option`; all keyword
    arguments are forwarded unchanged (except ``cls``).
    This is equivalent to creating an :class:`Option` instance manually
    and attaching it to the :attr:`Command.params` list.

    :param cls: the option class to instantiate. This defaults to
        :class:`Option`.
    :param param_decls: Passed as positional arguments to the constructor of
        ``cls``.
    :param attrs: Passed as keyword arguments to the constructor of ``cls``.
    """
    option_cls = Option if cls is None else cls

    def decorator(f: FC) -> FC:
        _param_memo(f, option_cls(param_decls, **attrs))
        return f

    return decorator
|
(f: ~FC) -> ~FC
|
709,167
|
click.decorators
|
decorator
| null |
def argument(
    *param_decls: str, cls: t.Optional[t.Type[Argument]] = None, **attrs: t.Any
) -> t.Callable[[FC], FC]:
    """Attaches an argument to the command. All positional arguments are
    passed as parameter declarations to :class:`Argument`; all keyword
    arguments are forwarded unchanged (except ``cls``).
    This is equivalent to creating an :class:`Argument` instance manually
    and attaching it to the :attr:`Command.params` list.

    :param cls: the argument class to instantiate. This defaults to
        :class:`Argument`.
    :param param_decls: Passed as positional arguments to the constructor of
        ``cls``.
    :param attrs: Passed as keyword arguments to the constructor of ``cls``.
    """
    argument_cls = Argument if cls is None else cls

    def decorator(f: FC) -> FC:
        _param_memo(f, argument_cls(param_decls, **attrs))
        return f

    return decorator
|
(f: ~FC) -> ~FC
|
709,171
|
cligj
|
geojson_type_bbox_opt
|
GeoJSON bbox output mode
|
def geojson_type_bbox_opt(default=False):
    """GeoJSON bbox output mode"""
    # A flag option that stores 'bbox' into the `geojson_type` parameter.
    flag_attrs = dict(
        flag_value='bbox',
        default=default,
        help="Output as GeoJSON bounding box array(s).",
    )
    return click.option('--bbox', 'geojson_type', **flag_attrs)
|
(default=False)
|
709,172
|
cligj
|
geojson_type_collection_opt
|
GeoJSON FeatureCollection output mode
|
def geojson_type_collection_opt(default=False):
    """GeoJSON FeatureCollection output mode"""
    # A flag option that stores 'collection' into the `geojson_type` parameter.
    flag_attrs = dict(
        flag_value='collection',
        default=default,
        help="Output as GeoJSON feature collection(s).",
    )
    return click.option('--collection', 'geojson_type', **flag_attrs)
|
(default=False)
|
709,173
|
cligj
|
geojson_type_feature_opt
|
GeoJSON Feature or Feature sequence output mode
|
def geojson_type_feature_opt(default=False):
    """GeoJSON Feature or Feature sequence output mode"""
    # A flag option that stores 'feature' into the `geojson_type` parameter.
    flag_attrs = dict(
        flag_value='feature',
        default=default,
        help="Output as GeoJSON feature(s).",
    )
    return click.option('--feature', 'geojson_type', **flag_attrs)
|
(default=False)
|
709,175
|
cligj.features
|
normalize_feature_inputs
|
Click callback that normalizes feature input values.
Returns a generator over features from the input value.
Parameters
----------
ctx: a Click context
param: the name of the argument or option
value: object
The value argument may be one of the following:
1. A list of paths to files containing GeoJSON feature
collections or feature sequences.
2. A list of string-encoded coordinate pairs of the form
"[lng, lat]", or "lng, lat", or "lng lat".
If no value is provided, features will be read from stdin.
Yields
------
Mapping
A GeoJSON Feature represented by a Python mapping
|
def normalize_feature_inputs(ctx, param, value):
    """Click callback that normalizes feature input values.

    Returns a generator over features from the input value.

    Parameters
    ----------
    ctx: a Click context
    param: the name of the argument or option
    value: object
        The value argument may be one of the following:
        1. A list of paths to files containing GeoJSON feature
           collections or feature sequences.
        2. A list of string-encoded coordinate pairs of the form
           "[lng, lat]", or "lng, lat", or "lng lat".
        If no value is provided, features will be read from stdin.

    Yields
    ------
    Mapping
        A GeoJSON Feature represented by a Python mapping
    """
    # With no inputs, fall back to "-" (stdin).
    inputs = value if value else ('-',)
    for feature_like in inputs:
        try:
            with click.open_file(feature_like, encoding="utf-8") as src:
                yield from iter_features(iter(src))
        except IOError:
            # Not a readable file: treat the string as a coordinate pair
            # and wrap it in a Point feature.
            coords = list(coords_from_query(feature_like))
            yield {
                'type': 'Feature',
                'properties': {},
                'geometry': {
                    'type': 'Point',
                    'coordinates': coords}}
|
(ctx, param, value)
|
709,185
|
evidently.pipeline.column_mapping
|
ColumnMapping
|
ColumnMapping(target: Optional[str] = 'target', prediction: Union[str, int, Sequence[str], Sequence[int], NoneType] = 'prediction', datetime: Optional[str] = 'datetime', id: Optional[str] = None, numerical_features: Optional[List[str]] = None, categorical_features: Optional[List[str]] = None, datetime_features: Optional[List[str]] = None, target_names: Union[List[Union[int, str]], Dict[Union[int, str], str], NoneType] = None, task: Optional[str] = None, pos_label: Union[str, int, NoneType] = 1, text_features: Optional[List[str]] = None, embeddings: Optional[Dict[str, List[str]]] = None, user_id: Optional[str] = 'user_id', item_id: Optional[str] = 'item_id', recommendations_type: Union[evidently.pipeline.column_mapping.RecomType, str] = <RecomType.SCORE: 'score'>)
|
class ColumnMapping:
    """Declares which dataset columns play which roles (target, prediction,
    feature groups, ids, timestamps) for downstream evidently computations."""

    target: Optional[str] = "target"
    prediction: Optional[Union[str, int, Union[Sequence[str], Sequence[int]]]] = "prediction"
    datetime: Optional[str] = "datetime"
    id: Optional[str] = None
    numerical_features: Optional[List[str]] = None
    categorical_features: Optional[List[str]] = None
    datetime_features: Optional[List[str]] = None
    target_names: Optional[TargetNames] = None
    task: Optional[str] = None
    pos_label: Optional[Union[str, int]] = 1
    text_features: Optional[List[str]] = None
    embeddings: Optional[Embeddings] = None
    user_id: Optional[str] = "user_id"
    item_id: Optional[str] = "item_id"
    recommendations_type: Union[RecomType, str] = RecomType.SCORE

    @property
    def recom_type(self) -> RecomType:
        """Recommendations type coerced to the ``RecomType`` enum
        (plain strings are converted)."""
        if isinstance(self.recommendations_type, str):
            return RecomType(self.recommendations_type)
        return self.recommendations_type

    def is_classification_task(self) -> bool:
        """Return True when the configured task is classification."""
        return self.task == TaskType.CLASSIFICATION_TASK

    def is_regression_task(self) -> bool:
        """Return True when the configured task is regression."""
        return self.task == TaskType.REGRESSION_TASK
|
(target: Optional[str] = 'target', prediction: Union[str, int, Sequence[str], Sequence[int], NoneType] = 'prediction', datetime: Optional[str] = 'datetime', id: Optional[str] = None, numerical_features: Optional[List[str]] = None, categorical_features: Optional[List[str]] = None, datetime_features: Optional[List[str]] = None, target_names: Union[List[Union[int, str]], Dict[Union[int, str], str], NoneType] = None, task: Optional[str] = None, pos_label: Union[str, int, NoneType] = 1, text_features: Optional[List[str]] = None, embeddings: Optional[Dict[str, List[str]]] = None, user_id: Optional[str] = 'user_id', item_id: Optional[str] = 'item_id', recommendations_type: Union[evidently.pipeline.column_mapping.RecomType, str] = <RecomType.SCORE: 'score'>) -> None
|
709,186
|
evidently.pipeline.column_mapping
|
__eq__
| null |
from dataclasses import dataclass
from enum import Enum
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Union
class TaskType:
    """String constants naming the supported ML task types."""
    REGRESSION_TASK: str = "regression"
    CLASSIFICATION_TASK: str = "classification"
    RECOMMENDER_SYSTEMS: str = "recsys"
|
(self, other)
|
709,189
|
evidently.pipeline.column_mapping
|
is_classification_task
| null |
def is_classification_task(self) -> bool:
    """Return True when the configured task is classification."""
    return self.task == TaskType.CLASSIFICATION_TASK
|
(self) -> bool
|
709,190
|
evidently.pipeline.column_mapping
|
is_regression_task
| null |
def is_regression_task(self) -> bool:
    """Return True when the configured task is regression."""
    return self.task == TaskType.REGRESSION_TASK
|
(self) -> bool
|
709,191
|
evidently.pipeline.column_mapping
|
TaskType
| null |
class TaskType:
    """String constants naming the ML task kinds recognized by the pipeline."""

    REGRESSION_TASK: str = "regression"
    CLASSIFICATION_TASK: str = "classification"
    RECOMMENDER_SYSTEMS: str = "recsys"
|
()
|
709,192
|
evidently.nbextension
|
_jupyter_nbextension_paths
| null |
def _jupyter_nbextension_paths():
return [{"section": "notebook", "src": "nbextension/static", "dest": "evidently", "require": "evidently/extension"}]
|
()
|
709,196
|
biglist._biglist
|
Biglist
| null |
class Biglist(BiglistBase[Element]):
    """Append-only, list-like container whose data is persisted in many files (local or cloud)."""

    # Maps storage-format name -> serializer class used to dump/load data files.
    registered_storage_formats = {
        'json': serializer.JsonSerializer,
        'pickle': serializer.PickleSerializer,
        'pickle-zstd': serializer.ZstdPickleSerializer,
    }
    # Format chosen by ``new`` when the caller does not specify ``storage_format``.
    DEFAULT_STORAGE_FORMAT = 'pickle-zstd'
@classmethod
def register_storage_format(
cls,
name: str,
serializer: type[serializer.Serializer],
) -> None:
"""
Register a new serializer to handle data file dumping and loading.
This class has a few serializers registered out of the box.
They should be adequate for most applications.
Parameters
----------
name
Name of the format to be associated with the new serializer.
After registering the new serializer with name "xyz", one can use
``storage_format='xyz'`` in calls to :meth:`new`.
When reading the object back from persistence,
make sure this registry is also in place so that the correct
deserializer can be found.
serializer
A subclass of `upathlib.serializer.Serializer <https://github.com/zpz/upathlib/blob/main/src/upathlib/serializer.py>`_.
Although this class needs to provide the ``Serializer`` API, it is possible to write data files in text mode.
The registered 'json' format does that.
"""
good = string.ascii_letters + string.digits + '-_'
assert all(n in good for n in name)
if name.replace('_', '-') in cls.registered_storage_formats:
raise ValueError(f"serializer '{name}' is already registered")
name = name.replace('_', '-')
cls.registered_storage_formats[name] = serializer
    @classmethod
    def new(
        cls,
        path: PathType | None = None,
        *,
        batch_size: int | None = None,
        storage_format: str | None = None,
        serialize_kwargs: dict | None = None,
        deserialize_kwargs: dict | None = None,
        init_info: dict | None = None,
        **kwargs,
    ) -> Self:
        """
        Create a new, empty biglist.

        Parameters
        ----------
        path
            Passed on to :meth:`BiglistBase.new`.
        batch_size
            Max number of data elements in each persisted data file.
            There's no good default value for this parameter, although one is
            provided (currently the default is 1000),
            because the code of :meth:`new` doesn't know
            the typical size of the data elements. User is recommended to
            specify the value of this parameter.

            In choosing a value for ``batch_size``, the most important
            consideration is the size of each data file, which is determined
            by the typical size of the data elements as well as ``batch_size``,
            which is the upper bound of the number of elements in each file.

            - It should not be so small that file reading/writing is a large
              overhead relative to actual processing of the data
              (especially when ``path`` is cloud storage).
            - It should not be so large that it is "unwieldy", e.g. approaching 1GB.
            - When :meth:`__iter__`\\ating, up to (by default) 4 files-worth of data
              can be in memory at any time (``self._n_read_threads`` plus 1).
            - When :meth:`append`\\ing or :meth:`extend`\\ing at high speed,
              up to (by default) 9 times ``batch_size`` elements can be in memory
              (``self._n_write_threads`` plus 1). See :meth:`_flush` and
              :class:`~biglist._biglist.Dumper`.

            A rule of thumb: keep the persisted files between 32-128MB in size.
            (Note: no benchmark was performed to back this recommendation.)
        storage_format
            This should be a key in :data:`registered_storage_formats`.
            If not specified, :data:`DEFAULT_STORAGE_FORMAT` is used.
        serialize_kwargs
            Additional keyword arguments to the serialization function.
        deserialize_kwargs
            Additional keyword arguments to the deserialization function.

            ``serialize_kwargs`` and ``deserialize_kwargs`` are rarely needed.
            One use case is ``schema`` when storage format is "parquet".
            See :class:`~biglist._biglist.ParquetSerializer`.

            If not ``None``, they are saved in the "info.json" file, hence they
            must be JSON serializable. If this is not possible, define a custom
            serialization class and register it with :meth:`register_storage_format`.
        **kwargs
            additional arguments are passed on to :meth:`BiglistBase.new`.

        Returns
        -------
        Biglist
            A new :class:`Biglist` object.
        """
        if not batch_size:
            batch_size = 1000
            warnings.warn(
                'The default batch-size, 1000, may not be optimal for your use case; consider using the parameter ``batch_size``.'
            )
        else:
            assert batch_size > 0
        if storage_format is None:
            storage_format = cls.DEFAULT_STORAGE_FORMAT
        if storage_format.replace('_', '-') not in cls.registered_storage_formats:
            raise ValueError(f"invalid value of `storage_format`: '{storage_format}'")
        init_info = {
            **(init_info or {}),
            'storage_format': storage_format.replace('_', '-'),
            'storage_version': 3,
            # `storage_version` is a flag for certain breaking changes in the implementation,
            # such that certain parts of the code (mainly concerning I/O) need to
            # branch into different treatments according to the version.
            # This has little relation to `storage_format`.
            # version 0 designator introduced on 2022/3/8
            # version 1 designator introduced on 2022/7/25
            # version 2 designator introduced in version 0.7.4.
            # version 3 designator introduced in version 0.7.7.
            'batch_size': batch_size,
            'data_files_info': [],
        }
        if serialize_kwargs:
            init_info['serialize_kwargs'] = serialize_kwargs
        if deserialize_kwargs:
            init_info['deserialize_kwargs'] = deserialize_kwargs
        obj = super().new(path, init_info=init_info, **kwargs)  # type: ignore
        return obj
    def __init__(
        self,
        *args,
        **kwargs,
    ):
        """
        Open an existing biglist (or one just created by :meth:`new`).

        Please see the base class for additional documentation.
        """
        super().__init__(*args, **kwargs)
        self.keep_files: bool = True
        """Indicates whether the persisted files should be kept or deleted when the object is garbage-collected."""
        self._append_buffer: list = []
        self._append_files_buffer: list = []
        self._file_dumper = None
        self._n_write_threads = 4
        """This value affects memory demand during quick "appending" (and flushing/dumping in the background).
        If the memory consumption of each batch is large, you could manually set this to a lower value, like::
            lst = Biglist(path)
            lst._n_write_threads = 4
        """
        self._serialize_kwargs = self.info.get('serialize_kwargs', {})
        self._deserialize_kwargs = self.info.get('deserialize_kwargs', {})
        if self.storage_format == 'parquet' and 'schema_spec' in self._serialize_kwargs:
            # Build the schema so that it does not need to be done each time the function
            # ``ParquetSerializer.serialize`` is called. Maybe this does not matter.
            assert 'schema' not in self._serialize_kwargs
            kk = copy.deepcopy(self._serialize_kwargs)
            kk['schema'] = make_parquet_schema(kk['schema_spec'])
            del kk['schema_spec']
            self._serialize_kwargs = kk
        # NOTE(review): presumably a module-level registry of live instances — confirm its use.
        _biglist_objs.add(self)
        # For back compat.
        if self.info.get('storage_version', 0) < 3:
            # This is not called by ``new``, instead is opening an existing dataset.
            # Usually these legacy datasets are in a "read-only" mode, i.e., you should
            # not append more data to them. If you do, the back-compat code below
            # may not be totally reliable if the dataset is being used by multiple workers
            # concurrently.
            if 'data_files_info' not in self.info:  # added in 0.7.4
                if self.storage_version == 0:
                    # This may not be totally reliable in every scenario.
                    # The older version had a parameter `lazy`, which is gone now.
                    # After some time we may stop supporting this storage version. (7/27/2022)
                    # However, as long as older datasets are in a "read-only" status,
                    # this is fine.
                    try:
                        data_info_file = self.path / 'datafiles_info.json'
                        data_files = data_info_file.read_json()
                        # A list of tuples, (file_name, item_count)
                    except FileNotFoundError:
                        data_files = []
                elif self.storage_version == 1:
                    # Starting with storage_version 1, data file name is
                    # <timestamp>_<uuid>_<itemcount>.<ext>
                    # <timestamp> contains a '.', no '_';
                    # <uuid> contains '-', no '_';
                    # <itemcount> contains no '-' nor '_';
                    # <ext> may contain '_'.
                    files0 = (v.name for v in self.data_path.iterdir())
                    files1 = (v.split('_') + [v] for v in files0)
                    files2 = (
                        (float(v[0]), v[-1], int(v[2].partition('.')[0]))
                        # timestamp, file name, item count
                        for v in files1
                    )
                    files = sorted(files2)  # sort by timestamp
                    if files:
                        data_files = [
                            (v[1], v[2]) for v in files
                        ]  # file name, item count
                    else:
                        data_files = []
                else:
                    pass
                # `storage_version == 2 already has `data_files_info` in `self.info`,
                # but its format may be bad, hence it's included below.
                if data_files:
                    data_files_cumlength = list(
                        itertools.accumulate(v[1] for v in data_files)
                    )
                    data_files_info = [
                        (filename, count, cumcount)
                        for (filename, count), cumcount in zip(
                            data_files, data_files_cumlength
                        )
                    ]
                    # Each element of the list is a tuple containing file name, item count in file, and cumsum of item counts.
                else:
                    data_files_info = []
                self.info['data_files_info'] = data_files_info
                with self._info_file.lock() as ff:
                    ff.write_json(self.info, overwrite=True)
            else:
                # Added in 0.7.5: check for a bug introduced in 0.7.4.
                # Convert full path to file name.
                # Version 0.7.4 was used very briefly, hence very few datasets
                # were created by that version.
                data_files_info = self.info['data_files_info']
                if data_files_info:
                    new_info = None
                    if os.name == 'nt' and '\\' in data_files_info[0][0]:
                        new_info = [
                            (f[(f.rfind('\\') + 1) :], *_) for f, *_ in data_files_info
                        ]
                    elif '/' in data_files_info[0][0]:
                        new_info = [
                            (f[(f.rfind('/') + 1) :], *_) for f, *_ in data_files_info
                        ]
                    if new_info:
                        self.info['data_files_info'] = new_info
                        with self._info_file.lock() as ff:
                            ff.write_json(self.info, overwrite=True)
def __del__(self) -> None:
if getattr(self, 'keep_files', True) is False:
self.destroy(concurrent=False)
else:
self._warn_flush()
self.flush()
    @property
    def batch_size(self) -> int:
        """The max number of data items in one data file."""
        return self.info['batch_size']

    @property
    def data_path(self) -> Upath:
        """Directory (under ``self.path``) that holds the persisted data files."""
        return self.path / 'store'

    @property
    def storage_format(self) -> str:
        """The value of ``storage_format`` used in :meth:`new`, either user-specified or the default value."""
        # Underscores were historically allowed in format names; normalize to dashes.
        return self.info['storage_format'].replace('_', '-')

    @property
    def storage_version(self) -> int:
        """The internal format used in persistence. This is a read-only attribute for information only."""
        # Legacy datasets written before the version flag existed default to 0.
        return self.info.get('storage_version', 0)
def _warn_flush(self):
if self._append_buffer or self._append_files_buffer:
warnings.warn(
f"did you forget to flush {self.__class__.__name__} at '{self.path}'?"
)
def __len__(self) -> int:
"""
Number of data items in this biglist.
If data is being appended to this biglist, then this method only includes the items
that have been "flushed" to storage. Data items in the internal memory buffer
are not counted. The buffer is empty upon calling :meth:`_flush` (internally and automatically)
or :meth:`flush` (explicitly by user).
.. versionchanged:: 0.7.4
In previous versions, this count includes items that are not yet flushed.
"""
self._warn_flush()
return super().__len__()
def __getitem__(self, idx: int) -> Element:
"""
Access a data item by its index; negative index works as expected.
Items not yet "flushed" are not accessible by this method.
They are considered "invisible" to this method.
Similarly, negative ``idx`` operates in the range of flushed items only.
.. versionchanged:: 0.7.4
In previous versions, the accessible items include those that are not yet flushed.
"""
return super().__getitem__(idx)
def __iter__(self) -> Iterator[Element]:
"""
Iterate over all the elements.
Items that are not yet "flushed" are invisible to this iteration.
.. versionchanged:: 0.7.4
In previous versions, this iteration includes those items that are not yet flushed.
"""
self._warn_flush()
return super().__iter__()
def append(self, x: Element) -> None:
"""
Append a single element to the :class:`Biglist`.
In implementation, this appends to an in-memory buffer.
Once the buffer size reaches :data:`batch_size`, the buffer's content
will be persisted as a new data file, and the buffer will re-start empty.
In other words, whenever the buffer is non-empty,
its content is not yet persisted.
You can append data to a common biglist from multiple processes.
In the processes, use independent ``Biglist`` objects that point to the same "path".
Each of the objects will maintain its own in-memory buffer and save its own files once the buffer
fills up. Remember to :meth:`flush` at the end of work in each process.
"""
self._append_buffer.append(x)
if len(self._append_buffer) >= self.batch_size:
self._flush()
def extend(self, x: Iterable[Element]) -> None:
"""This simply calls :meth:`append` repeatedly."""
for v in x:
self.append(v)
    def make_file_name(self, buffer_len: int, extra: str = '') -> str:
        """
        Construct the file name of a new data file.

        If you need to customize this method for any reason, you should do it via ``extra``
        and keep the other patterns unchanged.
        The string ``extra`` will appear between other fixed patterns in the file name.
        One possible usecase is this: in distributed writing, you want files written by different workers
        to be distinguishable by the file names. Do something like this::
            def worker(datapath: str, worker_id: str, ...):
                out = Biglist(datapath)
                _make_file_name = out.make_file_name
                out.make_file_name = lambda buffer_len: _make_file_name(buffer_len, worker_id)
                ...
        """
        if extra:
            extra = extra.lstrip('_').rstrip('_') + '_'
        # NOTE(review): ``datetime.utcnow`` is deprecated since Python 3.12;
        # consider ``datetime.now(timezone.utc)`` — confirm supported Python versions.
        return f"{datetime.utcnow().strftime('%Y%m%d%H%M%S.%f')}_{extra}{str(uuid4()).replace('-', '')[:10]}_{buffer_len}"
        # File name pattern introduced on 7/25/2022.
        # This should guarantee the file name is unique, hence
        # we do not need to verify that this file name is not already used.
        # Also include timestamp and item count in the file name, in case
        # later we decide to use these pieces of info.
        # Changes in 0.7.4: the time part changes from epoch to datetime, with guaranteed fixed length.
        # Change in 0.8.4: the uuid part has dash removed and length reduced to 10; add ``extra``.
    def _flush(self) -> None:
        """
        Persist the content of the in-memory buffer to a file,
        reset the buffer, and update relevant book-keeping variables.
        This method is called any time the size of the in-memory buffer
        reaches ``self.batch_size``. This happens w/o the user's intervention.
        """
        if not self._append_buffer:
            # Called by `self.flush`.
            return
        buffer = self._append_buffer
        buffer_len = len(buffer)
        self._append_buffer = []
        # File extension uses '_' where the format name uses '-'.
        datafile_ext = self.storage_format.replace('-', '_')
        filename = f'{self.make_file_name(buffer_len)}.{datafile_ext}'
        data_file = self.data_path / filename
        if self._file_dumper is None:
            self._file_dumper = Dumper(self._get_thread_pool(), self._n_write_threads)
        # Queue the write on background threads.
        self._file_dumper.dump_file(
            self.registered_storage_formats[self.storage_format].dump,
            buffer,
            data_file,
            **self._serialize_kwargs,
        )
        # This call will return quickly if the dumper has queue
        # capacity for the file. The file meta data below
        # will be updated as if the saving has completed, although
        # it hasn't (it is only queued). If dumping failed, the entry
        # will be deleted in `flush()`.
        self._append_files_buffer.append((filename, buffer_len))
    def flush(self, *, lock_timeout=300, raise_on_write_error: bool = True) -> None:
        """
        Persist any buffered data and merge this worker's new data files into the meta info file.

        :meth:`_flush` is called automatically whenever the "append buffer"
        is full, so to persist the data and empty the buffer.
        (The capacity of this buffer is equal to ``self.batch_size``.)
        However, if this buffer is only partially filled when the user is done
        adding elements to the biglist, the data in the buffer will not be persisted.
        This is the first reason that user should call ``flush`` when they are done
        adding data (via :meth:`append` or :meth:`extend`).

        Although :meth:`_flush` creates new data files, it does not update the "meta info file"
        (``info.json`` in the root of ``self.path``) to include the new data files;
        it only updates the in-memory ``self.info``. This is for efficiency reasons,
        because updating ``info.json`` involves locking.
        Updating ``info.json`` to include new data files (created due to :meth:`append` and :meth:`extend`)
        is performed by :meth:`flush`.
        This is the second reason that user should call :meth:`flush` at the end of their
        data writing session, regardless of whether all the new data have been persisted
        in data files. (They would be if their count happens to be a multiple of ``self.batch_size``.)

        If there are multiple workers adding data to this biglist at the same time
        (from multiple processes or machines), data added by one worker will not be visible
        to another worker until the writing worker calls :meth:`flush` and the reading worker
        calls :meth:`reload`.
        Further, user should assume that data not yet persisted (i.e. still in "append buffer")
        are not visible to data reading via :meth:`__getitem__` or :meth:`__iter__` and not included in
        :meth:`__len__`, even to the same worker.

        In summary, call :meth:`flush` when

        - You are done adding data (for this "session")
        - or you need to start reading data

        :meth:`flush` has overhead. You should call it only in the two situations above.
        **Do not** call it frequently "just to be safe".

        After a call to ``flush()``, there's no problem to add more elements again by
        :meth:`append` or :meth:`extend`. Data files created by ``flush()`` with less than
        :data:`batch_size` elements will stay as is among larger files.
        This is a legitimate case in parallel or distributed writing, or writing in
        multiple sessions.
        """
        self._flush()
        if self._file_dumper is not None:
            errors = self._file_dumper.wait(raise_on_error=raise_on_write_error)
            if errors:
                for file, e in errors:
                    logger.error('failed to write file %s: %r', file, e)
                    fname = file.name
                    # Drop the failed file's entry from the pending meta-info updates.
                    for i, (f, _) in enumerate(self._append_files_buffer):
                        if f == fname:
                            self._append_files_buffer.pop(i)
                            break
                    # Best-effort cleanup of a partially-written file.
                    if file.exists():
                        try:
                            file.remove_file()
                        except Exception as e:
                            logger.error('failed to delete file %s: %r', file, e)
        # Other workers in other threads, processes, or machines may have appended data
        # to the list. This block merges the appends by the current worker with
        # appends by other workers. The last call to ``flush`` across all workers
        # will get the final meta info right.
        if self._append_files_buffer:
            with self._info_file.lock(timeout=lock_timeout) as ff:
                z0 = ff.read_json()['data_files_info']
                z = sorted(
                    set((*(tuple(v[:2]) for v in z0), *self._append_files_buffer))
                )
                # TODO: maybe a merge sort can be more efficient.
                cum = list(itertools.accumulate(v[1] for v in z))
                z = [(a, b, c) for (a, b), c in zip(z, cum)]
                self.info['data_files_info'] = z
                ff.write_json(self.info, overwrite=True)
            self._append_files_buffer.clear()
def reload(self) -> None:
"""
Reload the meta info.
This is used in this scenario: suppose we have this object pointing to a biglist
on the local disk; another object in another process is appending data to the same biglist
(that is, it points to the same storage location); then after a while, the meta info file
on the disk has been modified by the other process, hence the current object is out-dated;
calling this method will bring it up to date. The same idea applies if the storage is
in the cloud, and another machine is appending data to the same remote biglist.
Creating a new object pointing to the same storage location would achieve the same effect.
"""
self.info = self._info_file.read_json()
@property
def files(self):
# This method should be cheap to call.
# TODO: call `reload`?
self._warn_flush()
serde = self.registered_storage_formats[self.storage_format]
fun = serde.load
if self._deserialize_kwargs:
fun = functools.partial(fun, **self._deserialize_kwargs)
return BiglistFileSeq(
self.path,
[
(str(self.data_path / row[0]), *row[1:])
for row in self.info['data_files_info']
],
fun,
)
|
(*args, **kwargs)
|
709,197
|
biglist._biglist
|
__del__
| null |
def __del__(self) -> None:
    """On garbage collection: destroy storage unless ``keep_files``, else flush pending data."""
    keep = getattr(self, 'keep_files', True)
    if keep is False:
        self.destroy(concurrent=False)
    else:
        self._warn_flush()
        self.flush()
|
(self) -> NoneType
|
709,198
|
biglist._biglist
|
__getitem__
|
Access a data item by its index; negative index works as expected.
Items not yet "flushed" are not accessible by this method.
They are considered "invisible" to this method.
Similarly, negative ``idx`` operates in the range of flushed items only.
.. versionchanged:: 0.7.4
In previous versions, the accessible items include those that are not yet flushed.
|
def __getitem__(self, idx: int) -> Element:
    """
    Access one data item by index (negative indices supported).

    Only items already flushed to storage are reachable; buffered,
    un-flushed items are invisible here, and negative ``idx`` ranges over
    the flushed items only.

    .. versionchanged:: 0.7.4
        In previous versions, the accessible items include those that are not yet flushed.
    """
    return super().__getitem__(idx)
|
(self, idx: int) -> ~Element
|
709,199
|
biglist._biglist
|
__getstate__
| null |
def __getstate__(self):
    """Pickle support: the object is fully described by its storage path."""
    return (self.path,)
|
(self)
|
709,200
|
biglist._biglist
|
__init__
|
Please see the base class for additional documentation.
|
def __init__(
    self,
    *args,
    **kwargs,
):
    """
    Open an existing biglist (or one just created by :meth:`new`).

    Please see the base class for additional documentation.
    """
    super().__init__(*args, **kwargs)
    self.keep_files: bool = True
    """Indicates whether the persisted files should be kept or deleted when the object is garbage-collected."""
    self._append_buffer: list = []
    self._append_files_buffer: list = []
    self._file_dumper = None
    self._n_write_threads = 4
    """This value affects memory demand during quick "appending" (and flushing/dumping in the background).
    If the memory consumption of each batch is large, you could manually set this to a lower value, like::
        lst = Biglist(path)
        lst._n_write_threads = 4
    """
    self._serialize_kwargs = self.info.get('serialize_kwargs', {})
    self._deserialize_kwargs = self.info.get('deserialize_kwargs', {})
    if self.storage_format == 'parquet' and 'schema_spec' in self._serialize_kwargs:
        # Build the schema so that it does not need to be done each time the function
        # ``ParquetSerializer.serialize`` is called. Maybe this does not matter.
        assert 'schema' not in self._serialize_kwargs
        kk = copy.deepcopy(self._serialize_kwargs)
        kk['schema'] = make_parquet_schema(kk['schema_spec'])
        del kk['schema_spec']
        self._serialize_kwargs = kk
    _biglist_objs.add(self)
    # For back compat.
    if self.info.get('storage_version', 0) < 3:
        # This is not called by ``new``, instead is opening an existing dataset.
        # Usually these legacy datasets are in a "read-only" mode, i.e., you should
        # not append more data to them. If you do, the back-compat code below
        # may not be totally reliable if the dataset is being used by multiple workers
        # concurrently.
        if 'data_files_info' not in self.info:  # added in 0.7.4
            if self.storage_version == 0:
                # This may not be totally reliable in every scenario.
                # The older version had a parameter `lazy`, which is gone now.
                # After some time we may stop supporting this storage version. (7/27/2022)
                # However, as long as older datasets are in a "read-only" status,
                # this is fine.
                try:
                    data_info_file = self.path / 'datafiles_info.json'
                    data_files = data_info_file.read_json()
                    # A list of tuples, (file_name, item_count)
                except FileNotFoundError:
                    data_files = []
            elif self.storage_version == 1:
                # Starting with storage_version 1, data file name is
                # <timestamp>_<uuid>_<itemcount>.<ext>
                # <timestamp> contains a '.', no '_';
                # <uuid> contains '-', no '_';
                # <itemcount> contains no '-' nor '_';
                # <ext> may contain '_'.
                files0 = (v.name for v in self.data_path.iterdir())
                files1 = (v.split('_') + [v] for v in files0)
                files2 = (
                    (float(v[0]), v[-1], int(v[2].partition('.')[0]))
                    # timestamp, file name, item count
                    for v in files1
                )
                files = sorted(files2)  # sort by timestamp
                if files:
                    data_files = [
                        (v[1], v[2]) for v in files
                    ]  # file name, item count
                else:
                    data_files = []
            else:
                pass
            # `storage_version == 2 already has `data_files_info` in `self.info`,
            # but its format may be bad, hence it's included below.
            if data_files:
                data_files_cumlength = list(
                    itertools.accumulate(v[1] for v in data_files)
                )
                data_files_info = [
                    (filename, count, cumcount)
                    for (filename, count), cumcount in zip(
                        data_files, data_files_cumlength
                    )
                ]
                # Each element of the list is a tuple containing file name, item count in file, and cumsum of item counts.
            else:
                data_files_info = []
            self.info['data_files_info'] = data_files_info
            with self._info_file.lock() as ff:
                ff.write_json(self.info, overwrite=True)
        else:
            # Added in 0.7.5: check for a bug introduced in 0.7.4.
            # Convert full path to file name.
            # Version 0.7.4 was used very briefly, hence very few datasets
            # were created by that version.
            data_files_info = self.info['data_files_info']
            if data_files_info:
                new_info = None
                if os.name == 'nt' and '\\' in data_files_info[0][0]:
                    new_info = [
                        (f[(f.rfind('\\') + 1) :], *_) for f, *_ in data_files_info
                    ]
                elif '/' in data_files_info[0][0]:
                    new_info = [
                        (f[(f.rfind('/') + 1) :], *_) for f, *_ in data_files_info
                    ]
                if new_info:
                    self.info['data_files_info'] = new_info
                    with self._info_file.lock() as ff:
                        ff.write_json(self.info, overwrite=True)
|
(self, *args, **kwargs)
|
709,201
|
biglist._biglist
|
__iter__
|
Iterate over all the elements.
Items that are not yet "flushed" are invisible to this iteration.
.. versionchanged:: 0.7.4
In previous versions, this iteration includes those items that are not yet flushed.
|
def __iter__(self) -> Iterator[Element]:
    """
    Iterate over all flushed elements.

    Buffered, un-flushed items do not appear in this iteration.

    .. versionchanged:: 0.7.4
        In previous versions, this iteration includes those items that are not yet flushed.
    """
    self._warn_flush()
    return super().__iter__()
|
(self) -> collections.abc.Iterator[~Element]
|
709,202
|
biglist._biglist
|
__len__
|
Number of data items in this biglist.
If data is being appended to this biglist, then this method only includes the items
that have been "flushed" to storage. Data items in the internal memory buffer
are not counted. The buffer is empty upon calling :meth:`_flush` (internally and automatically)
or :meth:`flush` (explicitly by user).
.. versionchanged:: 0.7.4
In previous versions, this count includes items that are not yet flushed.
|
def __len__(self) -> int:
    """
    Number of data items persisted in this biglist.

    Items still in the internal memory buffer are excluded; the buffer is
    emptied by :meth:`_flush` (automatic) or :meth:`flush` (explicit).

    .. versionchanged:: 0.7.4
        In previous versions, this count includes items that are not yet flushed.
    """
    self._warn_flush()
    return super().__len__()
|
(self) -> int
|
709,203
|
biglist._biglist
|
__repr__
| null |
def __repr__(self):
    """Concise description: class name, storage path, element count, file count."""
    cls_name = self.__class__.__name__
    return f"<{cls_name} at '{self.path}' with {self.num_data_items} elements in {self.num_data_files} data file(s)>"
|
(self)
|
709,204
|
biglist._biglist
|
__setstate__
| null |
def __setstate__(self, data):
    """Unpickle support: re-open the biglist at the stored path via ``__init__``."""
    self.__init__(data[0])
|
(self, data)
|
709,207
|
biglist._biglist
|
_flush
|
Persist the content of the in-memory buffer to a file,
reset the buffer, and update relevant book-keeping variables.
This method is called any time the size of the in-memory buffer
reaches ``self.batch_size``. This happens w/o the user's intervention.
|
def _flush(self) -> None:
    """
    Persist the content of the in-memory buffer to a file,
    reset the buffer, and update relevant book-keeping variables.
    This method is called any time the size of the in-memory buffer
    reaches ``self.batch_size``. This happens w/o the user's intervention.
    """
    if not self._append_buffer:
        # Called by `self.flush`.
        return
    buffer = self._append_buffer
    buffer_len = len(buffer)
    self._append_buffer = []
    # File extension uses '_' where the format name uses '-'.
    datafile_ext = self.storage_format.replace('-', '_')
    filename = f'{self.make_file_name(buffer_len)}.{datafile_ext}'
    data_file = self.data_path / filename
    if self._file_dumper is None:
        self._file_dumper = Dumper(self._get_thread_pool(), self._n_write_threads)
    # Queue the write on background threads.
    self._file_dumper.dump_file(
        self.registered_storage_formats[self.storage_format].dump,
        buffer,
        data_file,
        **self._serialize_kwargs,
    )
    # This call will return quickly if the dumper has queue
    # capacity for the file. The file meta data below
    # will be updated as if the saving has completed, although
    # it hasn't (it is only queued). If dumping failed, the entry
    # will be deleted in `flush()`.
    self._append_files_buffer.append((filename, buffer_len))
|
(self) -> NoneType
|
709,208
|
biglist._biglist
|
_get_thread_pool
| null |
def _get_thread_pool(self):
    """Return the cached thread pool, creating it on first use."""
    pool = self._thread_pool_
    if pool is None:
        pool = get_global_thread_pool()
        self._thread_pool_ = pool
    return pool
|
(self)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.