| sample_id (string, length 21–196) | text (string, length 105–936k) | metadata (dict) | category (6 classes) |
|---|---|---|---|
getzep/graphiti:graphiti_core/driver/neptune/operations/entity_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.entity_edge_ops import EntityEdgeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.driver.record_parsers import entity_edge_from_record
from graphiti_core.edges import EntityEdge
from graphiti_core.errors import EdgeNotFoundError
from graphiti_core.models.edges.edge_db_queries import (
get_entity_edge_return_query,
get_entity_edge_save_bulk_query,
get_entity_edge_save_query,
)
logger = logging.getLogger(__name__)
class NeptuneEntityEdgeOperations(EntityEdgeOperations):
    """Entity-edge (``RELATES_TO``) persistence operations for Amazon Neptune.

    Every mutating method accepts an optional ``tx``; when provided, the
    statement runs inside that transaction via ``tx.run``, otherwise it is
    executed standalone through ``executor.execute_query``.
    """

    async def save(
        self,
        executor: QueryExecutor,
        edge: EntityEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Persist a single entity edge, flattening custom attributes into the row."""
        # NOTE(review): this map uses 'source_uuid'/'target_uuid' while
        # save_bulk uses 'source_node_uuid'/'target_node_uuid' — presumably
        # each matches its own query template; confirm against edge_db_queries.
        edge_data: dict[str, Any] = {
            'uuid': edge.uuid,
            'source_uuid': edge.source_node_uuid,
            'target_uuid': edge.target_node_uuid,
            'name': edge.name,
            'fact': edge.fact,
            'fact_embedding': edge.fact_embedding,
            'group_id': edge.group_id,
            'episodes': edge.episodes,
            'created_at': edge.created_at,
            'expired_at': edge.expired_at,
            'valid_at': edge.valid_at,
            'invalid_at': edge.invalid_at,
        }
        # User-defined attributes share the same flat parameter namespace.
        edge_data.update(edge.attributes or {})
        query = get_entity_edge_save_query(GraphProvider.NEPTUNE)
        if tx is not None:
            await tx.run(query, edge_data=edge_data)
        else:
            await executor.execute_query(query, edge_data=edge_data)
        logger.debug(f'Saved Edge to Graph: {edge.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        edges: list[EntityEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Persist many entity edges in one statement.

        ``batch_size`` is accepted for interface compatibility but not used
        here: all prepared rows are sent in a single query.
        """
        prepared: list[dict[str, Any]] = []
        for edge in edges:
            edge_data: dict[str, Any] = {
                'uuid': edge.uuid,
                'source_node_uuid': edge.source_node_uuid,
                'target_node_uuid': edge.target_node_uuid,
                'name': edge.name,
                'fact': edge.fact,
                'fact_embedding': edge.fact_embedding,
                'group_id': edge.group_id,
                'episodes': edge.episodes,
                'created_at': edge.created_at,
                'expired_at': edge.expired_at,
                'valid_at': edge.valid_at,
                'invalid_at': edge.invalid_at,
            }
            edge_data.update(edge.attributes or {})
            prepared.append(edge_data)
        query = get_entity_edge_save_bulk_query(GraphProvider.NEPTUNE)
        if tx is not None:
            await tx.run(query, entity_edges=prepared)
        else:
            await executor.execute_query(query, entity_edges=prepared)

    async def delete(
        self,
        executor: QueryExecutor,
        edge: EntityEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Delete one edge by uuid, matching any of the three edge types."""
        query = """
            MATCH (n)-[e:MENTIONS|RELATES_TO|HAS_MEMBER {uuid: $uuid}]->(m)
            DELETE e
        """
        if tx is not None:
            await tx.run(query, uuid=edge.uuid)
        else:
            await executor.execute_query(query, uuid=edge.uuid)
        logger.debug(f'Deleted Edge: {edge.uuid}')

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        """Delete every edge whose uuid appears in ``uuids``."""
        query = """
            MATCH (n)-[e:MENTIONS|RELATES_TO|HAS_MEMBER]->(m)
            WHERE e.uuid IN $uuids
            DELETE e
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> EntityEdge:
        """Fetch a single RELATES_TO edge by uuid.

        Raises:
            EdgeNotFoundError: if no edge with the given uuid exists.
        """
        query = """
            MATCH (n:Entity)-[e:RELATES_TO {uuid: $uuid}]->(m:Entity)
            RETURN
        """ + get_entity_edge_return_query(GraphProvider.NEPTUNE)
        records, _, _ = await executor.execute_query(query, uuid=uuid)
        edges = [entity_edge_from_record(r) for r in records]
        if len(edges) == 0:
            raise EdgeNotFoundError(uuid)
        return edges[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[EntityEdge]:
        """Fetch all RELATES_TO edges whose uuid is in ``uuids`` (may be empty)."""
        # Short-circuit to avoid an IN-[] query round trip.
        if not uuids:
            return []
        query = """
            MATCH (n:Entity)-[e:RELATES_TO]->(m:Entity)
            WHERE e.uuid IN $uuids
            RETURN
        """ + get_entity_edge_return_query(GraphProvider.NEPTUNE)
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        return [entity_edge_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EntityEdge]:
        """Page edges by group, ordered by uuid descending.

        ``uuid_cursor`` is an exclusive upper bound (``e.uuid < $uuid``) used
        for keyset pagination; ``limit`` caps the page size.
        """
        cursor_clause = 'AND e.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Entity)-[e:RELATES_TO]->(m:Entity)
            WHERE e.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + get_entity_edge_return_query(GraphProvider.NEPTUNE)
            + """
            ORDER BY e.uuid DESC
            """
            + limit_clause
        )
        # Unused parameters ($uuid/$limit) are still passed; the clauses that
        # reference them are only present when the values are set.
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
        )
        return [entity_edge_from_record(r) for r in records]

    async def get_between_nodes(
        self,
        executor: QueryExecutor,
        source_node_uuid: str,
        target_node_uuid: str,
    ) -> list[EntityEdge]:
        """Fetch all directed RELATES_TO edges from source node to target node."""
        query = """
            MATCH (n:Entity {uuid: $source_node_uuid})-[e:RELATES_TO]->(m:Entity {uuid: $target_node_uuid})
            RETURN
        """ + get_entity_edge_return_query(GraphProvider.NEPTUNE)
        records, _, _ = await executor.execute_query(
            query,
            source_node_uuid=source_node_uuid,
            target_node_uuid=target_node_uuid,
        )
        return [entity_edge_from_record(r) for r in records]

    async def get_by_node_uuid(
        self,
        executor: QueryExecutor,
        node_uuid: str,
    ) -> list[EntityEdge]:
        """Fetch all RELATES_TO edges touching the node, in either direction."""
        query = """
            MATCH (n:Entity {uuid: $node_uuid})-[e:RELATES_TO]-(m:Entity)
            RETURN
        """ + get_entity_edge_return_query(GraphProvider.NEPTUNE)
        records, _, _ = await executor.execute_query(query, node_uuid=node_uuid)
        return [entity_edge_from_record(r) for r in records]

    async def load_embeddings(
        self,
        executor: QueryExecutor,
        edge: EntityEdge,
    ) -> None:
        """Populate ``edge.fact_embedding`` in place from the stored value.

        Neptune stores the embedding as a comma-separated string; the query
        splits and casts it back to a float list.

        Raises:
            EdgeNotFoundError: if the edge is not present in the graph.
        """
        query = """
            MATCH (n:Entity)-[e:RELATES_TO {uuid: $uuid}]->(m:Entity)
            RETURN [x IN split(e.fact_embedding, ",") | toFloat(x)] AS fact_embedding
        """
        records, _, _ = await executor.execute_query(query, uuid=edge.uuid)
        if len(records) == 0:
            raise EdgeNotFoundError(edge.uuid)
        edge.fact_embedding = records[0]['fact_embedding']

    async def load_embeddings_bulk(
        self,
        executor: QueryExecutor,
        edges: list[EntityEdge],
        batch_size: int = 100,
    ) -> None:
        """Populate embeddings for many edges in one query.

        Edges whose uuid is missing from the result set are silently left
        unchanged (no error is raised, unlike ``load_embeddings``).
        """
        uuids = [e.uuid for e in edges]
        query = """
            MATCH (n:Entity)-[e:RELATES_TO]-(m:Entity)
            WHERE e.uuid IN $edge_uuids
            RETURN DISTINCT e.uuid AS uuid, [x IN split(e.fact_embedding, ",") | toFloat(x)] AS fact_embedding
        """
        records, _, _ = await executor.execute_query(query, edge_uuids=uuids)
        embedding_map = {r['uuid']: r['fact_embedding'] for r in records}
        for edge in edges:
            if edge.uuid in embedding_map:
                edge.fact_embedding = embedding_map[edge.uuid]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neptune/operations/entity_edge_ops.py",
"license": "Apache License 2.0",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neptune/operations/entity_node_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.entity_node_ops import EntityNodeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.driver.record_parsers import entity_node_from_record
from graphiti_core.errors import NodeNotFoundError
from graphiti_core.models.nodes.node_db_queries import (
get_entity_node_return_query,
get_entity_node_save_bulk_query,
get_entity_node_save_query,
)
from graphiti_core.nodes import EntityNode
logger = logging.getLogger(__name__)
class NeptuneEntityNodeOperations(EntityNodeOperations):
    """Entity-node persistence operations for Amazon Neptune.

    Mutating methods accept an optional ``tx``; when set, statements run via
    ``tx.run`` inside that transaction, otherwise standalone through the
    executor.
    """

    async def save(
        self,
        executor: QueryExecutor,
        node: EntityNode,
        tx: Transaction | None = None,
    ) -> None:
        """Persist one entity node, merging custom attributes and labels."""
        entity_data: dict[str, Any] = {
            'uuid': node.uuid,
            'name': node.name,
            'name_embedding': node.name_embedding,
            'group_id': node.group_id,
            'summary': node.summary,
            'created_at': node.created_at,
        }
        # Custom attributes share the node's flat property namespace.
        entity_data.update(node.attributes or {})
        # Every entity carries the 'Entity' label; dedupe before joining.
        labels = ':'.join(list(set(node.labels + ['Entity'])))
        query = get_entity_node_save_query(GraphProvider.NEPTUNE, labels)
        if tx is not None:
            await tx.run(query, entity_data=entity_data)
        else:
            await executor.execute_query(query, entity_data=entity_data)
        logger.debug(f'Saved Node to Graph: {node.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[EntityNode],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Persist many entity nodes.

        ``batch_size`` is accepted for interface compatibility but unused —
        the bulk query builder may split work into multiple queries itself,
        each of which receives the full prepared list.
        """
        prepared: list[dict[str, Any]] = []
        for node in nodes:
            entity_data: dict[str, Any] = {
                'uuid': node.uuid,
                'name': node.name,
                'group_id': node.group_id,
                'summary': node.summary,
                'created_at': node.created_at,
                'name_embedding': node.name_embedding,
                # Unlike save(), labels travel as a list property here.
                'labels': list(set(node.labels + ['Entity'])),
            }
            entity_data.update(node.attributes or {})
            prepared.append(entity_data)
        queries = get_entity_node_save_bulk_query(GraphProvider.NEPTUNE, prepared)
        for query in queries:
            if tx is not None:
                await tx.run(query, nodes=prepared)
            else:
                await executor.execute_query(query, nodes=prepared)

    async def delete(
        self,
        executor: QueryExecutor,
        node: EntityNode,
        tx: Transaction | None = None,
    ) -> None:
        """Detach-delete one node by uuid across all graphiti node labels."""
        query = """
            MATCH (n {uuid: $uuid})
            WHERE n:Entity OR n:Episodic OR n:Community
            DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, uuid=node.uuid)
        else:
            await executor.execute_query(query, uuid=node.uuid)
        logger.debug(f'Deleted Node: {node.uuid}')

    async def delete_by_group_id(
        self,
        executor: QueryExecutor,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Detach-delete every Entity node in the group (single statement)."""
        query = """
            MATCH (n:Entity {group_id: $group_id})
            DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, group_id=group_id)
        else:
            await executor.execute_query(query, group_id=group_id)

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Detach-delete every Entity node whose uuid is in ``uuids``."""
        query = """
            MATCH (n:Entity)
            WHERE n.uuid IN $uuids
            DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> EntityNode:
        """Fetch one entity node by uuid.

        Raises:
            NodeNotFoundError: if no node with the given uuid exists.
        """
        query = """
            MATCH (n:Entity {uuid: $uuid})
            RETURN
        """ + get_entity_node_return_query(GraphProvider.NEPTUNE)
        records, _, _ = await executor.execute_query(query, uuid=uuid)
        nodes = [entity_node_from_record(r) for r in records]
        if len(nodes) == 0:
            raise NodeNotFoundError(uuid)
        return nodes[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[EntityNode]:
        """Fetch all entity nodes whose uuid is in ``uuids``."""
        query = """
            MATCH (n:Entity)
            WHERE n.uuid IN $uuids
            RETURN
        """ + get_entity_node_return_query(GraphProvider.NEPTUNE)
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        return [entity_node_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EntityNode]:
        """Page entity nodes by group, ordered by uuid descending.

        ``uuid_cursor`` is an exclusive keyset-pagination bound; ``limit``
        caps the page size. Both clauses are added only when values are set.
        """
        cursor_clause = 'AND n.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Entity)
            WHERE n.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + get_entity_node_return_query(GraphProvider.NEPTUNE)
            + """
            ORDER BY n.uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
        )
        return [entity_node_from_record(r) for r in records]

    async def load_embeddings(
        self,
        executor: QueryExecutor,
        node: EntityNode,
    ) -> None:
        """Populate ``node.name_embedding`` in place.

        Neptune stores the embedding as a comma-separated string, which the
        query splits and casts back to floats.

        Raises:
            NodeNotFoundError: if the node is not present in the graph.
        """
        query = """
            MATCH (n:Entity {uuid: $uuid})
            RETURN [x IN split(n.name_embedding, ",") | toFloat(x)] AS name_embedding
        """
        records, _, _ = await executor.execute_query(query, uuid=node.uuid)
        if len(records) == 0:
            raise NodeNotFoundError(node.uuid)
        node.name_embedding = records[0]['name_embedding']

    async def load_embeddings_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[EntityNode],
        batch_size: int = 100,
    ) -> None:
        """Populate name embeddings for many nodes with a single query.

        Nodes whose uuid is absent from the result set are left unchanged
        (no error raised, unlike ``load_embeddings``).
        """
        uuids = [n.uuid for n in nodes]
        query = """
            MATCH (n:Entity)
            WHERE n.uuid IN $uuids
            RETURN DISTINCT n.uuid AS uuid, [x IN split(n.name_embedding, ",") | toFloat(x)] AS name_embedding
        """
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        embedding_map = {r['uuid']: r['name_embedding'] for r in records}
        for node in nodes:
            if node.uuid in embedding_map:
                node.name_embedding = embedding_map[node.uuid]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neptune/operations/entity_node_ops.py",
"license": "Apache License 2.0",
"lines": 209,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neptune/operations/episode_node_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from datetime import datetime
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.episode_node_ops import EpisodeNodeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.driver.record_parsers import episodic_node_from_record
from graphiti_core.errors import NodeNotFoundError
from graphiti_core.models.nodes.node_db_queries import (
EPISODIC_NODE_RETURN_NEPTUNE,
get_episode_node_save_bulk_query,
get_episode_node_save_query,
)
from graphiti_core.nodes import EpisodicNode
logger = logging.getLogger(__name__)
class NeptuneEpisodeNodeOperations(EpisodeNodeOperations):
    """Episodic-node persistence and retrieval operations for Amazon Neptune."""

    async def save(
        self,
        executor: QueryExecutor,
        node: EpisodicNode,
        tx: Transaction | None = None,
    ) -> None:
        """Persist one episodic node; parameters are passed individually."""
        query = get_episode_node_save_query(GraphProvider.NEPTUNE)
        params: dict[str, Any] = {
            'uuid': node.uuid,
            'name': node.name,
            'group_id': node.group_id,
            'source_description': node.source_description,
            'content': node.content,
            'entity_edges': node.entity_edges,
            'created_at': node.created_at,
            'valid_at': node.valid_at,
            # Store the enum's underlying value, not the enum member.
            'source': node.source.value,
        }
        if tx is not None:
            await tx.run(query, **params)
        else:
            await executor.execute_query(query, **params)
        logger.debug(f'Saved Episode to Graph: {node.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[EpisodicNode],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Persist many episodic nodes in one statement.

        ``batch_size`` is accepted for interface compatibility but unused.
        """
        episodes = []
        for node in nodes:
            # assumes EpisodicNode iterates as (field, value) pairs, i.e. a
            # pydantic model — TODO confirm against the EpisodicNode definition.
            ep = dict(node)
            ep['source'] = str(ep['source'].value)
            # 'labels' is not a persisted property; drop it if present.
            ep.pop('labels', None)
            episodes.append(ep)
        query = get_episode_node_save_bulk_query(GraphProvider.NEPTUNE)
        if tx is not None:
            await tx.run(query, episodes=episodes)
        else:
            await executor.execute_query(query, episodes=episodes)

    async def delete(
        self,
        executor: QueryExecutor,
        node: EpisodicNode,
        tx: Transaction | None = None,
    ) -> None:
        """Detach-delete one node by uuid across all graphiti node labels."""
        query = """
            MATCH (n {uuid: $uuid})
            WHERE n:Entity OR n:Episodic OR n:Community
            DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, uuid=node.uuid)
        else:
            await executor.execute_query(query, uuid=node.uuid)
        logger.debug(f'Deleted Node: {node.uuid}')

    async def delete_by_group_id(
        self,
        executor: QueryExecutor,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Detach-delete every Episodic node in the group."""
        query = """
            MATCH (n:Episodic {group_id: $group_id})
            DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, group_id=group_id)
        else:
            await executor.execute_query(query, group_id=group_id)

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Detach-delete every Episodic node whose uuid is in ``uuids``."""
        query = """
            MATCH (n:Episodic)
            WHERE n.uuid IN $uuids
            DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> EpisodicNode:
        """Fetch one episodic node by uuid.

        Raises:
            NodeNotFoundError: if no episode with the given uuid exists.
        """
        query = (
            """
            MATCH (e:Episodic {uuid: $uuid})
            RETURN
            """
            + EPISODIC_NODE_RETURN_NEPTUNE
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid)
        episodes = [episodic_node_from_record(r) for r in records]
        if len(episodes) == 0:
            raise NodeNotFoundError(uuid)
        return episodes[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[EpisodicNode]:
        """Fetch all episodic nodes whose uuid is in ``uuids``."""
        query = (
            """
            MATCH (e:Episodic)
            WHERE e.uuid IN $uuids
            RETURN DISTINCT
            """
            + EPISODIC_NODE_RETURN_NEPTUNE
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        return [episodic_node_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EpisodicNode]:
        """Page episodic nodes by group, ordered by uuid descending.

        ``uuid_cursor`` is an exclusive keyset-pagination bound; ``limit``
        caps the page size.
        """
        cursor_clause = 'AND e.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (e:Episodic)
            WHERE e.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN DISTINCT
            """
            + EPISODIC_NODE_RETURN_NEPTUNE
            + """
            ORDER BY uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
        )
        return [episodic_node_from_record(r) for r in records]

    async def get_by_entity_node_uuid(
        self,
        executor: QueryExecutor,
        entity_node_uuid: str,
    ) -> list[EpisodicNode]:
        """Fetch every episode that MENTIONS the given entity node."""
        query = (
            """
            MATCH (e:Episodic)-[r:MENTIONS]->(n:Entity {uuid: $entity_node_uuid})
            RETURN DISTINCT
            """
            + EPISODIC_NODE_RETURN_NEPTUNE
        )
        records, _, _ = await executor.execute_query(query, entity_node_uuid=entity_node_uuid)
        return [episodic_node_from_record(r) for r in records]

    async def retrieve_episodes(
        self,
        executor: QueryExecutor,
        reference_time: datetime,
        last_n: int = 3,
        group_ids: list[str] | None = None,
        source: str | None = None,
        saga: str | None = None,
    ) -> list[EpisodicNode]:
        """Return up to ``last_n`` episodes valid at or before ``reference_time``.

        Two retrieval modes:
        - Saga mode (``saga`` set AND at least one group id): episodes reached
          via the Saga's HAS_EPISODE edges. Only the FIRST group id is used
          to locate the Saga node.
        - Default mode: all episodes, optionally filtered by group ids and
          source. Results are newest-first by ``valid_at``.
        """
        if saga is not None and group_ids is not None and len(group_ids) > 0:
            source_clause = 'AND e.source = $source' if source else ''
            query = (
                """
                MATCH (s:Saga {name: $saga_name, group_id: $group_id})-[:HAS_EPISODE]->(e:Episodic)
                WHERE e.valid_at <= $reference_time
                """
                + source_clause
                + """
                RETURN
                """
                + EPISODIC_NODE_RETURN_NEPTUNE
                + """
                ORDER BY e.valid_at DESC
                LIMIT $num_episodes
                """
            )
            records, _, _ = await executor.execute_query(
                query,
                saga_name=saga,
                group_id=group_ids[0],
                reference_time=reference_time,
                source=source,
                num_episodes=last_n,
            )
        else:
            source_clause = 'AND e.source = $source' if source else ''
            group_clause = 'AND e.group_id IN $group_ids' if group_ids else ''
            query = (
                """
                MATCH (e:Episodic)
                WHERE e.valid_at <= $reference_time
                """
                + group_clause
                + source_clause
                + """
                RETURN
                """
                + EPISODIC_NODE_RETURN_NEPTUNE
                + """
                ORDER BY e.valid_at DESC
                LIMIT $num_episodes
                """
            )
            records, _, _ = await executor.execute_query(
                query,
                reference_time=reference_time,
                group_ids=group_ids,
                source=source,
                num_episodes=last_n,
            )
        return [episodic_node_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neptune/operations/episode_node_ops.py",
"license": "Apache License 2.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neptune/operations/episodic_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.episodic_edge_ops import EpisodicEdgeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.edges import EpisodicEdge
from graphiti_core.errors import EdgeNotFoundError
from graphiti_core.helpers import parse_db_date
from graphiti_core.models.edges.edge_db_queries import (
EPISODIC_EDGE_RETURN,
EPISODIC_EDGE_SAVE,
get_episodic_edge_save_bulk_query,
)
logger = logging.getLogger(__name__)
def _episodic_edge_from_record(record: Any) -> EpisodicEdge:
    """Adapt a single database result row into an ``EpisodicEdge``."""
    created = parse_db_date(record['created_at'])
    return EpisodicEdge(
        uuid=record['uuid'],
        group_id=record['group_id'],
        source_node_uuid=record['source_node_uuid'],
        target_node_uuid=record['target_node_uuid'],
        created_at=created,  # type: ignore[arg-type]
    )
class NeptuneEpisodicEdgeOperations(EpisodicEdgeOperations):
    """Episodic-edge (``MENTIONS``) persistence operations for Amazon Neptune."""

    async def save(
        self,
        executor: QueryExecutor,
        edge: EpisodicEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Persist one MENTIONS edge from an episode to an entity."""
        params: dict[str, Any] = {
            # Source is the episode, target the mentioned entity.
            'episode_uuid': edge.source_node_uuid,
            'entity_uuid': edge.target_node_uuid,
            'uuid': edge.uuid,
            'group_id': edge.group_id,
            'created_at': edge.created_at,
        }
        if tx is not None:
            await tx.run(EPISODIC_EDGE_SAVE, **params)
        else:
            await executor.execute_query(EPISODIC_EDGE_SAVE, **params)
        logger.debug(f'Saved Edge to Graph: {edge.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        edges: list[EpisodicEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Persist many MENTIONS edges in one statement.

        ``batch_size`` is accepted for interface compatibility but unused.
        """
        query = get_episodic_edge_save_bulk_query(GraphProvider.NEPTUNE)
        edge_dicts = [e.model_dump() for e in edges]
        if tx is not None:
            await tx.run(query, episodic_edges=edge_dicts)
        else:
            await executor.execute_query(query, episodic_edges=edge_dicts)

    async def delete(
        self,
        executor: QueryExecutor,
        edge: EpisodicEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Delete one edge by uuid, matching any of the three edge types."""
        query = """
            MATCH (n)-[e:MENTIONS|RELATES_TO|HAS_MEMBER {uuid: $uuid}]->(m)
            DELETE e
        """
        if tx is not None:
            await tx.run(query, uuid=edge.uuid)
        else:
            await executor.execute_query(query, uuid=edge.uuid)
        logger.debug(f'Deleted Edge: {edge.uuid}')

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        """Delete every edge whose uuid appears in ``uuids``."""
        query = """
            MATCH (n)-[e:MENTIONS|RELATES_TO|HAS_MEMBER]->(m)
            WHERE e.uuid IN $uuids
            DELETE e
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> EpisodicEdge:
        """Fetch one MENTIONS edge by uuid.

        Raises:
            EdgeNotFoundError: if no edge with the given uuid exists.
        """
        query = (
            """
            MATCH (n:Episodic)-[e:MENTIONS {uuid: $uuid}]->(m:Entity)
            RETURN
            """
            + EPISODIC_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid)
        edges = [_episodic_edge_from_record(r) for r in records]
        if len(edges) == 0:
            raise EdgeNotFoundError(uuid)
        return edges[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[EpisodicEdge]:
        """Fetch all MENTIONS edges whose uuid is in ``uuids``."""
        query = (
            """
            MATCH (n:Episodic)-[e:MENTIONS]->(m:Entity)
            WHERE e.uuid IN $uuids
            RETURN
            """
            + EPISODIC_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        return [_episodic_edge_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EpisodicEdge]:
        """Page MENTIONS edges by group, ordered by uuid descending.

        ``uuid_cursor`` is an exclusive keyset-pagination bound; ``limit``
        caps the page size. Clauses are included only when values are set.
        """
        cursor_clause = 'AND e.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Episodic)-[e:MENTIONS]->(m:Entity)
            WHERE e.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + EPISODIC_EDGE_RETURN
            + """
            ORDER BY e.uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
        )
        return [_episodic_edge_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neptune/operations/episodic_edge_ops.py",
"license": "Apache License 2.0",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neptune/operations/graph_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.graph_ops import GraphMaintenanceOperations
from graphiti_core.driver.operations.graph_utils import Neighbor, label_propagation
from graphiti_core.driver.query_executor import QueryExecutor
from graphiti_core.driver.record_parsers import community_node_from_record, entity_node_from_record
from graphiti_core.models.nodes.node_db_queries import (
COMMUNITY_NODE_RETURN_NEPTUNE,
get_entity_node_return_query,
)
from graphiti_core.nodes import CommunityNode, EntityNode, EpisodicNode
if TYPE_CHECKING:
from graphiti_core.driver.neptune_driver import NeptuneDriver
logger = logging.getLogger(__name__)
class NeptuneGraphMaintenanceOperations(GraphMaintenanceOperations):
    """Whole-graph maintenance operations (clearing, indexing, communities)
    for Amazon Neptune.

    Index management is delegated to the owning ``NeptuneDriver`` (AOSS
    indices); those methods no-op when no driver was supplied.
    """

    def __init__(self, driver: NeptuneDriver | None = None):
        # Kept only for AOSS index management; query work uses the executor.
        self._driver = driver

    async def clear_data(
        self,
        executor: QueryExecutor,
        group_ids: list[str] | None = None,
    ) -> None:
        """Delete graph data.

        With ``group_ids=None`` every node is detach-deleted; otherwise only
        Entity/Episodic/Community nodes belonging to those groups are removed,
        one label at a time.
        """
        if group_ids is None:
            await executor.execute_query('MATCH (n) DETACH DELETE n')
        else:
            for label in ['Entity', 'Episodic', 'Community']:
                await executor.execute_query(
                    f"""
                    MATCH (n:{label})
                    WHERE n.group_id IN $group_ids
                    DETACH DELETE n
                    """,
                    group_ids=group_ids,
                )

    async def build_indices_and_constraints(
        self,
        executor: QueryExecutor,
        delete_existing: bool = False,
    ) -> None:
        """(Re)create the driver's AOSS indices; no-op without a driver."""
        if self._driver is None:
            return
        if delete_existing:
            await self._driver.delete_aoss_indices()
        await self._driver.create_aoss_indices()

    async def delete_all_indexes(
        self,
        executor: QueryExecutor,
    ) -> None:
        """Drop the driver's AOSS indices; no-op without a driver."""
        if self._driver is None:
            return
        await self._driver.delete_aoss_indices()

    async def get_community_clusters(
        self,
        executor: QueryExecutor,
        group_ids: list[str] | None = None,
    ) -> list[Any]:
        """Cluster entity nodes per group via label propagation.

        Builds, per group, a neighbor projection (one query per node — O(n)
        round trips, acceptable for maintenance jobs), runs
        ``label_propagation`` on it, then refetches full node objects for
        each resulting cluster.

        Returns a list of clusters, each a list of ``EntityNode``.
        """
        community_clusters: list[list[EntityNode]] = []
        if group_ids is None:
            # Discover all group ids present on Entity nodes.
            group_id_values, _, _ = await executor.execute_query(
                """
                MATCH (n:Entity)
                WHERE n.group_id IS NOT NULL
                RETURN
                collect(DISTINCT n.group_id) AS group_ids
                """
            )
            group_ids = group_id_values[0]['group_ids'] if group_id_values else []
        resolved_group_ids: list[str] = group_ids or []
        for group_id in resolved_group_ids:
            projection: dict[str, list[Neighbor]] = {}
            # Get all entity nodes for this group
            node_records, _, _ = await executor.execute_query(
                """
                MATCH (n:Entity)
                WHERE n.group_id IN $group_ids
                RETURN
                """
                + get_entity_node_return_query(GraphProvider.NEPTUNE),
                group_ids=[group_id],
            )
            nodes = [entity_node_from_record(r) for r in node_records]
            for node in nodes:
                # Count RELATES_TO edges (either direction) to each neighbor
                # within the same group.
                records, _, _ = await executor.execute_query(
                    """
                    MATCH (n:Entity {group_id: $group_id, uuid: $uuid})-[e:RELATES_TO]-(m: Entity {group_id: $group_id})
                    WITH count(e) AS count, m.uuid AS uuid
                    RETURN
                    uuid,
                    count
                    """,
                    uuid=node.uuid,
                    group_id=group_id,
                )
                projection[node.uuid] = [
                    Neighbor(node_uuid=record['uuid'], edge_count=record['count'])
                    for record in records
                ]
            cluster_uuids = label_propagation(projection)
            # Fetch full node objects for each cluster
            for cluster in cluster_uuids:
                if not cluster:
                    continue
                cluster_records, _, _ = await executor.execute_query(
                    """
                    MATCH (n:Entity)
                    WHERE n.uuid IN $uuids
                    RETURN
                    """
                    + get_entity_node_return_query(GraphProvider.NEPTUNE),
                    uuids=cluster,
                )
                community_clusters.append([entity_node_from_record(r) for r in cluster_records])
        return community_clusters

    async def remove_communities(
        self,
        executor: QueryExecutor,
    ) -> None:
        """Detach-delete every Community node in the graph."""
        await executor.execute_query(
            """
            MATCH (c:Community)
            DETACH DELETE c
            """
        )

    async def determine_entity_community(
        self,
        executor: QueryExecutor,
        entity: EntityNode,
    ) -> None:
        """Check (and look up a candidate) community for ``entity``.

        NOTE(review): the second query's records are discarded and the method
        always returns None — it appears the mode-community result was meant
        to be used or returned. Confirm against the base-class contract.
        """
        # Check if the node is already part of a community
        records, _, _ = await executor.execute_query(
            """
            MATCH (c:Community)-[:HAS_MEMBER]->(n:Entity {uuid: $entity_uuid})
            WITH c AS n
            RETURN
            """
            + COMMUNITY_NODE_RETURN_NEPTUNE,
            entity_uuid=entity.uuid,
        )
        if len(records) > 0:
            return
        # If the node has no community, find the mode community of surrounding entities
        records, _, _ = await executor.execute_query(
            """
            MATCH (c:Community)-[:HAS_MEMBER]->(m:Entity)-[:RELATES_TO]-(n:Entity {uuid: $entity_uuid})
            WITH c AS n
            RETURN
            """
            + COMMUNITY_NODE_RETURN_NEPTUNE,
            entity_uuid=entity.uuid,
        )

    async def get_mentioned_nodes(
        self,
        executor: QueryExecutor,
        episodes: list[EpisodicNode],
    ) -> list[EntityNode]:
        """Return the distinct entity nodes mentioned by any given episode."""
        episode_uuids = [episode.uuid for episode in episodes]
        records, _, _ = await executor.execute_query(
            """
            MATCH (episode:Episodic)-[:MENTIONS]->(n:Entity)
            WHERE episode.uuid IN $uuids
            RETURN DISTINCT
            """
            + get_entity_node_return_query(GraphProvider.NEPTUNE),
            uuids=episode_uuids,
        )
        return [entity_node_from_record(r) for r in records]

    async def get_communities_by_nodes(
        self,
        executor: QueryExecutor,
        nodes: list[EntityNode],
    ) -> list[CommunityNode]:
        """Return the distinct communities that contain any of ``nodes``."""
        node_uuids = [node.uuid for node in nodes]
        records, _, _ = await executor.execute_query(
            """
            MATCH (n:Community)-[:HAS_MEMBER]->(m:Entity)
            WHERE m.uuid IN $uuids
            RETURN DISTINCT
            """
            + COMMUNITY_NODE_RETURN_NEPTUNE,
            uuids=node_uuids,
        )
        return [community_node_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neptune/operations/graph_ops.py",
"license": "Apache License 2.0",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neptune/operations/has_episode_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.operations.has_episode_edge_ops import HasEpisodeEdgeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.edges import HasEpisodeEdge
from graphiti_core.errors import EdgeNotFoundError
from graphiti_core.helpers import parse_db_date
from graphiti_core.models.edges.edge_db_queries import (
HAS_EPISODE_EDGE_RETURN,
HAS_EPISODE_EDGE_SAVE,
)
logger = logging.getLogger(__name__)
def _has_episode_edge_from_record(record: Any) -> HasEpisodeEdge:
    """Translate one raw query result row into a HasEpisodeEdge."""
    fields = {
        key: record[key]
        for key in ('uuid', 'group_id', 'source_node_uuid', 'target_node_uuid')
    }
    # Timestamps come back as raw DB values and must be parsed.
    fields['created_at'] = parse_db_date(record['created_at'])  # type: ignore[arg-type]
    return HasEpisodeEdge(**fields)
class NeptuneHasEpisodeEdgeOperations(HasEpisodeEdgeOperations):
    """CRUD operations for HAS_EPISODE edges (Saga -> Episodic) on Amazon Neptune.

    Every mutating method accepts an optional open transaction ``tx``; when it
    is ``None`` the statement is executed directly through ``executor``.
    """

    async def save(
        self,
        executor: QueryExecutor,
        edge: HasEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Upsert a single HAS_EPISODE edge."""
        # The save query expects the Saga uuid on the source side and the
        # Episodic uuid on the target side.
        params: dict[str, Any] = {
            'saga_uuid': edge.source_node_uuid,
            'episode_uuid': edge.target_node_uuid,
            'uuid': edge.uuid,
            'group_id': edge.group_id,
            'created_at': edge.created_at,
        }
        if tx is not None:
            await tx.run(HAS_EPISODE_EDGE_SAVE, **params)
        else:
            await executor.execute_query(HAS_EPISODE_EDGE_SAVE, **params)
        logger.debug(f'Saved Edge to Graph: {edge.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        edges: list[HasEpisodeEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Save many edges sequentially.

        NOTE(review): ``batch_size`` is accepted for interface parity but is
        currently unused — edges are written one by one.
        """
        for edge in edges:
            await self.save(executor, edge, tx=tx)

    async def delete(
        self,
        executor: QueryExecutor,
        edge: HasEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Delete the HAS_EPISODE edge matching ``edge.uuid`` (nodes are kept)."""
        query = """
        MATCH (n:Saga)-[e:HAS_EPISODE {uuid: $uuid}]->(m:Episodic)
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuid=edge.uuid)
        else:
            await executor.execute_query(query, uuid=edge.uuid)
        logger.debug(f'Deleted Edge: {edge.uuid}')

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        """Delete every HAS_EPISODE edge whose uuid is in ``uuids``."""
        query = """
        MATCH (n:Saga)-[e:HAS_EPISODE]->(m:Episodic)
        WHERE e.uuid IN $uuids
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> HasEpisodeEdge:
        """Fetch one edge by uuid.

        Raises:
            EdgeNotFoundError: if no HAS_EPISODE edge has that uuid.
        """
        query = (
            """
            MATCH (n:Saga)-[e:HAS_EPISODE {uuid: $uuid}]->(m:Episodic)
            RETURN
            """
            + HAS_EPISODE_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid)
        edges = [_has_episode_edge_from_record(r) for r in records]
        if len(edges) == 0:
            raise EdgeNotFoundError(uuid)
        return edges[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[HasEpisodeEdge]:
        """Fetch all edges whose uuid is in ``uuids``; unknown uuids are skipped."""
        query = (
            """
            MATCH (n:Saga)-[e:HAS_EPISODE]->(m:Episodic)
            WHERE e.uuid IN $uuids
            RETURN
            """
            + HAS_EPISODE_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        return [_has_episode_edge_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[HasEpisodeEdge]:
        """List edges for ``group_ids``, ordered by uuid descending.

        ``uuid_cursor`` is an exclusive keyset-pagination bound: only edges
        whose uuid is strictly less than it are returned.
        """
        cursor_clause = 'AND e.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Saga)-[e:HAS_EPISODE]->(m:Episodic)
            WHERE e.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + HAS_EPISODE_EDGE_RETURN
            + """
            ORDER BY e.uuid DESC
            """
            + limit_clause
        )
        # $uuid / $limit may be None; their clauses are only present in the
        # query text when the corresponding argument was provided.
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
        )
        return [_has_episode_edge_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neptune/operations/has_episode_edge_ops.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neptune/operations/next_episode_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.operations.next_episode_edge_ops import NextEpisodeEdgeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.edges import NextEpisodeEdge
from graphiti_core.errors import EdgeNotFoundError
from graphiti_core.helpers import parse_db_date
from graphiti_core.models.edges.edge_db_queries import (
NEXT_EPISODE_EDGE_RETURN,
NEXT_EPISODE_EDGE_SAVE,
)
logger = logging.getLogger(__name__)
def _next_episode_edge_from_record(record: Any) -> NextEpisodeEdge:
    """Translate one raw query result row into a NextEpisodeEdge."""
    fields = {
        key: record[key]
        for key in ('uuid', 'group_id', 'source_node_uuid', 'target_node_uuid')
    }
    # Timestamps come back as raw DB values and must be parsed.
    fields['created_at'] = parse_db_date(record['created_at'])  # type: ignore[arg-type]
    return NextEpisodeEdge(**fields)
class NeptuneNextEpisodeEdgeOperations(NextEpisodeEdgeOperations):
    """CRUD operations for NEXT_EPISODE edges (Episodic -> Episodic) on Neptune.

    Every mutating method accepts an optional open transaction ``tx``; when it
    is ``None`` the statement is executed directly through ``executor``.
    """

    async def save(
        self,
        executor: QueryExecutor,
        edge: NextEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Upsert a single NEXT_EPISODE edge linking two episodes in order."""
        params: dict[str, Any] = {
            'source_episode_uuid': edge.source_node_uuid,
            'target_episode_uuid': edge.target_node_uuid,
            'uuid': edge.uuid,
            'group_id': edge.group_id,
            'created_at': edge.created_at,
        }
        if tx is not None:
            await tx.run(NEXT_EPISODE_EDGE_SAVE, **params)
        else:
            await executor.execute_query(NEXT_EPISODE_EDGE_SAVE, **params)
        logger.debug(f'Saved Edge to Graph: {edge.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        edges: list[NextEpisodeEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Save many edges sequentially.

        NOTE(review): ``batch_size`` is accepted for interface parity but is
        currently unused — edges are written one by one.
        """
        for edge in edges:
            await self.save(executor, edge, tx=tx)

    async def delete(
        self,
        executor: QueryExecutor,
        edge: NextEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Delete the NEXT_EPISODE edge matching ``edge.uuid`` (nodes are kept)."""
        query = """
        MATCH (n:Episodic)-[e:NEXT_EPISODE {uuid: $uuid}]->(m:Episodic)
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuid=edge.uuid)
        else:
            await executor.execute_query(query, uuid=edge.uuid)
        logger.debug(f'Deleted Edge: {edge.uuid}')

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        """Delete every NEXT_EPISODE edge whose uuid is in ``uuids``."""
        query = """
        MATCH (n:Episodic)-[e:NEXT_EPISODE]->(m:Episodic)
        WHERE e.uuid IN $uuids
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> NextEpisodeEdge:
        """Fetch one edge by uuid.

        Raises:
            EdgeNotFoundError: if no NEXT_EPISODE edge has that uuid.
        """
        query = (
            """
            MATCH (n:Episodic)-[e:NEXT_EPISODE {uuid: $uuid}]->(m:Episodic)
            RETURN
            """
            + NEXT_EPISODE_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid)
        edges = [_next_episode_edge_from_record(r) for r in records]
        if len(edges) == 0:
            raise EdgeNotFoundError(uuid)
        return edges[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[NextEpisodeEdge]:
        """Fetch all edges whose uuid is in ``uuids``; unknown uuids are skipped."""
        query = (
            """
            MATCH (n:Episodic)-[e:NEXT_EPISODE]->(m:Episodic)
            WHERE e.uuid IN $uuids
            RETURN
            """
            + NEXT_EPISODE_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        return [_next_episode_edge_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[NextEpisodeEdge]:
        """List edges for ``group_ids``, ordered by uuid descending.

        ``uuid_cursor`` is an exclusive keyset-pagination bound: only edges
        whose uuid is strictly less than it are returned.
        """
        cursor_clause = 'AND e.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Episodic)-[e:NEXT_EPISODE]->(m:Episodic)
            WHERE e.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + NEXT_EPISODE_EDGE_RETURN
            + """
            ORDER BY e.uuid DESC
            """
            + limit_clause
        )
        # $uuid / $limit may be None; their clauses are only present in the
        # query text when the corresponding argument was provided.
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
        )
        return [_next_episode_edge_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neptune/operations/next_episode_edge_ops.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neptune/operations/saga_node_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.saga_node_ops import SagaNodeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.errors import NodeNotFoundError
from graphiti_core.helpers import parse_db_date
from graphiti_core.models.nodes.node_db_queries import (
SAGA_NODE_RETURN_NEPTUNE,
get_saga_node_save_query,
)
from graphiti_core.nodes import SagaNode
logger = logging.getLogger(__name__)
def _saga_node_from_record(record: Any) -> SagaNode:
    """Translate one raw query result row into a SagaNode."""
    created = parse_db_date(record['created_at'])
    return SagaNode(
        uuid=record['uuid'],
        name=record['name'],
        group_id=record['group_id'],
        created_at=created,  # type: ignore[arg-type]
    )
class NeptuneSagaNodeOperations(SagaNodeOperations):
    """CRUD operations for Saga nodes on Amazon Neptune.

    Every mutating method accepts an optional open transaction ``tx``; when it
    is ``None`` the statement is executed directly through ``executor``.
    """

    async def save(
        self,
        executor: QueryExecutor,
        node: SagaNode,
        tx: Transaction | None = None,
    ) -> None:
        """Upsert a single Saga node."""
        query = get_saga_node_save_query(GraphProvider.NEPTUNE)
        params: dict[str, Any] = {
            'uuid': node.uuid,
            'name': node.name,
            'group_id': node.group_id,
            'created_at': node.created_at,
        }
        if tx is not None:
            await tx.run(query, **params)
        else:
            await executor.execute_query(query, **params)
        logger.debug(f'Saved Saga Node to Graph: {node.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[SagaNode],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Save many nodes sequentially.

        NOTE(review): ``batch_size`` is accepted for interface parity but is
        currently unused — nodes are written one by one.
        """
        for node in nodes:
            await self.save(executor, node, tx=tx)

    async def delete(
        self,
        executor: QueryExecutor,
        node: SagaNode,
        tx: Transaction | None = None,
    ) -> None:
        """Delete one Saga node and all of its attached edges (DETACH DELETE)."""
        query = """
        MATCH (n:Saga {uuid: $uuid})
        DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, uuid=node.uuid)
        else:
            await executor.execute_query(query, uuid=node.uuid)
        logger.debug(f'Deleted Node: {node.uuid}')

    async def delete_by_group_id(
        self,
        executor: QueryExecutor,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Delete every Saga node in ``group_id`` along with attached edges.

        NOTE(review): ``batch_size`` is accepted for interface parity but is
        currently unused — the deletion runs as a single statement.
        """
        query = """
        MATCH (n:Saga {group_id: $group_id})
        DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, group_id=group_id)
        else:
            await executor.execute_query(query, group_id=group_id)

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Delete every Saga node whose uuid is in ``uuids`` (with its edges).

        NOTE(review): ``batch_size`` is currently unused — single statement.
        """
        query = """
        MATCH (n:Saga)
        WHERE n.uuid IN $uuids
        DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> SagaNode:
        """Fetch one Saga node by uuid.

        Raises:
            NodeNotFoundError: if no Saga node has that uuid.
        """
        query = (
            """
            MATCH (s:Saga {uuid: $uuid})
            RETURN
            """
            + SAGA_NODE_RETURN_NEPTUNE
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid)
        nodes = [_saga_node_from_record(r) for r in records]
        if len(nodes) == 0:
            raise NodeNotFoundError(uuid)
        return nodes[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[SagaNode]:
        """Fetch all Saga nodes whose uuid is in ``uuids``; unknown uuids skipped."""
        query = (
            """
            MATCH (s:Saga)
            WHERE s.uuid IN $uuids
            RETURN
            """
            + SAGA_NODE_RETURN_NEPTUNE
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        return [_saga_node_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[SagaNode]:
        """List Saga nodes for ``group_ids``, ordered by uuid descending.

        ``uuid_cursor`` is an exclusive keyset-pagination bound: only nodes
        whose uuid is strictly less than it are returned.
        """
        cursor_clause = 'AND s.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (s:Saga)
            WHERE s.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + SAGA_NODE_RETURN_NEPTUNE
            + """
            ORDER BY s.uuid DESC
            """
            + limit_clause
        )
        # $uuid / $limit may be None; their clauses are only present in the
        # query text when the corresponding argument was provided.
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
        )
        return [_saga_node_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neptune/operations/saga_node_ops.py",
"license": "Apache License 2.0",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neptune/operations/search_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.search_ops import SearchOperations
from graphiti_core.driver.query_executor import QueryExecutor
from graphiti_core.driver.record_parsers import (
community_node_from_record,
entity_edge_from_record,
entity_node_from_record,
episodic_node_from_record,
)
from graphiti_core.edges import EntityEdge
from graphiti_core.models.edges.edge_db_queries import get_entity_edge_return_query
from graphiti_core.models.nodes.node_db_queries import (
COMMUNITY_NODE_RETURN_NEPTUNE,
EPISODIC_NODE_RETURN_NEPTUNE,
get_entity_node_return_query,
)
from graphiti_core.nodes import CommunityNode, EntityNode, EpisodicNode
from graphiti_core.search.search_filters import (
SearchFilters,
edge_search_filter_query_constructor,
node_search_filter_query_constructor,
)
from graphiti_core.search.search_utils import calculate_cosine_similarity
if TYPE_CHECKING:
from graphiti_core.driver.neptune_driver import NeptuneDriver
logger = logging.getLogger(__name__)
class NeptuneSearchOperations(SearchOperations):
    """Search operations for Amazon Neptune.

    Neptune's openCypher has no native fulltext or vector index, so:

    * fulltext searches are delegated to the driver's OpenSearch (AOSS)
      index and the hits are hydrated from the graph, and
    * similarity searches pull the stored embeddings out of the graph and
      score them client-side with cosine similarity. Embeddings are stored
      as comma-separated strings and parsed with ``split(',')``.
    """

    def __init__(self, driver: NeptuneDriver | None = None):
        # The driver is only needed for the AOSS-backed fulltext paths; when
        # it is absent those searches return no results.
        self._driver = driver

    # --- Node search ---
    async def node_fulltext_search(
        self,
        executor: QueryExecutor,
        query: str,
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EntityNode]:
        """Fulltext-search entity nodes via the AOSS ``node_name_and_summary`` index.

        NOTE(review): ``search_filter`` and ``group_ids`` are not applied on
        this path — confirm whether AOSS-side filtering is expected.
        """
        if self._driver is None:
            return []
        driver = self._driver
        res = driver.run_aoss_query('node_name_and_summary', query, limit=limit)
        if not res or res.get('hits', {}).get('total', {}).get('value', 0) == 0:
            return []
        # Carry the AOSS relevance score with each uuid so the hydration
        # query can preserve the ranking.
        input_ids = []
        for r in res['hits']['hits']:
            input_ids.append({'id': r['_source']['uuid'], 'score': r['_score']})
        cypher = (
            """
            UNWIND $ids as i
            MATCH (n:Entity)
            WHERE n.uuid=i.id
            RETURN
            """
            + get_entity_node_return_query(GraphProvider.NEPTUNE)
            + """
            ORDER BY i.score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            ids=input_ids,
            limit=limit,
        )
        return [entity_node_from_record(r) for r in records]

    async def node_similarity_search(
        self,
        executor: QueryExecutor,
        search_vector: list[float],
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
        min_score: float = 0.6,
    ) -> list[EntityNode]:
        """Rank entity nodes by cosine similarity of ``name_embedding``.

        All candidate embeddings are fetched and scored in Python; only nodes
        scoring strictly above ``min_score`` are hydrated and returned.
        """
        filter_queries, filter_params = node_search_filter_query_constructor(
            search_filter, GraphProvider.NEPTUNE
        )
        if group_ids is not None:
            filter_queries.append('n.group_id IN $group_ids')
            filter_params['group_ids'] = group_ids
        filter_query = ''
        if filter_queries:
            filter_query = ' WHERE ' + (' AND '.join(filter_queries))
        # Neptune: fetch all embeddings, compute cosine in Python
        query = (
            'MATCH (n:Entity)'
            + filter_query
            + """
            RETURN DISTINCT id(n) as id, n.name_embedding as embedding
            """
        )
        resp, _, _ = await executor.execute_query(
            query,
            **filter_params,
        )
        if not resp:
            return []
        input_ids = []
        for r in resp:
            if r['embedding']:
                # Embeddings are stored as comma-joined floats.
                score = calculate_cosine_similarity(
                    search_vector, list(map(float, r['embedding'].split(',')))
                )
                if score > min_score:
                    input_ids.append({'id': r['id'], 'score': score})
        if not input_ids:
            return []
        cypher = (
            """
            UNWIND $ids as i
            MATCH (n:Entity)
            WHERE id(n)=i.id
            RETURN
            """
            + get_entity_node_return_query(GraphProvider.NEPTUNE)
            + """
            ORDER BY i.score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            ids=input_ids,
            limit=limit,
        )
        return [entity_node_from_record(r) for r in records]

    async def node_bfs_search(
        self,
        executor: QueryExecutor,
        origin_uuids: list[str],
        search_filter: SearchFilters,
        max_depth: int,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EntityNode]:
        """Breadth-first search for entity nodes reachable from ``origin_uuids``.

        Origins may be Entity or Episodic nodes; traversal follows RELATES_TO
        and MENTIONS edges up to ``max_depth`` hops, staying in the origin's
        group.
        """
        if not origin_uuids or max_depth < 1:
            return []
        filter_queries, filter_params = node_search_filter_query_constructor(
            search_filter, GraphProvider.NEPTUNE
        )
        if group_ids is not None:
            filter_queries.append('n.group_id IN $group_ids')
            filter_queries.append('origin.group_id IN $group_ids')
            filter_params['group_ids'] = group_ids
        filter_query = ''
        if filter_queries:
            filter_query = ' AND ' + (' AND '.join(filter_queries))
        cypher = (
            f"""
            UNWIND $bfs_origin_node_uuids AS origin_uuid
            MATCH (origin {{uuid: origin_uuid}})-[e:RELATES_TO|MENTIONS*1..{max_depth}]->(n:Entity)
            WHERE (origin:Entity OR origin:Episodic)
            AND n.group_id = origin.group_id
            """
            + filter_query
            + """
            RETURN
            """
            + get_entity_node_return_query(GraphProvider.NEPTUNE)
            + """
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            bfs_origin_node_uuids=origin_uuids,
            limit=limit,
            **filter_params,
        )
        return [entity_node_from_record(r) for r in records]

    # --- Edge search ---
    async def edge_fulltext_search(
        self,
        executor: QueryExecutor,
        query: str,
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EntityEdge]:
        """Fulltext-search RELATES_TO edges via the AOSS ``edge_name_and_fact`` index."""
        if self._driver is None:
            return []
        driver = self._driver
        # FIX: pass the requested limit through to AOSS, consistent with the
        # node/episode/community fulltext searches; previously the index's
        # default result size silently capped the candidate set.
        res = driver.run_aoss_query('edge_name_and_fact', query, limit=limit)
        if not res or res.get('hits', {}).get('total', {}).get('value', 0) == 0:
            return []
        filter_queries, filter_params = edge_search_filter_query_constructor(
            search_filter, GraphProvider.NEPTUNE
        )
        if group_ids is not None:
            filter_queries.append('e.group_id IN $group_ids')
            filter_params['group_ids'] = group_ids
        filter_query = ''
        if filter_queries:
            filter_query = ' AND ' + (' AND '.join(filter_queries))
        input_ids = []
        for r in res['hits']['hits']:
            input_ids.append({'id': r['_source']['uuid'], 'score': r['_score']})
        cypher = (
            """
            UNWIND $ids as id
            MATCH (n:Entity)-[e:RELATES_TO]->(m:Entity)
            WHERE e.uuid = id.id
            """
            + filter_query
            + """
            WITH e, id.score as score, n, m
            RETURN
            """
            + get_entity_edge_return_query(GraphProvider.NEPTUNE)
            + """
            ORDER BY score DESC LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            ids=input_ids,
            limit=limit,
            **filter_params,
        )
        return [entity_edge_from_record(r) for r in records]

    async def edge_similarity_search(
        self,
        executor: QueryExecutor,
        search_vector: list[float],
        source_node_uuid: str | None,
        target_node_uuid: str | None,
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
        min_score: float = 0.6,
    ) -> list[EntityEdge]:
        """Rank RELATES_TO edges by cosine similarity of ``fact_embedding``.

        Optional ``source_node_uuid``/``target_node_uuid`` pin the endpoints.
        Scoring happens client-side; hits above ``min_score`` are hydrated.
        """
        filter_queries, filter_params = edge_search_filter_query_constructor(
            search_filter, GraphProvider.NEPTUNE
        )
        if group_ids is not None:
            filter_queries.append('e.group_id IN $group_ids')
            filter_params['group_ids'] = group_ids
        if source_node_uuid is not None:
            filter_params['source_uuid'] = source_node_uuid
            filter_queries.append('n.uuid = $source_uuid')
        if target_node_uuid is not None:
            filter_params['target_uuid'] = target_node_uuid
            filter_queries.append('m.uuid = $target_uuid')
        filter_query = ''
        if filter_queries:
            filter_query = ' WHERE ' + (' AND '.join(filter_queries))
        # Fetch all embeddings, compute cosine similarity in Python
        query = (
            'MATCH (n:Entity)-[e:RELATES_TO]->(m:Entity)'
            + filter_query
            + """
            RETURN DISTINCT id(e) as id, e.fact_embedding as embedding
            """
        )
        resp, _, _ = await executor.execute_query(
            query,
            **filter_params,
        )
        if not resp:
            return []
        input_ids = []
        for r in resp:
            if r['embedding']:
                score = calculate_cosine_similarity(
                    search_vector, list(map(float, r['embedding'].split(',')))
                )
                if score > min_score:
                    input_ids.append({'id': r['id'], 'score': score})
        if not input_ids:
            return []
        # Hydration returns the full edge projection expected by
        # entity_edge_from_record; episodes are stored comma-joined.
        cypher = """
        UNWIND $ids as i
        MATCH ()-[r]->()
        WHERE id(r) = i.id
        RETURN
            r.uuid AS uuid,
            r.group_id AS group_id,
            startNode(r).uuid AS source_node_uuid,
            endNode(r).uuid AS target_node_uuid,
            r.created_at AS created_at,
            r.name AS name,
            r.fact AS fact,
            split(r.episodes, ",") AS episodes,
            r.expired_at AS expired_at,
            r.valid_at AS valid_at,
            r.invalid_at AS invalid_at,
            properties(r) AS attributes
        ORDER BY i.score DESC
        LIMIT $limit
        """
        records, _, _ = await executor.execute_query(
            cypher,
            ids=input_ids,
            limit=limit,
        )
        return [entity_edge_from_record(r) for r in records]

    async def edge_bfs_search(
        self,
        executor: QueryExecutor,
        origin_uuids: list[str],
        max_depth: int,
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EntityEdge]:
        """Collect RELATES_TO edges found on paths fanning out from ``origin_uuids``.

        Each traversed relationship is re-matched as an Entity-to-Entity
        RELATES_TO edge before filtering, so MENTIONS hops contribute paths
        but never results.
        """
        if not origin_uuids:
            return []
        filter_queries, filter_params = edge_search_filter_query_constructor(
            search_filter, GraphProvider.NEPTUNE
        )
        if group_ids is not None:
            filter_queries.append('e.group_id IN $group_ids')
            filter_params['group_ids'] = group_ids
        filter_query = ''
        if filter_queries:
            filter_query = ' WHERE ' + (' AND '.join(filter_queries))
        cypher = (
            f"""
            UNWIND $bfs_origin_node_uuids AS origin_uuid
            MATCH path = (origin {{uuid: origin_uuid}})-[:RELATES_TO|MENTIONS *1..{max_depth}]->(n:Entity)
            WHERE origin:Entity OR origin:Episodic
            UNWIND relationships(path) AS rel
            MATCH (n:Entity)-[e:RELATES_TO {{uuid: rel.uuid}}]-(m:Entity)
            """
            + filter_query
            + """
            RETURN DISTINCT
                e.uuid AS uuid,
                e.group_id AS group_id,
                startNode(e).uuid AS source_node_uuid,
                endNode(e).uuid AS target_node_uuid,
                e.created_at AS created_at,
                e.name AS name,
                e.fact AS fact,
                split(e.episodes, ',') AS episodes,
                e.expired_at AS expired_at,
                e.valid_at AS valid_at,
                e.invalid_at AS invalid_at,
                properties(e) AS attributes
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            bfs_origin_node_uuids=origin_uuids,
            limit=limit,
            **filter_params,
        )
        return [entity_edge_from_record(r) for r in records]

    # --- Episode search ---
    async def episode_fulltext_search(
        self,
        executor: QueryExecutor,
        query: str,
        search_filter: SearchFilters,  # noqa: ARG002
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EpisodicNode]:
        """Fulltext-search episodes via the AOSS ``episode_content`` index.

        NOTE(review): ``group_ids`` is not applied on this path.
        """
        if self._driver is None:
            return []
        driver = self._driver
        res = driver.run_aoss_query('episode_content', query, limit=limit)
        if not res or res.get('hits', {}).get('total', {}).get('value', 0) == 0:
            return []
        input_ids = []
        for r in res['hits']['hits']:
            input_ids.append({'id': r['_source']['uuid'], 'score': r['_score']})
        cypher = (
            """
            UNWIND $ids as i
            MATCH (e:Episodic)
            WHERE e.uuid=i.id
            RETURN
            """
            + EPISODIC_NODE_RETURN_NEPTUNE
            + """
            ORDER BY i.score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            ids=input_ids,
            limit=limit,
        )
        return [episodic_node_from_record(r) for r in records]

    # --- Community search ---
    async def community_fulltext_search(
        self,
        executor: QueryExecutor,
        query: str,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[CommunityNode]:
        """Fulltext-search communities via the AOSS ``community_name`` index.

        NOTE(review): ``group_ids`` is not applied on this path.
        """
        if self._driver is None:
            return []
        driver = self._driver
        res = driver.run_aoss_query('community_name', query, limit=limit)
        if not res or res.get('hits', {}).get('total', {}).get('value', 0) == 0:
            return []
        input_ids = []
        for r in res['hits']['hits']:
            input_ids.append({'id': r['_source']['uuid'], 'score': r['_score']})
        cypher = (
            """
            UNWIND $ids as i
            MATCH (n:Community)
            WHERE n.uuid=i.id
            RETURN
            """
            + COMMUNITY_NODE_RETURN_NEPTUNE
            + """
            ORDER BY i.score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            ids=input_ids,
            limit=limit,
        )
        return [community_node_from_record(r) for r in records]

    async def community_similarity_search(
        self,
        executor: QueryExecutor,
        search_vector: list[float],
        group_ids: list[str] | None = None,
        limit: int = 10,
        min_score: float = 0.6,
    ) -> list[CommunityNode]:
        """Rank communities by cosine similarity of ``name_embedding`` (client-side)."""
        query_params: dict[str, Any] = {}
        group_filter_query = ''
        if group_ids is not None:
            group_filter_query += ' WHERE n.group_id IN $group_ids'
            query_params['group_ids'] = group_ids
        query = (
            'MATCH (n:Community)'
            + group_filter_query
            + """
            RETURN DISTINCT id(n) as id, n.name_embedding as embedding
            """
        )
        resp, _, _ = await executor.execute_query(
            query,
            **query_params,
        )
        if not resp:
            return []
        input_ids = []
        for r in resp:
            if r['embedding']:
                score = calculate_cosine_similarity(
                    search_vector, list(map(float, r['embedding'].split(',')))
                )
                if score > min_score:
                    input_ids.append({'id': r['id'], 'score': score})
        if not input_ids:
            return []
        cypher = (
            """
            UNWIND $ids as i
            MATCH (n:Community)
            WHERE id(n)=i.id
            RETURN
            """
            + COMMUNITY_NODE_RETURN_NEPTUNE
            + """
            ORDER BY i.score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            ids=input_ids,
            limit=limit,
        )
        return [community_node_from_record(r) for r in records]

    # --- Rerankers ---
    async def node_distance_reranker(
        self,
        executor: QueryExecutor,
        node_uuids: list[str],
        center_node_uuid: str,
        min_score: float = 0,
    ) -> list[EntityNode]:
        """Rerank ``node_uuids`` by graph proximity to ``center_node_uuid``.

        Direct RELATES_TO neighbors score distance 1, the center itself 0.1,
        unreachable nodes infinity; nodes with 1/distance >= ``min_score``
        are returned closest-first.
        """
        filtered_uuids = [u for u in node_uuids if u != center_node_uuid]
        scores: dict[str, float] = {center_node_uuid: 0.0}
        cypher = """
        UNWIND $node_uuids AS node_uuid
        MATCH (center:Entity {uuid: $center_uuid})-[:RELATES_TO]-(n:Entity {uuid: node_uuid})
        RETURN 1 AS score, node_uuid AS uuid
        """
        results, _, _ = await executor.execute_query(
            cypher,
            node_uuids=filtered_uuids,
            center_uuid=center_node_uuid,
        )
        for result in results:
            scores[result['uuid']] = result['score']
        # Nodes not adjacent to the center are pushed to the end.
        for uuid in filtered_uuids:
            if uuid not in scores:
                scores[uuid] = float('inf')
        filtered_uuids.sort(key=lambda cur_uuid: scores[cur_uuid])
        if center_node_uuid in node_uuids:
            # Give the center a small non-zero distance so 1/score is finite.
            scores[center_node_uuid] = 0.1
            filtered_uuids = [center_node_uuid] + filtered_uuids
        reranked_uuids = [u for u in filtered_uuids if (1 / scores[u]) >= min_score]
        if not reranked_uuids:
            return []
        get_query = """
        MATCH (n:Entity)
        WHERE n.uuid IN $uuids
        RETURN
        """ + get_entity_node_return_query(GraphProvider.NEPTUNE)
        records, _, _ = await executor.execute_query(get_query, uuids=reranked_uuids)
        node_map = {r['uuid']: entity_node_from_record(r) for r in records}
        return [node_map[u] for u in reranked_uuids if u in node_map]

    async def episode_mentions_reranker(
        self,
        executor: QueryExecutor,
        node_uuids: list[str],
        min_score: float = 0,
    ) -> list[EntityNode]:
        """Rerank nodes by episode mention count.

        NOTE(review): nodes are sorted by *ascending* mention count, and
        unmentioned nodes (score inf) always pass the ``min_score`` filter
        and sort last — confirm the ascending order is intended.
        """
        if not node_uuids:
            return []
        scores: dict[str, float] = {}
        results, _, _ = await executor.execute_query(
            """
            UNWIND $node_uuids AS node_uuid
            MATCH (episode:Episodic)-[r:MENTIONS]->(n:Entity {uuid: node_uuid})
            RETURN count(*) AS score, n.uuid AS uuid
            """,
            node_uuids=node_uuids,
        )
        for result in results:
            scores[result['uuid']] = result['score']
        for uuid in node_uuids:
            if uuid not in scores:
                scores[uuid] = float('inf')
        sorted_uuids = list(node_uuids)
        sorted_uuids.sort(key=lambda cur_uuid: scores[cur_uuid])
        reranked_uuids = [u for u in sorted_uuids if scores[u] >= min_score]
        if not reranked_uuids:
            return []
        get_query = """
        MATCH (n:Entity)
        WHERE n.uuid IN $uuids
        RETURN
        """ + get_entity_node_return_query(GraphProvider.NEPTUNE)
        records, _, _ = await executor.execute_query(get_query, uuids=reranked_uuids)
        node_map = {r['uuid']: entity_node_from_record(r) for r in records}
        return [node_map[u] for u in reranked_uuids if u in node_map]

    # --- Filter builders ---
    def build_node_search_filters(self, search_filters: SearchFilters) -> Any:
        """Return Neptune-flavored node filter clauses and their parameters."""
        filter_queries, filter_params = node_search_filter_query_constructor(
            search_filters, GraphProvider.NEPTUNE
        )
        return {'filter_queries': filter_queries, 'filter_params': filter_params}

    def build_edge_search_filters(self, search_filters: SearchFilters) -> Any:
        """Return Neptune-flavored edge filter clauses and their parameters."""
        filter_queries, filter_params = edge_search_filter_query_constructor(
            search_filters, GraphProvider.NEPTUNE
        )
        return {'filter_queries': filter_queries, 'filter_params': filter_params}

    # --- Fulltext query builder ---
    def build_fulltext_query(
        self,
        query: str,
        group_ids: list[str] | None = None,
        max_query_length: int = 8000,
    ) -> str:
        """Return ``query`` unchanged.

        Neptune uses AOSS for fulltext, so no Lucene-style query string is
        constructed here.
        """
        # Neptune uses AOSS for fulltext, so this is not used directly
        return query
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neptune/operations/search_ops.py",
"license": "Apache License 2.0",
"lines": 588,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/operations/graph_utils.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import defaultdict
from pydantic import BaseModel
class Neighbor(BaseModel):
    """One adjacency entry in a community-graph projection."""

    # uuid of the adjacent node
    node_uuid: str
    # number of edges to that node; used as the label-propagation weight
    edge_count: int
def label_propagation(projection: dict[str, list[Neighbor]]) -> list[list[str]]:
    """Cluster nodes into communities via synchronous label propagation.

    Every node starts in its own community; each sweep, a node adopts the
    community holding the (edge-count-weighted) strongest share of its
    neighbors. Weak majorities (total weight <= 1) and isolated nodes fall
    back to the numerically largest of the candidate and current community
    ids, keeping the sweep deterministic.

    Args:
        projection: adjacency map from node uuid to its list of neighbors.

    Returns:
        A list of communities, each a list of member node uuids.
    """
    # Each node begins in its own singleton community.
    community_map = {uuid: i for i, uuid in enumerate(projection.keys())}

    # FIX: synchronous label propagation can oscillate forever on bipartite
    # structures (e.g. two mutually linked nodes with edge_count >= 2 swap
    # labels every sweep), so cap the number of sweeps to guarantee
    # termination. Converging inputs break out early exactly as before.
    max_sweeps = max(1, 10 * len(projection))
    for _ in range(max_sweeps):
        no_change = True
        new_community_map: dict[str, int] = {}

        for uuid, neighbors in projection.items():
            curr_community = community_map[uuid]

            # Tally neighbor communities, weighted by edge count.
            community_candidates: dict[int, int] = defaultdict(int)
            for neighbor in neighbors:
                community_candidates[community_map[neighbor.node_uuid]] += neighbor.edge_count

            community_lst = [
                (count, community) for community, count in community_candidates.items()
            ]
            community_lst.sort(reverse=True)
            candidate_rank, community_candidate = community_lst[0] if community_lst else (0, -1)

            if community_candidate != -1 and candidate_rank > 1:
                new_community = community_candidate
            else:
                new_community = max(community_candidate, curr_community)

            new_community_map[uuid] = new_community
            if new_community != curr_community:
                no_change = False

        if no_change:
            break
        community_map = new_community_map

    # Group uuids by their final community label.
    community_cluster_map: dict[int, list[str]] = defaultdict(list)
    for uuid, community in community_map.items():
        community_cluster_map[community].append(uuid)

    return list(community_cluster_map.values())
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/operations/graph_utils.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/falkordb/operations/community_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.community_edge_ops import CommunityEdgeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.edges import CommunityEdge
from graphiti_core.errors import EdgeNotFoundError
from graphiti_core.helpers import parse_db_date
from graphiti_core.models.edges.edge_db_queries import (
COMMUNITY_EDGE_RETURN,
get_community_edge_save_query,
)
logger = logging.getLogger(__name__)
def _community_edge_from_record(record: Any) -> CommunityEdge:
    """Translate a raw FalkorDB result record into a CommunityEdge model."""
    fields = {
        key: record[key]
        for key in ('uuid', 'group_id', 'source_node_uuid', 'target_node_uuid')
    }
    # created_at comes back as a DB-native value; normalize it to a datetime.
    fields['created_at'] = parse_db_date(record['created_at'])  # type: ignore[arg-type]
    return CommunityEdge(**fields)
class FalkorCommunityEdgeOperations(CommunityEdgeOperations):
    """FalkorDB implementation of CRUD operations for community (HAS_MEMBER) edges.

    Mutating methods accept an optional open ``tx`` (Transaction); when given,
    the query runs inside that transaction, otherwise it is executed directly
    on ``executor``.
    """

    async def save(
        self,
        executor: QueryExecutor,
        edge: CommunityEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Persist (upsert) a single community edge to the graph."""
        query = get_community_edge_save_query(GraphProvider.FALKORDB)
        # Keys must match the placeholders in the provider-specific save query.
        params: dict[str, Any] = {
            'community_uuid': edge.source_node_uuid,
            'entity_uuid': edge.target_node_uuid,
            'uuid': edge.uuid,
            'group_id': edge.group_id,
            'created_at': edge.created_at,
        }
        if tx is not None:
            await tx.run(query, **params)
        else:
            await executor.execute_query(query, **params)
        logger.debug(f'Saved Edge to Graph: {edge.uuid}')

    async def delete(
        self,
        executor: QueryExecutor,
        edge: CommunityEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Delete the edge carrying this uuid, whatever its relationship type."""
        query = """
            MATCH (n)-[e:MENTIONS|RELATES_TO|HAS_MEMBER {uuid: $uuid}]->(m)
            DELETE e
        """
        if tx is not None:
            await tx.run(query, uuid=edge.uuid)
        else:
            await executor.execute_query(query, uuid=edge.uuid)
        logger.debug(f'Deleted Edge: {edge.uuid}')

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        """Delete every edge whose uuid appears in ``uuids``."""
        if not uuids:
            # Nothing to delete; skip the round trip with an empty IN-list.
            return
        query = """
            MATCH (n)-[e:MENTIONS|RELATES_TO|HAS_MEMBER]->(m)
            WHERE e.uuid IN $uuids
            DELETE e
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> CommunityEdge:
        """Fetch one HAS_MEMBER edge by uuid.

        Raises:
            EdgeNotFoundError: if no edge with that uuid exists.
        """
        query = (
            """
            MATCH (n:Community)-[e:HAS_MEMBER {uuid: $uuid}]->(m)
            RETURN
            """
            + COMMUNITY_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid)
        edges = [_community_edge_from_record(r) for r in records]
        if len(edges) == 0:
            raise EdgeNotFoundError(uuid)
        return edges[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[CommunityEdge]:
        """Fetch all HAS_MEMBER edges whose uuid appears in ``uuids``."""
        if not uuids:
            # Consistent with the entity-edge operations: avoid issuing a
            # query with an empty IN-list that can match nothing.
            return []
        query = (
            """
            MATCH (n:Community)-[e:HAS_MEMBER]->(m)
            WHERE e.uuid IN $uuids
            RETURN
            """
            + COMMUNITY_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        return [_community_edge_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[CommunityEdge]:
        """Page through HAS_MEMBER edges of the given groups, descending by uuid.

        ``uuid_cursor`` is an exclusive upper bound for keyset pagination: pass
        the last uuid of the previous page to fetch the next one.
        """
        cursor_clause = 'AND e.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Community)-[e:HAS_MEMBER]->(m)
            WHERE e.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + COMMUNITY_EDGE_RETURN
            + """
            ORDER BY e.uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
        )
        return [_community_edge_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/falkordb/operations/community_edge_ops.py",
"license": "Apache License 2.0",
"lines": 146,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/falkordb/operations/community_node_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.community_node_ops import CommunityNodeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.driver.record_parsers import community_node_from_record
from graphiti_core.errors import NodeNotFoundError
from graphiti_core.models.nodes.node_db_queries import (
COMMUNITY_NODE_RETURN,
get_community_node_save_query,
)
from graphiti_core.nodes import CommunityNode
logger = logging.getLogger(__name__)
class FalkorCommunityNodeOperations(CommunityNodeOperations):
    """FalkorDB implementation of CRUD operations for Community nodes.
    Mutating methods accept an optional open ``tx`` (Transaction); when given,
    the query runs inside it, otherwise it executes directly on ``executor``.
    """
    async def save(
        self,
        executor: QueryExecutor,
        node: CommunityNode,
        tx: Transaction | None = None,
    ) -> None:
        """Persist (upsert) a single community node."""
        query = get_community_node_save_query(GraphProvider.FALKORDB)
        # Keys must match the placeholders in the provider-specific save query.
        params: dict[str, Any] = {
            'uuid': node.uuid,
            'name': node.name,
            'group_id': node.group_id,
            'summary': node.summary,
            'name_embedding': node.name_embedding,
            'created_at': node.created_at,
        }
        if tx is not None:
            await tx.run(query, **params)
        else:
            await executor.execute_query(query, **params)
        logger.debug(f'Saved Community Node to Graph: {node.uuid}')
    async def save_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[CommunityNode],
        tx: Transaction | None = None,
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Save many community nodes by repeated single saves.
        ``batch_size`` is accepted for interface compatibility but unused here.
        """
        for node in nodes:
            await self.save(executor, node, tx=tx)
    async def delete(
        self,
        executor: QueryExecutor,
        node: CommunityNode,
        tx: Transaction | None = None,
    ) -> None:
        """Detach-delete the node with this uuid (Entity, Episodic or Community).
        The query also collects and returns the uuids of incident edges,
        although this method ignores the returned value.
        """
        query = """
            MATCH (n {uuid: $uuid})
            WHERE n:Entity OR n:Episodic OR n:Community
            OPTIONAL MATCH (n)-[r]-()
            WITH collect(r.uuid) AS edge_uuids, n
            DETACH DELETE n
            RETURN edge_uuids
        """
        if tx is not None:
            await tx.run(query, uuid=node.uuid)
        else:
            await executor.execute_query(query, uuid=node.uuid)
        logger.debug(f'Deleted Node: {node.uuid}')
    async def delete_by_group_id(
        self,
        executor: QueryExecutor,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Detach-delete every Community node belonging to ``group_id``."""
        query = """
            MATCH (n:Community {group_id: $group_id})
            DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, group_id=group_id)
        else:
            await executor.execute_query(query, group_id=group_id)
    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Detach-delete every Community node whose uuid appears in ``uuids``."""
        query = """
            MATCH (n:Community)
            WHERE n.uuid IN $uuids
            DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)
    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> CommunityNode:
        """Fetch one community node by uuid.
        Raises:
            NodeNotFoundError: if no node with that uuid exists.
        """
        query = (
            """
            MATCH (c:Community {uuid: $uuid})
            RETURN
            """
            + COMMUNITY_NODE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid)
        nodes = [community_node_from_record(r) for r in records]
        if len(nodes) == 0:
            raise NodeNotFoundError(uuid)
        return nodes[0]
    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[CommunityNode]:
        """Fetch all community nodes whose uuid appears in ``uuids``."""
        query = (
            """
            MATCH (c:Community)
            WHERE c.uuid IN $uuids
            RETURN
            """
            + COMMUNITY_NODE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        return [community_node_from_record(r) for r in records]
    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[CommunityNode]:
        """Page through community nodes of the given groups, descending by uuid.
        ``uuid_cursor`` is an exclusive upper bound for keyset pagination.
        """
        cursor_clause = 'AND c.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (c:Community)
            WHERE c.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + COMMUNITY_NODE_RETURN
            + """
            ORDER BY c.uuid DESC
            """
            + limit_clause
        )
        # Unused parameters ($uuid / $limit) are passed regardless; the
        # corresponding clauses are simply absent from the query text.
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
        )
        return [community_node_from_record(r) for r in records]
    async def load_name_embedding(
        self,
        executor: QueryExecutor,
        node: CommunityNode,
    ) -> None:
        """Populate ``node.name_embedding`` from the stored graph value.
        Raises:
            NodeNotFoundError: if the node is not present in the graph.
        """
        query = """
            MATCH (c:Community {uuid: $uuid})
            RETURN c.name_embedding AS name_embedding
        """
        records, _, _ = await executor.execute_query(query, uuid=node.uuid)
        if len(records) == 0:
            raise NodeNotFoundError(node.uuid)
        node.name_embedding = records[0]['name_embedding']
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/falkordb/operations/community_node_ops.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/falkordb/operations/entity_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.entity_edge_ops import EntityEdgeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.driver.record_parsers import entity_edge_from_record
from graphiti_core.edges import EntityEdge
from graphiti_core.errors import EdgeNotFoundError
from graphiti_core.models.edges.edge_db_queries import (
get_entity_edge_return_query,
get_entity_edge_save_bulk_query,
get_entity_edge_save_query,
)
logger = logging.getLogger(__name__)
class FalkorEntityEdgeOperations(EntityEdgeOperations):
    """FalkorDB implementation of CRUD operations for entity (RELATES_TO) edges.
    Mutating methods accept an optional open ``tx`` (Transaction); when given,
    the query runs inside it, otherwise it executes directly on ``executor``.
    """
    async def save(
        self,
        executor: QueryExecutor,
        edge: EntityEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Persist (upsert) a single entity edge."""
        edge_data: dict[str, Any] = {
            'uuid': edge.uuid,
            'source_uuid': edge.source_node_uuid,
            'target_uuid': edge.target_node_uuid,
            'name': edge.name,
            'fact': edge.fact,
            'fact_embedding': edge.fact_embedding,
            'group_id': edge.group_id,
            'episodes': edge.episodes,
            'created_at': edge.created_at,
            'expired_at': edge.expired_at,
            'valid_at': edge.valid_at,
            'invalid_at': edge.invalid_at,
        }
        # Custom attributes are merged on top and can shadow the core keys.
        edge_data.update(edge.attributes or {})
        query = get_entity_edge_save_query(GraphProvider.FALKORDB)
        if tx is not None:
            await tx.run(query, edge_data=edge_data)
        else:
            await executor.execute_query(query, edge_data=edge_data)
        logger.debug(f'Saved Edge to Graph: {edge.uuid}')
    async def save_bulk(
        self,
        executor: QueryExecutor,
        edges: list[EntityEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Persist many entity edges with a single bulk query.
        NOTE: the bulk query expects ``source_node_uuid``/``target_node_uuid``
        keys, unlike the single-save query's ``source_uuid``/``target_uuid``.
        """
        prepared: list[dict[str, Any]] = []
        for edge in edges:
            edge_data: dict[str, Any] = {
                'uuid': edge.uuid,
                'source_node_uuid': edge.source_node_uuid,
                'target_node_uuid': edge.target_node_uuid,
                'name': edge.name,
                'fact': edge.fact,
                'fact_embedding': edge.fact_embedding,
                'group_id': edge.group_id,
                'episodes': edge.episodes,
                'created_at': edge.created_at,
                'expired_at': edge.expired_at,
                'valid_at': edge.valid_at,
                'invalid_at': edge.invalid_at,
            }
            edge_data.update(edge.attributes or {})
            prepared.append(edge_data)
        query = get_entity_edge_save_bulk_query(GraphProvider.FALKORDB)
        if tx is not None:
            await tx.run(query, entity_edges=prepared)
        else:
            await executor.execute_query(query, entity_edges=prepared)
    async def delete(
        self,
        executor: QueryExecutor,
        edge: EntityEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Delete the edge carrying this uuid, whatever its relationship type."""
        query = """
            MATCH (n)-[e:MENTIONS|RELATES_TO|HAS_MEMBER {uuid: $uuid}]->(m)
            DELETE e
        """
        if tx is not None:
            await tx.run(query, uuid=edge.uuid)
        else:
            await executor.execute_query(query, uuid=edge.uuid)
        logger.debug(f'Deleted Edge: {edge.uuid}')
    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        """Delete every edge whose uuid appears in ``uuids``."""
        query = """
            MATCH (n)-[e:MENTIONS|RELATES_TO|HAS_MEMBER]->(m)
            WHERE e.uuid IN $uuids
            DELETE e
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)
    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> EntityEdge:
        """Fetch one RELATES_TO edge by uuid.
        Raises:
            EdgeNotFoundError: if no edge with that uuid exists.
        """
        query = """
            MATCH (n:Entity)-[e:RELATES_TO {uuid: $uuid}]->(m:Entity)
            RETURN
        """ + get_entity_edge_return_query(GraphProvider.FALKORDB)
        records, _, _ = await executor.execute_query(query, uuid=uuid)
        edges = [entity_edge_from_record(r) for r in records]
        if len(edges) == 0:
            raise EdgeNotFoundError(uuid)
        return edges[0]
    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[EntityEdge]:
        """Fetch all RELATES_TO edges whose uuid appears in ``uuids``."""
        if not uuids:
            # Short-circuit: an empty IN-list can match nothing.
            return []
        query = """
            MATCH (n:Entity)-[e:RELATES_TO]->(m:Entity)
            WHERE e.uuid IN $uuids
            RETURN
        """ + get_entity_edge_return_query(GraphProvider.FALKORDB)
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        return [entity_edge_from_record(r) for r in records]
    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EntityEdge]:
        """Page through RELATES_TO edges of the given groups, descending by uuid.
        ``uuid_cursor`` is an exclusive upper bound for keyset pagination.
        """
        cursor_clause = 'AND e.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Entity)-[e:RELATES_TO]->(m:Entity)
            WHERE e.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + get_entity_edge_return_query(GraphProvider.FALKORDB)
            + """
            ORDER BY e.uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
        )
        return [entity_edge_from_record(r) for r in records]
    async def get_between_nodes(
        self,
        executor: QueryExecutor,
        source_node_uuid: str,
        target_node_uuid: str,
    ) -> list[EntityEdge]:
        """Fetch all directed RELATES_TO edges from source node to target node."""
        query = """
            MATCH (n:Entity {uuid: $source_node_uuid})-[e:RELATES_TO]->(m:Entity {uuid: $target_node_uuid})
            RETURN
        """ + get_entity_edge_return_query(GraphProvider.FALKORDB)
        records, _, _ = await executor.execute_query(
            query,
            source_node_uuid=source_node_uuid,
            target_node_uuid=target_node_uuid,
        )
        return [entity_edge_from_record(r) for r in records]
    async def get_by_node_uuid(
        self,
        executor: QueryExecutor,
        node_uuid: str,
    ) -> list[EntityEdge]:
        """Fetch all RELATES_TO edges touching the node (match is undirected)."""
        query = """
            MATCH (n:Entity {uuid: $node_uuid})-[e:RELATES_TO]-(m:Entity)
            RETURN
        """ + get_entity_edge_return_query(GraphProvider.FALKORDB)
        records, _, _ = await executor.execute_query(query, node_uuid=node_uuid)
        return [entity_edge_from_record(r) for r in records]
    async def load_embeddings(
        self,
        executor: QueryExecutor,
        edge: EntityEdge,
    ) -> None:
        """Populate ``edge.fact_embedding`` from the stored graph value.
        Raises:
            EdgeNotFoundError: if the edge is not present in the graph.
        """
        query = """
            MATCH (n:Entity)-[e:RELATES_TO {uuid: $uuid}]->(m:Entity)
            RETURN e.fact_embedding AS fact_embedding
        """
        records, _, _ = await executor.execute_query(query, uuid=edge.uuid)
        if len(records) == 0:
            raise EdgeNotFoundError(edge.uuid)
        edge.fact_embedding = records[0]['fact_embedding']
    async def load_embeddings_bulk(
        self,
        executor: QueryExecutor,
        edges: list[EntityEdge],
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Populate ``fact_embedding`` on each edge found in the graph.
        Edges absent from the result set keep their current embedding; no
        error is raised for missing edges (unlike ``load_embeddings``).
        """
        uuids = [e.uuid for e in edges]
        query = """
            MATCH (n:Entity)-[e:RELATES_TO]-(m:Entity)
            WHERE e.uuid IN $edge_uuids
            RETURN DISTINCT e.uuid AS uuid, e.fact_embedding AS fact_embedding
        """
        records, _, _ = await executor.execute_query(query, edge_uuids=uuids)
        embedding_map = {r['uuid']: r['fact_embedding'] for r in records}
        for edge in edges:
            if edge.uuid in embedding_map:
                edge.fact_embedding = embedding_map[edge.uuid]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/falkordb/operations/entity_edge_ops.py",
"license": "Apache License 2.0",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/falkordb/operations/entity_node_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.entity_node_ops import EntityNodeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.driver.record_parsers import entity_node_from_record
from graphiti_core.errors import NodeNotFoundError
from graphiti_core.models.nodes.node_db_queries import (
get_entity_node_return_query,
get_entity_node_save_bulk_query,
get_entity_node_save_query,
)
from graphiti_core.nodes import EntityNode
logger = logging.getLogger(__name__)
class FalkorEntityNodeOperations(EntityNodeOperations):
    """FalkorDB implementation of CRUD operations for Entity nodes.
    Mutating methods accept an optional open ``tx`` (Transaction); when given,
    the query runs inside it, otherwise it executes directly on ``executor``.
    """
    async def save(
        self,
        executor: QueryExecutor,
        node: EntityNode,
        tx: Transaction | None = None,
    ) -> None:
        """Persist (upsert) a single entity node with its labels."""
        entity_data: dict[str, Any] = {
            'uuid': node.uuid,
            'name': node.name,
            'name_embedding': node.name_embedding,
            'group_id': node.group_id,
            'summary': node.summary,
            'created_at': node.created_at,
        }
        # Custom attributes are merged on top and can shadow the core keys.
        entity_data.update(node.attributes or {})
        # Labels are deduplicated and always include 'Entity'; order is
        # whatever set iteration yields.
        labels = ':'.join(list(set(node.labels + ['Entity'])))
        query = get_entity_node_save_query(GraphProvider.FALKORDB, labels)
        if tx is not None:
            await tx.run(query, entity_data=entity_data)
        else:
            await executor.execute_query(query, entity_data=entity_data)
        logger.debug(f'Saved Node to Graph: {node.uuid}')
    async def save_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[EntityNode],
        tx: Transaction | None = None,
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Persist many entity nodes via the provider's bulk-save queries."""
        prepared: list[dict[str, Any]] = []
        for node in nodes:
            entity_data: dict[str, Any] = {
                'uuid': node.uuid,
                'name': node.name,
                'group_id': node.group_id,
                'summary': node.summary,
                'created_at': node.created_at,
                'name_embedding': node.name_embedding,
                # Bulk path passes labels as a list; the single-save path
                # instead bakes them into the query string.
                'labels': list(set(node.labels + ['Entity'])),
            }
            entity_data.update(node.attributes or {})
            prepared.append(entity_data)
        # FalkorDB returns a list of (query, params) tuples for bulk save
        queries: list[tuple[str, dict[str, Any]]] = get_entity_node_save_bulk_query(  # type: ignore[assignment]
            GraphProvider.FALKORDB, prepared
        )
        for query, params in queries:
            if tx is not None:
                await tx.run(query, **params)
            else:
                await executor.execute_query(query, **params)
    async def delete(
        self,
        executor: QueryExecutor,
        node: EntityNode,
        tx: Transaction | None = None,
    ) -> None:
        """Detach-delete the node with this uuid (Entity, Episodic or Community).
        The query also collects and returns the uuids of incident edges,
        although this method ignores the returned value.
        """
        query = """
            MATCH (n {uuid: $uuid})
            WHERE n:Entity OR n:Episodic OR n:Community
            OPTIONAL MATCH (n)-[r]-()
            WITH collect(r.uuid) AS edge_uuids, n
            DETACH DELETE n
            RETURN edge_uuids
        """
        if tx is not None:
            await tx.run(query, uuid=node.uuid)
        else:
            await executor.execute_query(query, uuid=node.uuid)
        logger.debug(f'Deleted Node: {node.uuid}')
    async def delete_by_group_id(
        self,
        executor: QueryExecutor,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Detach-delete every Entity node belonging to ``group_id``."""
        query = """
            MATCH (n:Entity {group_id: $group_id})
            DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, group_id=group_id)
        else:
            await executor.execute_query(query, group_id=group_id)
    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Detach-delete every Entity node whose uuid appears in ``uuids``."""
        query = """
            MATCH (n:Entity)
            WHERE n.uuid IN $uuids
            DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)
    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> EntityNode:
        """Fetch one entity node by uuid.
        Raises:
            NodeNotFoundError: if no node with that uuid exists.
        """
        query = """
            MATCH (n:Entity {uuid: $uuid})
            RETURN
        """ + get_entity_node_return_query(GraphProvider.FALKORDB)
        records, _, _ = await executor.execute_query(query, uuid=uuid)
        nodes = [entity_node_from_record(r) for r in records]
        if len(nodes) == 0:
            raise NodeNotFoundError(uuid)
        return nodes[0]
    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[EntityNode]:
        """Fetch all entity nodes whose uuid appears in ``uuids``."""
        query = """
            MATCH (n:Entity)
            WHERE n.uuid IN $uuids
            RETURN
        """ + get_entity_node_return_query(GraphProvider.FALKORDB)
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        return [entity_node_from_record(r) for r in records]
    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EntityNode]:
        """Page through entity nodes of the given groups, descending by uuid.
        ``uuid_cursor`` is an exclusive upper bound for keyset pagination.
        """
        cursor_clause = 'AND n.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Entity)
            WHERE n.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + get_entity_node_return_query(GraphProvider.FALKORDB)
            + """
            ORDER BY n.uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
        )
        return [entity_node_from_record(r) for r in records]
    async def load_embeddings(
        self,
        executor: QueryExecutor,
        node: EntityNode,
    ) -> None:
        """Populate ``node.name_embedding`` from the stored graph value.
        Raises:
            NodeNotFoundError: if the node is not present in the graph.
        """
        query = """
            MATCH (n:Entity {uuid: $uuid})
            RETURN n.name_embedding AS name_embedding
        """
        records, _, _ = await executor.execute_query(query, uuid=node.uuid)
        if len(records) == 0:
            raise NodeNotFoundError(node.uuid)
        node.name_embedding = records[0]['name_embedding']
    async def load_embeddings_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[EntityNode],
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Populate ``name_embedding`` on each node found in the graph.
        Nodes absent from the result set keep their current embedding; no
        error is raised for missing nodes (unlike ``load_embeddings``).
        """
        uuids = [n.uuid for n in nodes]
        query = """
            MATCH (n:Entity)
            WHERE n.uuid IN $uuids
            RETURN DISTINCT n.uuid AS uuid, n.name_embedding AS name_embedding
        """
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        embedding_map = {r['uuid']: r['name_embedding'] for r in records}
        for node in nodes:
            if node.uuid in embedding_map:
                node.name_embedding = embedding_map[node.uuid]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/falkordb/operations/entity_node_ops.py",
"license": "Apache License 2.0",
"lines": 215,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/falkordb/operations/episode_node_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from datetime import datetime
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.episode_node_ops import EpisodeNodeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.driver.record_parsers import episodic_node_from_record
from graphiti_core.errors import NodeNotFoundError
from graphiti_core.models.nodes.node_db_queries import (
EPISODIC_NODE_RETURN,
get_episode_node_save_bulk_query,
get_episode_node_save_query,
)
from graphiti_core.nodes import EpisodicNode
logger = logging.getLogger(__name__)
class FalkorEpisodeNodeOperations(EpisodeNodeOperations):
    """FalkorDB implementation of CRUD operations for Episodic nodes.
    Mutating methods accept an optional open ``tx`` (Transaction); when given,
    the query runs inside it, otherwise it executes directly on ``executor``.
    """
    async def save(
        self,
        executor: QueryExecutor,
        node: EpisodicNode,
        tx: Transaction | None = None,
    ) -> None:
        """Persist (upsert) a single episodic node."""
        query = get_episode_node_save_query(GraphProvider.FALKORDB)
        # Keys must match the placeholders in the provider-specific save query.
        params: dict[str, Any] = {
            'uuid': node.uuid,
            'name': node.name,
            'group_id': node.group_id,
            'source_description': node.source_description,
            'content': node.content,
            'entity_edges': node.entity_edges,
            'created_at': node.created_at,
            'valid_at': node.valid_at,
            'source': node.source.value,  # store the enum's raw value
        }
        if tx is not None:
            await tx.run(query, **params)
        else:
            await executor.execute_query(query, **params)
        logger.debug(f'Saved Episode to Graph: {node.uuid}')
    async def save_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[EpisodicNode],
        tx: Transaction | None = None,
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Persist many episodic nodes with a single bulk query."""
        episodes = []
        for node in nodes:
            # Materialize the model's fields as a plain dict for the bulk query
            # (assumes EpisodicNode iterates as (field, value) pairs — pydantic).
            ep = dict(node)
            ep['source'] = str(ep['source'].value)
            # 'labels' is not a stored property; drop it if present.
            ep.pop('labels', None)
            episodes.append(ep)
        query = get_episode_node_save_bulk_query(GraphProvider.FALKORDB)
        if tx is not None:
            await tx.run(query, episodes=episodes)
        else:
            await executor.execute_query(query, episodes=episodes)
    async def delete(
        self,
        executor: QueryExecutor,
        node: EpisodicNode,
        tx: Transaction | None = None,
    ) -> None:
        """Detach-delete the node with this uuid (Entity, Episodic or Community).
        The query also collects and returns the uuids of incident edges,
        although this method ignores the returned value.
        """
        query = """
            MATCH (n {uuid: $uuid})
            WHERE n:Entity OR n:Episodic OR n:Community
            OPTIONAL MATCH (n)-[r]-()
            WITH collect(r.uuid) AS edge_uuids, n
            DETACH DELETE n
            RETURN edge_uuids
        """
        if tx is not None:
            await tx.run(query, uuid=node.uuid)
        else:
            await executor.execute_query(query, uuid=node.uuid)
        logger.debug(f'Deleted Node: {node.uuid}')
    async def delete_by_group_id(
        self,
        executor: QueryExecutor,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Detach-delete every Episodic node belonging to ``group_id``."""
        query = """
            MATCH (n:Episodic {group_id: $group_id})
            DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, group_id=group_id)
        else:
            await executor.execute_query(query, group_id=group_id)
    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Detach-delete every Episodic node whose uuid appears in ``uuids``."""
        query = """
            MATCH (n:Episodic)
            WHERE n.uuid IN $uuids
            DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)
    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> EpisodicNode:
        """Fetch one episodic node by uuid.
        Raises:
            NodeNotFoundError: if no node with that uuid exists.
        """
        query = (
            """
            MATCH (e:Episodic {uuid: $uuid})
            RETURN
            """
            + EPISODIC_NODE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid)
        episodes = [episodic_node_from_record(r) for r in records]
        if len(episodes) == 0:
            raise NodeNotFoundError(uuid)
        return episodes[0]
    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[EpisodicNode]:
        """Fetch all episodic nodes whose uuid appears in ``uuids``."""
        query = (
            """
            MATCH (e:Episodic)
            WHERE e.uuid IN $uuids
            RETURN DISTINCT
            """
            + EPISODIC_NODE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        return [episodic_node_from_record(r) for r in records]
    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EpisodicNode]:
        """Page through episodic nodes of the given groups, descending by uuid.
        ``uuid_cursor`` is an exclusive upper bound for keyset pagination.
        """
        cursor_clause = 'AND e.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (e:Episodic)
            WHERE e.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN DISTINCT
            """
            + EPISODIC_NODE_RETURN
            + """
            ORDER BY uuid DESC
            """
            # NOTE(review): sibling queries order by the qualified e.uuid; this
            # one orders by the bare projected `uuid` — presumably equivalent
            # given the RETURN aliasing, but worth confirming.
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
        )
        return [episodic_node_from_record(r) for r in records]
    async def get_by_entity_node_uuid(
        self,
        executor: QueryExecutor,
        entity_node_uuid: str,
    ) -> list[EpisodicNode]:
        """Fetch all episodes that MENTION the given entity node."""
        query = (
            """
            MATCH (e:Episodic)-[r:MENTIONS]->(n:Entity {uuid: $entity_node_uuid})
            RETURN DISTINCT
            """
            + EPISODIC_NODE_RETURN
        )
        records, _, _ = await executor.execute_query(query, entity_node_uuid=entity_node_uuid)
        return [episodic_node_from_record(r) for r in records]
    async def retrieve_episodes(
        self,
        executor: QueryExecutor,
        reference_time: datetime,
        last_n: int = 3,
        group_ids: list[str] | None = None,
        source: str | None = None,
        saga: str | None = None,
    ) -> list[EpisodicNode]:
        """Return up to ``last_n`` episodes valid at or before ``reference_time``.
        When ``saga`` and a non-empty ``group_ids`` are both given, episodes are
        resolved through the saga's HAS_EPISODE edges — only the FIRST group id
        is used for that lookup. Otherwise episodes are filtered directly by
        group and (optionally) source. Results come back newest-first by
        valid_at.
        """
        if saga is not None and group_ids is not None and len(group_ids) > 0:
            source_clause = 'AND e.source = $source' if source else ''
            query = (
                """
                MATCH (s:Saga {name: $saga_name, group_id: $group_id})-[:HAS_EPISODE]->(e:Episodic)
                WHERE e.valid_at <= $reference_time
                """
                + source_clause
                + """
                RETURN
                """
                + EPISODIC_NODE_RETURN
                + """
                ORDER BY e.valid_at DESC
                LIMIT $num_episodes
                """
            )
            records, _, _ = await executor.execute_query(
                query,
                saga_name=saga,
                group_id=group_ids[0],  # saga lookup is single-group
                reference_time=reference_time,
                source=source,
                num_episodes=last_n,
            )
        else:
            source_clause = 'AND e.source = $source' if source else ''
            group_clause = 'AND e.group_id IN $group_ids' if group_ids else ''
            query = (
                """
                MATCH (e:Episodic)
                WHERE e.valid_at <= $reference_time
                """
                + group_clause
                + source_clause
                + """
                RETURN
                """
                + EPISODIC_NODE_RETURN
                + """
                ORDER BY e.valid_at DESC
                LIMIT $num_episodes
                """
            )
            records, _, _ = await executor.execute_query(
                query,
                reference_time=reference_time,
                group_ids=group_ids,
                source=source,
                num_episodes=last_n,
            )
        return [episodic_node_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/falkordb/operations/episode_node_ops.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/falkordb/operations/episodic_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.episodic_edge_ops import EpisodicEdgeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.edges import EpisodicEdge
from graphiti_core.errors import EdgeNotFoundError
from graphiti_core.helpers import parse_db_date
from graphiti_core.models.edges.edge_db_queries import (
EPISODIC_EDGE_RETURN,
EPISODIC_EDGE_SAVE,
get_episodic_edge_save_bulk_query,
)
logger = logging.getLogger(__name__)
def _episodic_edge_from_record(record: Any) -> EpisodicEdge:
    """Translate a raw FalkorDB result record into an EpisodicEdge model."""
    fields = {
        key: record[key]
        for key in ('uuid', 'group_id', 'source_node_uuid', 'target_node_uuid')
    }
    # created_at comes back as a DB-native value; normalize it to a datetime.
    fields['created_at'] = parse_db_date(record['created_at'])  # type: ignore[arg-type]
    return EpisodicEdge(**fields)
class FalkorEpisodicEdgeOperations(EpisodicEdgeOperations):
    """FalkorDB-backed persistence for episodic (MENTIONS) edges.

    Writes may run inside a caller-supplied transaction; reads always go
    straight to the executor.
    """

    @staticmethod
    async def _write(
        executor: QueryExecutor,
        query: str,
        tx: Transaction | None,
        **params: Any,
    ) -> None:
        # Route the write through the open transaction when one is supplied,
        # otherwise execute it directly on the executor.
        if tx is not None:
            await tx.run(query, **params)
        else:
            await executor.execute_query(query, **params)

    async def save(
        self,
        executor: QueryExecutor,
        edge: EpisodicEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Persist a single MENTIONS edge linking an episode to an entity."""
        await self._write(
            executor,
            EPISODIC_EDGE_SAVE,
            tx,
            episode_uuid=edge.source_node_uuid,
            entity_uuid=edge.target_node_uuid,
            uuid=edge.uuid,
            group_id=edge.group_id,
            created_at=edge.created_at,
        )
        logger.debug(f'Saved Edge to Graph: {edge.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        edges: list[EpisodicEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Persist many MENTIONS edges with one bulk query."""
        bulk_query = get_episodic_edge_save_bulk_query(GraphProvider.FALKORDB)
        payload = [edge.model_dump() for edge in edges]
        await self._write(executor, bulk_query, tx, episodic_edges=payload)

    async def delete(
        self,
        executor: QueryExecutor,
        edge: EpisodicEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Delete the edge carrying this uuid, whatever its relationship type."""
        query = """
            MATCH (n)-[e:MENTIONS|RELATES_TO|HAS_MEMBER {uuid: $uuid}]->(m)
            DELETE e
        """
        await self._write(executor, query, tx, uuid=edge.uuid)
        logger.debug(f'Deleted Edge: {edge.uuid}')

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        """Delete every edge whose uuid appears in ``uuids``."""
        query = """
            MATCH (n)-[e:MENTIONS|RELATES_TO|HAS_MEMBER]->(m)
            WHERE e.uuid IN $uuids
            DELETE e
        """
        await self._write(executor, query, tx, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> EpisodicEdge:
        """Fetch one MENTIONS edge by uuid; raise EdgeNotFoundError when absent."""
        query = (
            """
            MATCH (n:Episodic)-[e:MENTIONS {uuid: $uuid}]->(m:Entity)
            RETURN
            """
            + EPISODIC_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid)
        matches = [_episodic_edge_from_record(record) for record in records]
        if not matches:
            raise EdgeNotFoundError(uuid)
        return matches[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[EpisodicEdge]:
        """Fetch all MENTIONS edges whose uuid appears in ``uuids``."""
        query = (
            """
            MATCH (n:Episodic)-[e:MENTIONS]->(m:Entity)
            WHERE e.uuid IN $uuids
            RETURN
            """
            + EPISODIC_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        return [_episodic_edge_from_record(record) for record in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EpisodicEdge]:
        """Page through MENTIONS edges of the given groups, descending by uuid.

        ``uuid_cursor`` is an exclusive upper bound for keyset pagination.
        """
        page_filter = 'AND e.uuid < $uuid' if uuid_cursor else ''
        page_cap = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Episodic)-[e:MENTIONS]->(m:Entity)
            WHERE e.group_id IN $group_ids
            """
            + page_filter
            + """
            RETURN
            """
            + EPISODIC_EDGE_RETURN
            + """
            ORDER BY e.uuid DESC
            """
            + page_cap
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
        )
        return [_episodic_edge_from_record(record) for record in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/falkordb/operations/episodic_edge_ops.py",
"license": "Apache License 2.0",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/falkordb/operations/graph_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncio
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.graph_ops import GraphMaintenanceOperations
from graphiti_core.driver.operations.graph_utils import Neighbor, label_propagation
from graphiti_core.driver.query_executor import QueryExecutor
from graphiti_core.driver.record_parsers import community_node_from_record, entity_node_from_record
from graphiti_core.graph_queries import get_fulltext_indices, get_range_indices
from graphiti_core.models.nodes.node_db_queries import (
COMMUNITY_NODE_RETURN,
get_entity_node_return_query,
)
from graphiti_core.nodes import CommunityNode, EntityNode, EpisodicNode
logger = logging.getLogger(__name__)
class FalkorGraphMaintenanceOperations(GraphMaintenanceOperations):
    """FalkorDB-backed graph-wide maintenance: wipes, indices, and community clustering."""

    async def clear_data(
        self,
        executor: QueryExecutor,
        group_ids: list[str] | None = None,
    ) -> None:
        """Delete everything, or only nodes (and their edges) in the given groups."""
        if group_ids is None:
            await executor.execute_query('MATCH (n) DETACH DELETE n')
        else:
            # FalkorDB: iterate labels individually
            for label in ['Entity', 'Episodic', 'Community']:
                await executor.execute_query(
                    f"""
                    MATCH (n:{label})
                    WHERE n.group_id IN $group_ids
                    DETACH DELETE n
                    """,
                    group_ids=group_ids,
                )

    async def build_indices_and_constraints(
        self,
        executor: QueryExecutor,
        delete_existing: bool = False,
    ) -> None:
        """Create range and fulltext indices, optionally dropping existing ones first."""
        if delete_existing:
            await self.delete_all_indexes(executor)
        range_indices = get_range_indices(GraphProvider.FALKORDB)
        fulltext_indices = get_fulltext_indices(GraphProvider.FALKORDB)
        index_queries = range_indices + fulltext_indices
        # FalkorDB executes indices sequentially (catches "already indexed" in execute_query)
        for query in index_queries:
            await executor.execute_query(query)

    async def delete_all_indexes(
        self,
        executor: QueryExecutor,
    ) -> None:
        """Drop every RANGE and FULLTEXT index reported by `db.indexes()`."""
        result = await executor.execute_query('CALL db.indexes()')
        if not result:
            return
        records, _, _ = result
        drop_tasks = []
        for record in records:
            label = record['label']
            entity_type = record['entitytype']
            # Each record maps field name -> list of index types for that field.
            for field_name, index_type in record['types'].items():
                if 'RANGE' in index_type:
                    drop_tasks.append(
                        executor.execute_query(f'DROP INDEX ON :{label}({field_name})')
                    )
                elif 'FULLTEXT' in index_type:
                    # Fulltext drop syntax differs for node vs relationship indices.
                    if entity_type == 'NODE':
                        drop_tasks.append(
                            executor.execute_query(
                                f'DROP FULLTEXT INDEX FOR (n:{label}) ON (n.{field_name})'
                            )
                        )
                    elif entity_type == 'RELATIONSHIP':
                        drop_tasks.append(
                            executor.execute_query(
                                f'DROP FULLTEXT INDEX FOR ()-[e:{label}]-() ON (e.{field_name})'
                            )
                        )
        if drop_tasks:
            await asyncio.gather(*drop_tasks)

    async def get_community_clusters(
        self,
        executor: QueryExecutor,
        group_ids: list[str] | None = None,
    ) -> list[Any]:
        """Cluster Entity nodes per group via label propagation over RELATES_TO edges.

        Returns a list of clusters, each a list of EntityNode. When `group_ids`
        is None, every group_id present on Entity nodes is processed.
        """
        community_clusters: list[list[EntityNode]] = []
        if group_ids is None:
            group_id_values, _, _ = await executor.execute_query(
                """
                MATCH (n:Entity)
                WHERE n.group_id IS NOT NULL
                RETURN
                    collect(DISTINCT n.group_id) AS group_ids
                """
            )
            group_ids = group_id_values[0]['group_ids'] if group_id_values else []
        resolved_group_ids: list[str] = group_ids or []
        for group_id in resolved_group_ids:
            # Build an adjacency projection: node uuid -> neighbors with edge counts.
            projection: dict[str, list[Neighbor]] = {}
            node_records, _, _ = await executor.execute_query(
                """
                MATCH (n:Entity)
                WHERE n.group_id IN $group_ids
                RETURN
                """
                + get_entity_node_return_query(GraphProvider.FALKORDB),
                group_ids=[group_id],
            )
            nodes = [entity_node_from_record(r) for r in node_records]
            for node in nodes:
                # One query per node: count RELATES_TO edges to each neighbor in-group.
                records, _, _ = await executor.execute_query(
                    """
                    MATCH (n:Entity {group_id: $group_id, uuid: $uuid})-[e:RELATES_TO]-(m: Entity {group_id: $group_id})
                    WITH count(e) AS count, m.uuid AS uuid
                    RETURN
                        uuid,
                        count
                    """,
                    uuid=node.uuid,
                    group_id=group_id,
                )
                projection[node.uuid] = [
                    Neighbor(node_uuid=record['uuid'], edge_count=record['count'])
                    for record in records
                ]
            cluster_uuids = label_propagation(projection)
            for cluster in cluster_uuids:
                if not cluster:
                    continue
                cluster_records, _, _ = await executor.execute_query(
                    """
                    MATCH (n:Entity)
                    WHERE n.uuid IN $uuids
                    RETURN
                    """
                    + get_entity_node_return_query(GraphProvider.FALKORDB),
                    uuids=cluster,
                )
                community_clusters.append([entity_node_from_record(r) for r in cluster_records])
        return community_clusters

    async def remove_communities(
        self,
        executor: QueryExecutor,
    ) -> None:
        """Delete all Community nodes and their edges."""
        await executor.execute_query(
            """
            MATCH (c:Community)
            DETACH DELETE c
            """
        )

    async def determine_entity_community(
        self,
        executor: QueryExecutor,
        entity: EntityNode,
    ) -> None:
        """Look up community membership for `entity`; currently returns None in all paths."""
        # Check if the node is already part of a community
        records, _, _ = await executor.execute_query(
            """
            MATCH (c:Community)-[:HAS_MEMBER]->(n:Entity {uuid: $entity_uuid})
            RETURN
            """
            + COMMUNITY_NODE_RETURN,
            entity_uuid=entity.uuid,
        )
        if len(records) > 0:
            return
        # If the node has no community, find the mode community of surrounding entities
        # NOTE(review): the records below are fetched but never used — the method
        # always returns None. Confirm whether a result was meant to be returned.
        records, _, _ = await executor.execute_query(
            """
            MATCH (c:Community)-[:HAS_MEMBER]->(m:Entity)-[:RELATES_TO]-(n:Entity {uuid: $entity_uuid})
            RETURN
            """
            + COMMUNITY_NODE_RETURN,
            entity_uuid=entity.uuid,
        )

    async def get_mentioned_nodes(
        self,
        executor: QueryExecutor,
        episodes: list[EpisodicNode],
    ) -> list[EntityNode]:
        """Return the distinct Entity nodes mentioned by any of the given episodes."""
        episode_uuids = [episode.uuid for episode in episodes]
        records, _, _ = await executor.execute_query(
            """
            MATCH (episode:Episodic)-[:MENTIONS]->(n:Entity)
            WHERE episode.uuid IN $uuids
            RETURN DISTINCT
            """
            + get_entity_node_return_query(GraphProvider.FALKORDB),
            uuids=episode_uuids,
        )
        return [entity_node_from_record(r) for r in records]

    async def get_communities_by_nodes(
        self,
        executor: QueryExecutor,
        nodes: list[EntityNode],
    ) -> list[CommunityNode]:
        """Return the distinct Community nodes containing any of the given entities."""
        node_uuids = [node.uuid for node in nodes]
        records, _, _ = await executor.execute_query(
            """
            MATCH (c:Community)-[:HAS_MEMBER]->(m:Entity)
            WHERE m.uuid IN $uuids
            RETURN DISTINCT
            """
            + COMMUNITY_NODE_RETURN,
            uuids=node_uuids,
        )
        return [community_node_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/falkordb/operations/graph_ops.py",
"license": "Apache License 2.0",
"lines": 218,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/falkordb/operations/has_episode_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.operations.has_episode_edge_ops import HasEpisodeEdgeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.edges import HasEpisodeEdge
from graphiti_core.errors import EdgeNotFoundError
from graphiti_core.helpers import parse_db_date
from graphiti_core.models.edges.edge_db_queries import (
HAS_EPISODE_EDGE_RETURN,
HAS_EPISODE_EDGE_SAVE,
)
logger = logging.getLogger(__name__)
def _has_episode_edge_from_record(record: Any) -> HasEpisodeEdge:
    """Build a HasEpisodeEdge model out of a raw FalkorDB record."""
    created = parse_db_date(record['created_at'])
    return HasEpisodeEdge(
        uuid=record['uuid'],
        group_id=record['group_id'],
        source_node_uuid=record['source_node_uuid'],
        target_node_uuid=record['target_node_uuid'],
        created_at=created,  # type: ignore[arg-type]
    )
class FalkorHasEpisodeEdgeOperations(HasEpisodeEdgeOperations):
    """FalkorDB-backed CRUD operations for HAS_EPISODE edges (Saga -> Episodic)."""

    async def save(
        self,
        executor: QueryExecutor,
        edge: HasEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Persist one HAS_EPISODE edge; runs inside `tx` when one is supplied."""
        params: dict[str, Any] = {
            'saga_uuid': edge.source_node_uuid,
            'episode_uuid': edge.target_node_uuid,
            'uuid': edge.uuid,
            'group_id': edge.group_id,
            'created_at': edge.created_at,
        }
        if tx is not None:
            await tx.run(HAS_EPISODE_EDGE_SAVE, **params)
        else:
            await executor.execute_query(HAS_EPISODE_EDGE_SAVE, **params)
        logger.debug(f'Saved Edge to Graph: {edge.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        edges: list[HasEpisodeEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Persist edges one-by-one (no bulk query for this edge type; batch_size unused)."""
        for edge in edges:
            await self.save(executor, edge, tx=tx)

    async def delete(
        self,
        executor: QueryExecutor,
        edge: HasEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Delete the HAS_EPISODE edge with the given uuid."""
        query = """
        MATCH (n:Saga)-[e:HAS_EPISODE {uuid: $uuid}]->(m:Episodic)
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuid=edge.uuid)
        else:
            await executor.execute_query(query, uuid=edge.uuid)
        logger.debug(f'Deleted Edge: {edge.uuid}')

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        """Delete all HAS_EPISODE edges whose uuid is in `uuids`."""
        query = """
        MATCH (n:Saga)-[e:HAS_EPISODE]->(m:Episodic)
        WHERE e.uuid IN $uuids
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> HasEpisodeEdge:
        """Fetch one HAS_EPISODE edge by uuid; raises EdgeNotFoundError when absent."""
        query = (
            """
            MATCH (n:Saga)-[e:HAS_EPISODE {uuid: $uuid}]->(m:Episodic)
            RETURN
            """
            + HAS_EPISODE_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid)
        edges = [_has_episode_edge_from_record(r) for r in records]
        if len(edges) == 0:
            raise EdgeNotFoundError(uuid)
        return edges[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[HasEpisodeEdge]:
        """Fetch all HAS_EPISODE edges whose uuid is in `uuids` (missing uuids skipped)."""
        query = (
            """
            MATCH (n:Saga)-[e:HAS_EPISODE]->(m:Episodic)
            WHERE e.uuid IN $uuids
            RETURN
            """
            + HAS_EPISODE_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        return [_has_episode_edge_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[HasEpisodeEdge]:
        """List HAS_EPISODE edges for the given groups.

        Keyset pagination: ordered by uuid DESC; with `uuid_cursor`, only uuids
        strictly below the cursor are returned.
        """
        cursor_clause = 'AND e.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Saga)-[e:HAS_EPISODE]->(m:Episodic)
            WHERE e.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + HAS_EPISODE_EDGE_RETURN
            + """
            ORDER BY e.uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
        )
        return [_has_episode_edge_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/falkordb/operations/has_episode_edge_ops.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/falkordb/operations/next_episode_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.operations.next_episode_edge_ops import NextEpisodeEdgeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.edges import NextEpisodeEdge
from graphiti_core.errors import EdgeNotFoundError
from graphiti_core.helpers import parse_db_date
from graphiti_core.models.edges.edge_db_queries import (
NEXT_EPISODE_EDGE_RETURN,
NEXT_EPISODE_EDGE_SAVE,
)
logger = logging.getLogger(__name__)
def _next_episode_edge_from_record(record: Any) -> NextEpisodeEdge:
    """Build a NextEpisodeEdge model out of a raw FalkorDB record."""
    kwargs = {key: record[key] for key in ('uuid', 'group_id', 'source_node_uuid', 'target_node_uuid')}
    kwargs['created_at'] = parse_db_date(record['created_at'])  # type: ignore[arg-type]
    return NextEpisodeEdge(**kwargs)
class FalkorNextEpisodeEdgeOperations(NextEpisodeEdgeOperations):
    """FalkorDB-backed CRUD operations for NEXT_EPISODE edges (Episodic -> Episodic)."""

    async def save(
        self,
        executor: QueryExecutor,
        edge: NextEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Persist one NEXT_EPISODE edge; runs inside `tx` when one is supplied."""
        params: dict[str, Any] = {
            'source_episode_uuid': edge.source_node_uuid,
            'target_episode_uuid': edge.target_node_uuid,
            'uuid': edge.uuid,
            'group_id': edge.group_id,
            'created_at': edge.created_at,
        }
        if tx is not None:
            await tx.run(NEXT_EPISODE_EDGE_SAVE, **params)
        else:
            await executor.execute_query(NEXT_EPISODE_EDGE_SAVE, **params)
        logger.debug(f'Saved Edge to Graph: {edge.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        edges: list[NextEpisodeEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Persist edges one-by-one (no bulk query for this edge type; batch_size unused)."""
        for edge in edges:
            await self.save(executor, edge, tx=tx)

    async def delete(
        self,
        executor: QueryExecutor,
        edge: NextEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Delete the NEXT_EPISODE edge with the given uuid."""
        query = """
        MATCH (n:Episodic)-[e:NEXT_EPISODE {uuid: $uuid}]->(m:Episodic)
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuid=edge.uuid)
        else:
            await executor.execute_query(query, uuid=edge.uuid)
        logger.debug(f'Deleted Edge: {edge.uuid}')

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        """Delete all NEXT_EPISODE edges whose uuid is in `uuids`."""
        query = """
        MATCH (n:Episodic)-[e:NEXT_EPISODE]->(m:Episodic)
        WHERE e.uuid IN $uuids
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> NextEpisodeEdge:
        """Fetch one NEXT_EPISODE edge by uuid; raises EdgeNotFoundError when absent."""
        query = (
            """
            MATCH (n:Episodic)-[e:NEXT_EPISODE {uuid: $uuid}]->(m:Episodic)
            RETURN
            """
            + NEXT_EPISODE_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid)
        edges = [_next_episode_edge_from_record(r) for r in records]
        if len(edges) == 0:
            raise EdgeNotFoundError(uuid)
        return edges[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[NextEpisodeEdge]:
        """Fetch all NEXT_EPISODE edges whose uuid is in `uuids` (missing uuids skipped)."""
        query = (
            """
            MATCH (n:Episodic)-[e:NEXT_EPISODE]->(m:Episodic)
            WHERE e.uuid IN $uuids
            RETURN
            """
            + NEXT_EPISODE_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        return [_next_episode_edge_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[NextEpisodeEdge]:
        """List NEXT_EPISODE edges for the given groups.

        Keyset pagination: ordered by uuid DESC; with `uuid_cursor`, only uuids
        strictly below the cursor are returned.
        """
        cursor_clause = 'AND e.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Episodic)-[e:NEXT_EPISODE]->(m:Episodic)
            WHERE e.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + NEXT_EPISODE_EDGE_RETURN
            + """
            ORDER BY e.uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
        )
        return [_next_episode_edge_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/falkordb/operations/next_episode_edge_ops.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/falkordb/operations/saga_node_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.saga_node_ops import SagaNodeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.errors import NodeNotFoundError
from graphiti_core.helpers import parse_db_date
from graphiti_core.models.nodes.node_db_queries import SAGA_NODE_RETURN, get_saga_node_save_query
from graphiti_core.nodes import SagaNode
logger = logging.getLogger(__name__)
def _saga_node_from_record(record: Any) -> SagaNode:
    """Build a SagaNode model out of a raw FalkorDB record."""
    created = parse_db_date(record['created_at'])
    return SagaNode(
        uuid=record['uuid'],
        name=record['name'],
        group_id=record['group_id'],
        created_at=created,  # type: ignore[arg-type]
    )
class FalkorSagaNodeOperations(SagaNodeOperations):
    """FalkorDB-backed CRUD operations for Saga nodes."""

    async def save(
        self,
        executor: QueryExecutor,
        node: SagaNode,
        tx: Transaction | None = None,
    ) -> None:
        """Persist one Saga node; runs inside `tx` when one is supplied."""
        query = get_saga_node_save_query(GraphProvider.FALKORDB)
        params: dict[str, Any] = {
            'uuid': node.uuid,
            'name': node.name,
            'group_id': node.group_id,
            'created_at': node.created_at,
        }
        if tx is not None:
            await tx.run(query, **params)
        else:
            await executor.execute_query(query, **params)
        logger.debug(f'Saved Saga Node to Graph: {node.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[SagaNode],
        tx: Transaction | None = None,
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Persist nodes one-by-one (no bulk query for Saga nodes; batch_size unused)."""
        for node in nodes:
            await self.save(executor, node, tx=tx)

    async def delete(
        self,
        executor: QueryExecutor,
        node: SagaNode,
        tx: Transaction | None = None,
    ) -> None:
        """Delete the Saga node with the given uuid along with its edges."""
        query = """
        MATCH (n:Saga {uuid: $uuid})
        DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, uuid=node.uuid)
        else:
            await executor.execute_query(query, uuid=node.uuid)
        logger.debug(f'Deleted Node: {node.uuid}')

    async def delete_by_group_id(
        self,
        executor: QueryExecutor,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Delete every Saga node in `group_id` along with its edges (batch_size unused)."""
        query = """
        MATCH (n:Saga {group_id: $group_id})
        DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, group_id=group_id)
        else:
            await executor.execute_query(query, group_id=group_id)

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,  # noqa: ARG002
    ) -> None:
        """Delete every Saga node whose uuid is in `uuids` (batch_size unused)."""
        query = """
        MATCH (n:Saga)
        WHERE n.uuid IN $uuids
        DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> SagaNode:
        """Fetch one Saga node by uuid; raises NodeNotFoundError when absent."""
        query = (
            """
            MATCH (s:Saga {uuid: $uuid})
            RETURN
            """
            + SAGA_NODE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid)
        nodes = [_saga_node_from_record(r) for r in records]
        if len(nodes) == 0:
            raise NodeNotFoundError(uuid)
        return nodes[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[SagaNode]:
        """Fetch all Saga nodes whose uuid is in `uuids` (missing uuids skipped)."""
        query = (
            """
            MATCH (s:Saga)
            WHERE s.uuid IN $uuids
            RETURN
            """
            + SAGA_NODE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids)
        return [_saga_node_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[SagaNode]:
        """List Saga nodes for the given groups.

        Keyset pagination: ordered by uuid DESC; with `uuid_cursor`, only uuids
        strictly below the cursor are returned.
        """
        cursor_clause = 'AND s.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (s:Saga)
            WHERE s.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + SAGA_NODE_RETURN
            + """
            ORDER BY s.uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
        )
        return [_saga_node_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/falkordb/operations/saga_node_ops.py",
"license": "Apache License 2.0",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/falkordb/operations/search_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.falkordb import STOPWORDS
from graphiti_core.driver.operations.search_ops import SearchOperations
from graphiti_core.driver.query_executor import QueryExecutor
from graphiti_core.driver.record_parsers import (
community_node_from_record,
entity_edge_from_record,
entity_node_from_record,
episodic_node_from_record,
)
from graphiti_core.edges import EntityEdge
from graphiti_core.graph_queries import (
get_nodes_query,
get_relationships_query,
get_vector_cosine_func_query,
)
from graphiti_core.models.edges.edge_db_queries import get_entity_edge_return_query
from graphiti_core.models.nodes.node_db_queries import (
COMMUNITY_NODE_RETURN,
EPISODIC_NODE_RETURN,
get_entity_node_return_query,
)
from graphiti_core.nodes import CommunityNode, EntityNode, EpisodicNode
from graphiti_core.search.search_filters import (
SearchFilters,
edge_search_filter_query_constructor,
node_search_filter_query_constructor,
)
logger = logging.getLogger(__name__)
MAX_QUERY_LENGTH = 128
# FalkorDB separator characters that break text into tokens
_SEPARATOR_MAP = str.maketrans(
{
',': ' ',
'.': ' ',
'<': ' ',
'>': ' ',
'{': ' ',
'}': ' ',
'[': ' ',
']': ' ',
'"': ' ',
"'": ' ',
':': ' ',
';': ' ',
'!': ' ',
'@': ' ',
'#': ' ',
'$': ' ',
'%': ' ',
'^': ' ',
'&': ' ',
'*': ' ',
'(': ' ',
')': ' ',
'-': ' ',
'+': ' ',
'=': ' ',
'~': ' ',
'?': ' ',
'|': ' ',
'/': ' ',
'\\': ' ',
}
)
def _sanitize(query: str) -> str:
    """Turn FalkorDB separator characters into spaces and collapse whitespace runs."""
    tokens = query.translate(_SEPARATOR_MAP).split()
    return ' '.join(tokens)
def _build_falkor_fulltext_query(
    query: str,
    group_ids: list[str] | None = None,
    max_query_length: int = MAX_QUERY_LENGTH,
) -> str:
    """Build a fulltext query string for FalkorDB using RedisSearch syntax.

    Tokens are OR-ed with '|', stopwords are dropped, and an optional
    `@group_id` filter is prepended. Returns '' when the combined token count
    would reach `max_query_length`.
    """
    group_filter = ''
    if group_ids:
        quoted = '|'.join(f'"{gid}"' for gid in group_ids)
        group_filter = f'(@group_id:{quoted})'

    tokens = [
        word for word in _sanitize(query).split() if word and word.lower() not in STOPWORDS
    ]
    sanitized_query = ' | '.join(tokens)

    if len(sanitized_query.split(' ')) + len(group_ids or '') >= max_query_length:
        return ''

    return group_filter + ' (' + sanitized_query + ')'
class FalkorSearchOperations(SearchOperations):
# --- Node search ---
    async def node_fulltext_search(
        self,
        executor: QueryExecutor,
        query: str,
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EntityNode]:
        """Fulltext search over Entity names/summaries, best-scoring first."""
        fuzzy_query = _build_falkor_fulltext_query(query, group_ids)
        # Empty string means the query was too long to run safely.
        if fuzzy_query == '':
            return []
        filter_queries, filter_params = node_search_filter_query_constructor(
            search_filter, GraphProvider.FALKORDB
        )
        if group_ids is not None:
            filter_queries.append('n.group_id IN $group_ids')
            filter_params['group_ids'] = group_ids
        filter_query = ''
        if filter_queries:
            filter_query = ' WHERE ' + (' AND '.join(filter_queries))
        cypher = (
            get_nodes_query(
                'node_name_and_summary', '$query', limit=limit, provider=GraphProvider.FALKORDB
            )
            + 'YIELD node AS n, score'
            + filter_query
            + """
            WITH n, score
            ORDER BY score DESC
            LIMIT $limit
            RETURN
            """
            + get_entity_node_return_query(GraphProvider.FALKORDB)
        )
        records, _, _ = await executor.execute_query(
            cypher,
            query=fuzzy_query,
            limit=limit,
            **filter_params,
        )
        return [entity_node_from_record(r) for r in records]
    async def node_similarity_search(
        self,
        executor: QueryExecutor,
        search_vector: list[float],
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
        min_score: float = 0.6,
    ) -> list[EntityNode]:
        """Cosine-similarity search over Entity name embeddings; keeps scores > min_score."""
        filter_queries, filter_params = node_search_filter_query_constructor(
            search_filter, GraphProvider.FALKORDB
        )
        if group_ids is not None:
            filter_queries.append('n.group_id IN $group_ids')
            filter_params['group_ids'] = group_ids
        filter_query = ''
        if filter_queries:
            filter_query = ' WHERE ' + (' AND '.join(filter_queries))
        cypher = (
            'MATCH (n:Entity)'
            + filter_query
            + """
            WITH n, """
            + get_vector_cosine_func_query(
                'n.name_embedding', '$search_vector', GraphProvider.FALKORDB
            )
            + """ AS score
            WHERE score > $min_score
            RETURN
            """
            + get_entity_node_return_query(GraphProvider.FALKORDB)
            + """
            ORDER BY score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            search_vector=search_vector,
            limit=limit,
            min_score=min_score,
            **filter_params,
        )
        return [entity_node_from_record(r) for r in records]
    async def node_bfs_search(
        self,
        executor: QueryExecutor,
        origin_uuids: list[str],
        search_filter: SearchFilters,
        max_depth: int,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EntityNode]:
        """Breadth-first search for Entity nodes reachable within `max_depth` hops of the origins."""
        # Guard: no origins, or a depth that would make the *1..{max_depth}
        # variable-length pattern degenerate.
        if not origin_uuids or max_depth < 1:
            return []
        filter_queries, filter_params = node_search_filter_query_constructor(
            search_filter, GraphProvider.FALKORDB
        )
        if group_ids is not None:
            filter_queries.append('n.group_id IN $group_ids')
            filter_queries.append('origin.group_id IN $group_ids')
            filter_params['group_ids'] = group_ids
        filter_query = ''
        if filter_queries:
            filter_query = ' AND ' + (' AND '.join(filter_queries))
        cypher = (
            f"""
            UNWIND $bfs_origin_node_uuids AS origin_uuid
            MATCH (origin {{uuid: origin_uuid}})-[:RELATES_TO|MENTIONS*1..{max_depth}]->(n:Entity)
            WHERE n.group_id = origin.group_id
            """
            + filter_query
            + """
            RETURN
            """
            + get_entity_node_return_query(GraphProvider.FALKORDB)
            + """
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            bfs_origin_node_uuids=origin_uuids,
            limit=limit,
            **filter_params,
        )
        return [entity_node_from_record(r) for r in records]
# --- Edge search ---
    async def edge_fulltext_search(
        self,
        executor: QueryExecutor,
        query: str,
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EntityEdge]:
        """Fulltext search over RELATES_TO edge names/facts, best-scoring first."""
        fuzzy_query = _build_falkor_fulltext_query(query, group_ids)
        # Empty string means the query was too long to run safely.
        if fuzzy_query == '':
            return []
        filter_queries, filter_params = edge_search_filter_query_constructor(
            search_filter, GraphProvider.FALKORDB
        )
        if group_ids is not None:
            filter_queries.append('e.group_id IN $group_ids')
            filter_params['group_ids'] = group_ids
        filter_query = ''
        if filter_queries:
            filter_query = ' WHERE ' + (' AND '.join(filter_queries))
        cypher = (
            get_relationships_query(
                'edge_name_and_fact', limit=limit, provider=GraphProvider.FALKORDB
            )
            + """
            YIELD relationship AS rel, score
            MATCH (n:Entity)-[e:RELATES_TO {uuid: rel.uuid}]->(m:Entity)
            """
            + filter_query
            + """
            WITH e, score, n, m
            RETURN
            """
            + get_entity_edge_return_query(GraphProvider.FALKORDB)
            + """
            ORDER BY score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            query=fuzzy_query,
            limit=limit,
            **filter_params,
        )
        return [entity_edge_from_record(r) for r in records]
    async def edge_similarity_search(
        self,
        executor: QueryExecutor,
        search_vector: list[float],
        source_node_uuid: str | None,
        target_node_uuid: str | None,
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
        min_score: float = 0.6,
    ) -> list[EntityEdge]:
        """Cosine-similarity search over RELATES_TO fact embeddings.

        Optionally pins the source and/or target node by uuid; keeps scores > min_score.
        """
        filter_queries, filter_params = edge_search_filter_query_constructor(
            search_filter, GraphProvider.FALKORDB
        )
        if group_ids is not None:
            filter_queries.append('e.group_id IN $group_ids')
            filter_params['group_ids'] = group_ids
        if source_node_uuid is not None:
            filter_params['source_uuid'] = source_node_uuid
            filter_queries.append('n.uuid = $source_uuid')
        if target_node_uuid is not None:
            filter_params['target_uuid'] = target_node_uuid
            filter_queries.append('m.uuid = $target_uuid')
        filter_query = ''
        if filter_queries:
            filter_query = ' WHERE ' + (' AND '.join(filter_queries))
        cypher = (
            'MATCH (n:Entity)-[e:RELATES_TO]->(m:Entity)'
            + filter_query
            + """
            WITH DISTINCT e, n, m, """
            + get_vector_cosine_func_query(
                'e.fact_embedding', '$search_vector', GraphProvider.FALKORDB
            )
            + """ AS score
            WHERE score > $min_score
            RETURN
            """
            + get_entity_edge_return_query(GraphProvider.FALKORDB)
            + """
            ORDER BY score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            search_vector=search_vector,
            limit=limit,
            min_score=min_score,
            **filter_params,
        )
        return [entity_edge_from_record(r) for r in records]
async def edge_bfs_search(
self,
executor: QueryExecutor,
origin_uuids: list[str],
max_depth: int,
search_filter: SearchFilters,
group_ids: list[str] | None = None,
limit: int = 10,
) -> list[EntityEdge]:
if not origin_uuids:
return []
filter_queries, filter_params = edge_search_filter_query_constructor(
search_filter, GraphProvider.FALKORDB
)
if group_ids is not None:
filter_queries.append('e.group_id IN $group_ids')
filter_params['group_ids'] = group_ids
filter_query = ''
if filter_queries:
filter_query = ' WHERE ' + (' AND '.join(filter_queries))
cypher = (
f"""
UNWIND $bfs_origin_node_uuids AS origin_uuid
MATCH path = (origin {{uuid: origin_uuid}})-[:RELATES_TO|MENTIONS*1..{max_depth}]->(:Entity)
UNWIND relationships(path) AS rel
MATCH (n:Entity)-[e:RELATES_TO {{uuid: rel.uuid}}]-(m:Entity)
"""
+ filter_query
+ """
RETURN DISTINCT
"""
+ get_entity_edge_return_query(GraphProvider.FALKORDB)
+ """
LIMIT $limit
"""
)
records, _, _ = await executor.execute_query(
cypher,
bfs_origin_node_uuids=origin_uuids,
depth=max_depth,
limit=limit,
**filter_params,
)
return [entity_edge_from_record(r) for r in records]
# --- Episode search ---
    async def episode_fulltext_search(
        self,
        executor: QueryExecutor,
        query: str,
        search_filter: SearchFilters,  # noqa: ARG002
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EpisodicNode]:
        """Fulltext search over episodic content via the FalkorDB index.

        Returns up to ``limit`` episodes ordered by descending fulltext score,
        optionally restricted to ``group_ids``. ``search_filter`` is accepted
        for interface parity but unused here (hence the noqa above).
        """
        fuzzy_query = _build_falkor_fulltext_query(query, group_ids)
        if fuzzy_query == '':
            # Nothing searchable remained after sanitizing the raw query text.
            return []
        filter_params: dict[str, Any] = {}
        group_filter_query = ''
        if group_ids is not None:
            # Appended after the WHERE clause below, hence the leading AND.
            group_filter_query += '\nAND e.group_id IN $group_ids'
            filter_params['group_ids'] = group_ids
        cypher = (
            get_nodes_query(
                'episode_content', '$query', limit=limit, provider=GraphProvider.FALKORDB
            )
            + """
            YIELD node AS episode, score
            MATCH (e:Episodic)
            WHERE e.uuid = episode.uuid
            """
            + group_filter_query
            + """
            RETURN
            """
            + EPISODIC_NODE_RETURN
            + """
            ORDER BY score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher, query=fuzzy_query, limit=limit, **filter_params
        )
        return [episodic_node_from_record(r) for r in records]
# --- Community search ---
    async def community_fulltext_search(
        self,
        executor: QueryExecutor,
        query: str,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[CommunityNode]:
        """Fulltext search over community names via the FalkorDB index.

        Returns up to ``limit`` communities ordered by descending fulltext
        score, optionally restricted to ``group_ids``.
        """
        fuzzy_query = _build_falkor_fulltext_query(query, group_ids)
        if fuzzy_query == '':
            # Nothing searchable remained after sanitizing the raw query text.
            return []
        filter_params: dict[str, Any] = {}
        group_filter_query = ''
        if group_ids is not None:
            group_filter_query = 'WHERE c.group_id IN $group_ids'
            filter_params['group_ids'] = group_ids
        cypher = (
            get_nodes_query(
                'community_name', '$query', limit=limit, provider=GraphProvider.FALKORDB
            )
            + """
            YIELD node AS c, score
            WITH c, score
            """
            + group_filter_query
            + """
            RETURN
            """
            + COMMUNITY_NODE_RETURN
            + """
            ORDER BY score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher, query=fuzzy_query, limit=limit, **filter_params
        )
        return [community_node_from_record(r) for r in records]
    async def community_similarity_search(
        self,
        executor: QueryExecutor,
        search_vector: list[float],
        group_ids: list[str] | None = None,
        limit: int = 10,
        min_score: float = 0.6,
    ) -> list[CommunityNode]:
        """Cosine-similarity search over community name embeddings.

        Scores every Community node's ``name_embedding`` against
        ``search_vector`` and returns up to ``limit`` nodes whose score is
        strictly greater than ``min_score``, best first.
        """
        query_params: dict[str, Any] = {}
        group_filter_query = ''
        if group_ids is not None:
            group_filter_query += ' WHERE c.group_id IN $group_ids'
            query_params['group_ids'] = group_ids
        cypher = (
            'MATCH (c:Community)'
            + group_filter_query
            + """
            WITH c,
            """
            + get_vector_cosine_func_query(
                'c.name_embedding', '$search_vector', GraphProvider.FALKORDB
            )
            + """ AS score
            WHERE score > $min_score
            RETURN
            """
            + COMMUNITY_NODE_RETURN
            + """
            ORDER BY score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            search_vector=search_vector,
            limit=limit,
            min_score=min_score,
            **query_params,
        )
        return [community_node_from_record(r) for r in records]
# --- Rerankers ---
    async def node_distance_reranker(
        self,
        executor: QueryExecutor,
        node_uuids: list[str],
        center_node_uuid: str,
        min_score: float = 0,
    ) -> list[EntityNode]:
        """Rerank candidate nodes by graph proximity to a center node.

        Direct RELATES_TO neighbors of the center receive score 1; all other
        candidates receive infinity (not reachable in one hop). Candidates are
        ordered by ascending score, with the center itself (score 0.1) placed
        first when it was among the inputs. A candidate survives only when
        ``1 / score >= min_score``.
        """
        # The center is ranked separately, so exclude it from the distance query.
        filtered_uuids = [u for u in node_uuids if u != center_node_uuid]
        scores: dict[str, float] = {center_node_uuid: 0.0}
        cypher = """
        UNWIND $node_uuids AS node_uuid
        MATCH (center:Entity {uuid: $center_uuid})-[:RELATES_TO]-(n:Entity {uuid: node_uuid})
        RETURN 1 AS score, node_uuid AS uuid
        """
        results, _, _ = await executor.execute_query(
            cypher,
            node_uuids=filtered_uuids,
            center_uuid=center_node_uuid,
        )
        for result in results:
            scores[result['uuid']] = result['score']
        # Candidates that are not direct neighbors get the worst possible score.
        for uuid in filtered_uuids:
            if uuid not in scores:
                scores[uuid] = float('inf')
        filtered_uuids.sort(key=lambda cur_uuid: scores[cur_uuid])
        if center_node_uuid in node_uuids:
            # 0.1 keeps the center strictly ahead of score-1 neighbors and
            # avoids a zero division in the min_score check below.
            scores[center_node_uuid] = 0.1
            filtered_uuids = [center_node_uuid] + filtered_uuids
        # 1/inf == 0.0, so unreachable nodes survive only when min_score <= 0.
        reranked_uuids = [u for u in filtered_uuids if (1 / scores[u]) >= min_score]
        if not reranked_uuids:
            return []
        get_query = """
        MATCH (n:Entity)
        WHERE n.uuid IN $uuids
        RETURN
        """ + get_entity_node_return_query(GraphProvider.FALKORDB)
        records, _, _ = await executor.execute_query(get_query, uuids=reranked_uuids)
        # Re-order the fetched nodes to match the reranked ordering.
        node_map = {r['uuid']: entity_node_from_record(r) for r in records}
        return [node_map[u] for u in reranked_uuids if u in node_map]
    async def episode_mentions_reranker(
        self,
        executor: QueryExecutor,
        node_uuids: list[str],
        min_score: float = 0,
    ) -> list[EntityNode]:
        """Rerank candidate nodes by how many episodes mention them.

        Each candidate's score is its MENTIONS in-degree; candidates never
        mentioned receive infinity. Nodes scoring below ``min_score`` are
        dropped, and the survivors are fetched and returned in ranked order.

        NOTE(review): the ascending sort places lower mention counts first and
        unmentioned nodes (inf) last — confirm this ordering is intended.
        """
        if not node_uuids:
            return []
        scores: dict[str, float] = {}
        results, _, _ = await executor.execute_query(
            """
            UNWIND $node_uuids AS node_uuid
            MATCH (episode:Episodic)-[r:MENTIONS]->(n:Entity {uuid: node_uuid})
            RETURN count(*) AS score, n.uuid AS uuid
            """,
            node_uuids=node_uuids,
        )
        for result in results:
            scores[result['uuid']] = result['score']
        # Nodes with no MENTIONS edge get an infinite score.
        for uuid in node_uuids:
            if uuid not in scores:
                scores[uuid] = float('inf')
        # Copy before sorting so the caller's list is not mutated.
        sorted_uuids = list(node_uuids)
        sorted_uuids.sort(key=lambda cur_uuid: scores[cur_uuid])
        reranked_uuids = [u for u in sorted_uuids if scores[u] >= min_score]
        if not reranked_uuids:
            return []
        get_query = """
        MATCH (n:Entity)
        WHERE n.uuid IN $uuids
        RETURN
        """ + get_entity_node_return_query(GraphProvider.FALKORDB)
        records, _, _ = await executor.execute_query(get_query, uuids=reranked_uuids)
        # Re-order the fetched nodes to match the reranked ordering.
        node_map = {r['uuid']: entity_node_from_record(r) for r in records}
        return [node_map[u] for u in reranked_uuids if u in node_map]
# --- Filter builders ---
def build_node_search_filters(self, search_filters: SearchFilters) -> Any:
filter_queries, filter_params = node_search_filter_query_constructor(
search_filters, GraphProvider.FALKORDB
)
return {'filter_queries': filter_queries, 'filter_params': filter_params}
def build_edge_search_filters(self, search_filters: SearchFilters) -> Any:
filter_queries, filter_params = edge_search_filter_query_constructor(
search_filters, GraphProvider.FALKORDB
)
return {'filter_queries': filter_queries, 'filter_params': filter_params}
# --- Fulltext query builder ---
def build_fulltext_query(
self,
query: str,
group_ids: list[str] | None = None,
max_query_length: int = MAX_QUERY_LENGTH,
) -> str:
return _build_falkor_fulltext_query(query, group_ids, max_query_length)
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/falkordb/operations/search_ops.py",
"license": "Apache License 2.0",
"lines": 589,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neo4j/operations/community_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.community_edge_ops import CommunityEdgeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.edges import CommunityEdge
from graphiti_core.errors import EdgeNotFoundError
from graphiti_core.helpers import parse_db_date
from graphiti_core.models.edges.edge_db_queries import (
COMMUNITY_EDGE_RETURN,
get_community_edge_save_query,
)
logger = logging.getLogger(__name__)
def _community_edge_from_record(record: Any) -> CommunityEdge:
    """Hydrate a CommunityEdge from a raw driver record."""
    fields = {
        'uuid': record['uuid'],
        'group_id': record['group_id'],
        'source_node_uuid': record['source_node_uuid'],
        'target_node_uuid': record['target_node_uuid'],
        'created_at': parse_db_date(record['created_at']),  # type: ignore[arg-type]
    }
    return CommunityEdge(**fields)
class Neo4jCommunityEdgeOperations(CommunityEdgeOperations):
    """Neo4j implementation of CRUD operations for community (HAS_MEMBER) edges.

    Write methods accept an optional open ``Transaction``; when ``tx`` is None
    the query runs directly through the ``QueryExecutor``. Read methods pass
    ``routing_='r'`` so the driver may route them to a read replica.
    """

    async def save(
        self,
        executor: QueryExecutor,
        edge: CommunityEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Upsert a community edge using the provider-specific save query."""
        query = get_community_edge_save_query(GraphProvider.NEO4J)
        params: dict[str, Any] = {
            'community_uuid': edge.source_node_uuid,
            'entity_uuid': edge.target_node_uuid,
            'uuid': edge.uuid,
            'group_id': edge.group_id,
            'created_at': edge.created_at,
        }
        if tx is not None:
            await tx.run(query, **params)
        else:
            await executor.execute_query(query, **params)
        logger.debug(f'Saved Edge to Graph: {edge.uuid}')

    async def delete(
        self,
        executor: QueryExecutor,
        edge: CommunityEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Delete the edge with the given UUID.

        The pattern matches any of the three core edge types, not only
        HAS_MEMBER, so a UUID collision across types would also be removed.
        """
        query = """
        MATCH (n)-[e:MENTIONS|RELATES_TO|HAS_MEMBER {uuid: $uuid}]->(m)
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuid=edge.uuid)
        else:
            await executor.execute_query(query, uuid=edge.uuid)
        logger.debug(f'Deleted Edge: {edge.uuid}')

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        """Delete every edge (of any core type) whose UUID is in ``uuids``."""
        query = """
        MATCH (n)-[e:MENTIONS|RELATES_TO|HAS_MEMBER]->(m)
        WHERE e.uuid IN $uuids
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> CommunityEdge:
        """Fetch a single HAS_MEMBER edge by UUID.

        Raises:
            EdgeNotFoundError: if no edge with the UUID exists.
        """
        query = (
            """
            MATCH (n:Community)-[e:HAS_MEMBER {uuid: $uuid}]->(m)
            RETURN
            """
            + COMMUNITY_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid, routing_='r')
        edges = [_community_edge_from_record(r) for r in records]
        if len(edges) == 0:
            raise EdgeNotFoundError(uuid)
        return edges[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[CommunityEdge]:
        """Fetch all HAS_MEMBER edges whose UUID is in ``uuids``."""
        query = (
            """
            MATCH (n:Community)-[e:HAS_MEMBER]->(m)
            WHERE e.uuid IN $uuids
            RETURN
            """
            + COMMUNITY_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids, routing_='r')
        return [_community_edge_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[CommunityEdge]:
        """Page through HAS_MEMBER edges in the given groups.

        Results are keyset-paginated: ordered by UUID descending, with
        ``uuid_cursor`` excluding edges at or above the cursor UUID.
        """
        cursor_clause = 'AND e.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Community)-[e:HAS_MEMBER]->(m)
            WHERE e.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + COMMUNITY_EDGE_RETURN
            + """
            ORDER BY e.uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
            routing_='r',
        )
        return [_community_edge_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neo4j/operations/community_edge_ops.py",
"license": "Apache License 2.0",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neo4j/operations/community_node_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.community_node_ops import CommunityNodeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.driver.record_parsers import community_node_from_record
from graphiti_core.errors import NodeNotFoundError
from graphiti_core.models.nodes.node_db_queries import (
COMMUNITY_NODE_RETURN,
get_community_node_save_query,
)
from graphiti_core.nodes import CommunityNode
logger = logging.getLogger(__name__)
class Neo4jCommunityNodeOperations(CommunityNodeOperations):
    """Neo4j implementation of CRUD and embedding operations for Community nodes.

    Write methods accept an optional open ``Transaction``; when ``tx`` is None
    the query runs directly through the ``QueryExecutor``. Read methods pass
    ``routing_='r'`` so the driver may route them to a read replica.
    """

    async def save(
        self,
        executor: QueryExecutor,
        node: CommunityNode,
        tx: Transaction | None = None,
    ) -> None:
        """Upsert a single community node."""
        query = get_community_node_save_query(GraphProvider.NEO4J)
        params: dict[str, Any] = {
            'uuid': node.uuid,
            'name': node.name,
            'group_id': node.group_id,
            'summary': node.summary,
            'name_embedding': node.name_embedding,
            'created_at': node.created_at,
        }
        if tx is not None:
            await tx.run(query, **params)
        else:
            await executor.execute_query(query, **params)
        logger.debug(f'Saved Community Node to Graph: {node.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[CommunityNode],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Save many community nodes one at a time.

        ``batch_size`` is accepted for interface parity but unused here.
        """
        # Community nodes saved individually since bulk query not in existing codebase
        for node in nodes:
            await self.save(executor, node, tx=tx)

    async def delete(
        self,
        executor: QueryExecutor,
        node: CommunityNode,
        tx: Transaction | None = None,
    ) -> None:
        """Detach-delete the node with the given UUID.

        The match covers all three core labels (Entity/Episodic/Community),
        and the query collects the UUIDs of edges attached to the node.
        """
        query = """
        MATCH (n {uuid: $uuid})
        WHERE n:Entity OR n:Episodic OR n:Community
        OPTIONAL MATCH (n)-[r]-()
        WITH collect(r.uuid) AS edge_uuids, n
        DETACH DELETE n
        RETURN edge_uuids
        """
        if tx is not None:
            await tx.run(query, uuid=node.uuid)
        else:
            await executor.execute_query(query, uuid=node.uuid)
        logger.debug(f'Deleted Node: {node.uuid}')

    async def delete_by_group_id(
        self,
        executor: QueryExecutor,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Delete all community nodes in a group, in server-side batches.

        NOTE(review): ``CALL ... IN TRANSACTIONS`` generally cannot run inside
        an explicit transaction in Neo4j — confirm the ``tx`` branch works.
        """
        query = """
        MATCH (n:Community {group_id: $group_id})
        CALL (n) {
            DETACH DELETE n
        } IN TRANSACTIONS OF $batch_size ROWS
        """
        if tx is not None:
            await tx.run(query, group_id=group_id, batch_size=batch_size)
        else:
            await executor.execute_query(query, group_id=group_id, batch_size=batch_size)

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Delete the community nodes with the given UUIDs, in batches."""
        query = """
        MATCH (n:Community)
        WHERE n.uuid IN $uuids
        CALL (n) {
            DETACH DELETE n
        } IN TRANSACTIONS OF $batch_size ROWS
        """
        if tx is not None:
            await tx.run(query, uuids=uuids, batch_size=batch_size)
        else:
            await executor.execute_query(query, uuids=uuids, batch_size=batch_size)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> CommunityNode:
        """Fetch a single community node by UUID.

        Raises:
            NodeNotFoundError: if no community with the UUID exists.
        """
        query = (
            """
            MATCH (c:Community {uuid: $uuid})
            RETURN
            """
            + COMMUNITY_NODE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid, routing_='r')
        nodes = [community_node_from_record(r) for r in records]
        if len(nodes) == 0:
            raise NodeNotFoundError(uuid)
        return nodes[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[CommunityNode]:
        """Fetch all community nodes whose UUID is in ``uuids``."""
        query = (
            """
            MATCH (c:Community)
            WHERE c.uuid IN $uuids
            RETURN
            """
            + COMMUNITY_NODE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids, routing_='r')
        return [community_node_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[CommunityNode]:
        """Page through community nodes in the given groups.

        Results are keyset-paginated: ordered by UUID descending, with
        ``uuid_cursor`` excluding nodes at or above the cursor UUID.
        """
        cursor_clause = 'AND c.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (c:Community)
            WHERE c.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + COMMUNITY_NODE_RETURN
            + """
            ORDER BY c.uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
            routing_='r',
        )
        return [community_node_from_record(r) for r in records]

    async def load_name_embedding(
        self,
        executor: QueryExecutor,
        node: CommunityNode,
    ) -> None:
        """Populate ``node.name_embedding`` from the stored graph property.

        Raises:
            NodeNotFoundError: if the node does not exist in the graph.
        """
        query = """
        MATCH (c:Community {uuid: $uuid})
        RETURN c.name_embedding AS name_embedding
        """
        records, _, _ = await executor.execute_query(query, uuid=node.uuid, routing_='r')
        if len(records) == 0:
            raise NodeNotFoundError(node.uuid)
        node.name_embedding = records[0]['name_embedding']
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neo4j/operations/community_node_ops.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neo4j/operations/entity_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.entity_edge_ops import EntityEdgeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.driver.record_parsers import entity_edge_from_record
from graphiti_core.edges import EntityEdge
from graphiti_core.errors import EdgeNotFoundError
from graphiti_core.models.edges.edge_db_queries import (
get_entity_edge_return_query,
get_entity_edge_save_bulk_query,
get_entity_edge_save_query,
)
logger = logging.getLogger(__name__)
class Neo4jEntityEdgeOperations(EntityEdgeOperations):
    """Neo4j implementation of CRUD and embedding operations for RELATES_TO edges.

    Write methods accept an optional open ``Transaction``; when ``tx`` is None
    the query runs directly through the ``QueryExecutor``. Read methods pass
    ``routing_='r'`` so the driver may route them to a read replica.
    """

    async def save(
        self,
        executor: QueryExecutor,
        edge: EntityEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Upsert a single entity edge, merging custom attributes into the payload."""
        # NOTE(review): keys here are 'source_uuid'/'target_uuid', while
        # save_bulk uses 'source_node_uuid'/'target_node_uuid' — presumably
        # matching the respective query templates; confirm.
        edge_data: dict[str, Any] = {
            'uuid': edge.uuid,
            'source_uuid': edge.source_node_uuid,
            'target_uuid': edge.target_node_uuid,
            'name': edge.name,
            'fact': edge.fact,
            'fact_embedding': edge.fact_embedding,
            'group_id': edge.group_id,
            'episodes': edge.episodes,
            'created_at': edge.created_at,
            'expired_at': edge.expired_at,
            'valid_at': edge.valid_at,
            'invalid_at': edge.invalid_at,
        }
        # Custom attributes ride along as extra properties.
        edge_data.update(edge.attributes or {})
        query = get_entity_edge_save_query(GraphProvider.NEO4J)
        if tx is not None:
            await tx.run(query, edge_data=edge_data)
        else:
            await executor.execute_query(query, edge_data=edge_data)
        logger.debug(f'Saved Edge to Graph: {edge.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        edges: list[EntityEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Upsert many entity edges in a single bulk query.

        ``batch_size`` is accepted for interface parity but unused here.
        """
        prepared: list[dict[str, Any]] = []
        for edge in edges:
            edge_data: dict[str, Any] = {
                'uuid': edge.uuid,
                'source_node_uuid': edge.source_node_uuid,
                'target_node_uuid': edge.target_node_uuid,
                'name': edge.name,
                'fact': edge.fact,
                'fact_embedding': edge.fact_embedding,
                'group_id': edge.group_id,
                'episodes': edge.episodes,
                'created_at': edge.created_at,
                'expired_at': edge.expired_at,
                'valid_at': edge.valid_at,
                'invalid_at': edge.invalid_at,
            }
            edge_data.update(edge.attributes or {})
            prepared.append(edge_data)
        query = get_entity_edge_save_bulk_query(GraphProvider.NEO4J)
        if tx is not None:
            await tx.run(query, entity_edges=prepared)
        else:
            await executor.execute_query(query, entity_edges=prepared)

    async def delete(
        self,
        executor: QueryExecutor,
        edge: EntityEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Delete the edge with the given UUID (any of the three core edge types)."""
        query = """
        MATCH (n)-[e:MENTIONS|RELATES_TO|HAS_MEMBER {uuid: $uuid}]->(m)
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuid=edge.uuid)
        else:
            await executor.execute_query(query, uuid=edge.uuid)
        logger.debug(f'Deleted Edge: {edge.uuid}')

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        """Delete every edge (of any core type) whose UUID is in ``uuids``."""
        query = """
        MATCH (n)-[e:MENTIONS|RELATES_TO|HAS_MEMBER]->(m)
        WHERE e.uuid IN $uuids
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> EntityEdge:
        """Fetch a single RELATES_TO edge by UUID.

        Raises:
            EdgeNotFoundError: if no edge with the UUID exists.
        """
        query = """
        MATCH (n:Entity)-[e:RELATES_TO {uuid: $uuid}]->(m:Entity)
        RETURN
        """ + get_entity_edge_return_query(GraphProvider.NEO4J)
        records, _, _ = await executor.execute_query(query, uuid=uuid, routing_='r')
        edges = [entity_edge_from_record(r) for r in records]
        if len(edges) == 0:
            raise EdgeNotFoundError(uuid)
        return edges[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[EntityEdge]:
        """Fetch all RELATES_TO edges whose UUID is in ``uuids`` (empty in, empty out)."""
        if not uuids:
            return []
        query = """
        MATCH (n:Entity)-[e:RELATES_TO]->(m:Entity)
        WHERE e.uuid IN $uuids
        RETURN
        """ + get_entity_edge_return_query(GraphProvider.NEO4J)
        records, _, _ = await executor.execute_query(query, uuids=uuids, routing_='r')
        return [entity_edge_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EntityEdge]:
        """Page through RELATES_TO edges in the given groups.

        Results are keyset-paginated: ordered by UUID descending, with
        ``uuid_cursor`` excluding edges at or above the cursor UUID.
        """
        cursor_clause = 'AND e.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Entity)-[e:RELATES_TO]->(m:Entity)
            WHERE e.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + get_entity_edge_return_query(GraphProvider.NEO4J)
            + """
            ORDER BY e.uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
            routing_='r',
        )
        return [entity_edge_from_record(r) for r in records]

    async def get_between_nodes(
        self,
        executor: QueryExecutor,
        source_node_uuid: str,
        target_node_uuid: str,
    ) -> list[EntityEdge]:
        """Fetch directed RELATES_TO edges from source node to target node."""
        query = """
        MATCH (n:Entity {uuid: $source_node_uuid})-[e:RELATES_TO]->(m:Entity {uuid: $target_node_uuid})
        RETURN
        """ + get_entity_edge_return_query(GraphProvider.NEO4J)
        records, _, _ = await executor.execute_query(
            query,
            source_node_uuid=source_node_uuid,
            target_node_uuid=target_node_uuid,
            routing_='r',
        )
        return [entity_edge_from_record(r) for r in records]

    async def get_by_node_uuid(
        self,
        executor: QueryExecutor,
        node_uuid: str,
    ) -> list[EntityEdge]:
        """Fetch all RELATES_TO edges touching the node, in either direction."""
        query = """
        MATCH (n:Entity {uuid: $node_uuid})-[e:RELATES_TO]-(m:Entity)
        RETURN
        """ + get_entity_edge_return_query(GraphProvider.NEO4J)
        records, _, _ = await executor.execute_query(query, node_uuid=node_uuid, routing_='r')
        return [entity_edge_from_record(r) for r in records]

    async def load_embeddings(
        self,
        executor: QueryExecutor,
        edge: EntityEdge,
    ) -> None:
        """Populate ``edge.fact_embedding`` from the stored graph property.

        Raises:
            EdgeNotFoundError: if the edge does not exist in the graph.
        """
        query = """
        MATCH (n:Entity)-[e:RELATES_TO {uuid: $uuid}]->(m:Entity)
        RETURN e.fact_embedding AS fact_embedding
        """
        records, _, _ = await executor.execute_query(query, uuid=edge.uuid, routing_='r')
        if len(records) == 0:
            raise EdgeNotFoundError(edge.uuid)
        edge.fact_embedding = records[0]['fact_embedding']

    async def load_embeddings_bulk(
        self,
        executor: QueryExecutor,
        edges: list[EntityEdge],
        batch_size: int = 100,
    ) -> None:
        """Populate ``fact_embedding`` on each edge found in the graph.

        Edges missing from the graph are silently left untouched.
        ``batch_size`` is accepted for interface parity but unused (one query).
        """
        uuids = [e.uuid for e in edges]
        query = """
        MATCH (n:Entity)-[e:RELATES_TO]-(m:Entity)
        WHERE e.uuid IN $edge_uuids
        RETURN DISTINCT e.uuid AS uuid, e.fact_embedding AS fact_embedding
        """
        records, _, _ = await executor.execute_query(query, edge_uuids=uuids, routing_='r')
        embedding_map = {r['uuid']: r['fact_embedding'] for r in records}
        for edge in edges:
            if edge.uuid in embedding_map:
                edge.fact_embedding = embedding_map[edge.uuid]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neo4j/operations/entity_edge_ops.py",
"license": "Apache License 2.0",
"lines": 232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neo4j/operations/entity_node_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.entity_node_ops import EntityNodeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.driver.record_parsers import entity_node_from_record
from graphiti_core.errors import NodeNotFoundError
from graphiti_core.models.nodes.node_db_queries import (
get_entity_node_return_query,
get_entity_node_save_bulk_query,
get_entity_node_save_query,
)
from graphiti_core.nodes import EntityNode
logger = logging.getLogger(__name__)
class Neo4jEntityNodeOperations(EntityNodeOperations):
    """Neo4j implementation of CRUD and embedding operations for Entity nodes.

    Write methods accept an optional open ``Transaction``; when ``tx`` is None
    the query runs directly through the ``QueryExecutor``. Read methods pass
    ``routing_='r'`` so the driver may route them to a read replica.
    """

    async def save(
        self,
        executor: QueryExecutor,
        node: EntityNode,
        tx: Transaction | None = None,
    ) -> None:
        """Upsert a single entity node, merging custom attributes into the payload."""
        entity_data: dict[str, Any] = {
            'uuid': node.uuid,
            'name': node.name,
            'name_embedding': node.name_embedding,
            'group_id': node.group_id,
            'summary': node.summary,
            'created_at': node.created_at,
        }
        entity_data.update(node.attributes or {})
        # Deduplicate labels and always include the base Entity label.
        labels = ':'.join(list(set(node.labels + ['Entity'])))
        query = get_entity_node_save_query(GraphProvider.NEO4J, labels)
        if tx is not None:
            await tx.run(query, entity_data=entity_data)
        else:
            await executor.execute_query(query, entity_data=entity_data)
        logger.debug(f'Saved Node to Graph: {node.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[EntityNode],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Upsert many entity nodes in a single bulk query.

        Unlike ``save``, labels travel in each payload (as a list) rather than
        in the query text. ``batch_size`` is accepted for interface parity but
        unused here.
        """
        prepared: list[dict[str, Any]] = []
        for node in nodes:
            entity_data: dict[str, Any] = {
                'uuid': node.uuid,
                'name': node.name,
                'group_id': node.group_id,
                'summary': node.summary,
                'created_at': node.created_at,
                'name_embedding': node.name_embedding,
                'labels': list(set(node.labels + ['Entity'])),
            }
            entity_data.update(node.attributes or {})
            prepared.append(entity_data)
        # The bulk query builder inspects the prepared payloads.
        query = get_entity_node_save_bulk_query(GraphProvider.NEO4J, prepared)
        if tx is not None:
            await tx.run(query, nodes=prepared)
        else:
            await executor.execute_query(query, nodes=prepared)

    async def delete(
        self,
        executor: QueryExecutor,
        node: EntityNode,
        tx: Transaction | None = None,
    ) -> None:
        """Detach-delete the node with the given UUID.

        The match covers all three core labels (Entity/Episodic/Community),
        and the query collects the UUIDs of edges attached to the node.
        """
        query = """
        MATCH (n {uuid: $uuid})
        WHERE n:Entity OR n:Episodic OR n:Community
        OPTIONAL MATCH (n)-[r]-()
        WITH collect(r.uuid) AS edge_uuids, n
        DETACH DELETE n
        RETURN edge_uuids
        """
        if tx is not None:
            await tx.run(query, uuid=node.uuid)
        else:
            await executor.execute_query(query, uuid=node.uuid)
        logger.debug(f'Deleted Node: {node.uuid}')

    async def delete_by_group_id(
        self,
        executor: QueryExecutor,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Delete all entity nodes in a group, in server-side batches.

        NOTE(review): ``CALL ... IN TRANSACTIONS`` generally cannot run inside
        an explicit transaction in Neo4j — confirm the ``tx`` branch works.
        """
        query = """
        MATCH (n:Entity {group_id: $group_id})
        CALL (n) {
            DETACH DELETE n
        } IN TRANSACTIONS OF $batch_size ROWS
        """
        if tx is not None:
            await tx.run(query, group_id=group_id, batch_size=batch_size)
        else:
            await executor.execute_query(query, group_id=group_id, batch_size=batch_size)

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Delete the entity nodes with the given UUIDs, in batches."""
        query = """
        MATCH (n:Entity)
        WHERE n.uuid IN $uuids
        CALL (n) {
            DETACH DELETE n
        } IN TRANSACTIONS OF $batch_size ROWS
        """
        if tx is not None:
            await tx.run(query, uuids=uuids, batch_size=batch_size)
        else:
            await executor.execute_query(query, uuids=uuids, batch_size=batch_size)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> EntityNode:
        """Fetch a single entity node by UUID.

        Raises:
            NodeNotFoundError: if no entity with the UUID exists.
        """
        query = """
        MATCH (n:Entity {uuid: $uuid})
        RETURN
        """ + get_entity_node_return_query(GraphProvider.NEO4J)
        records, _, _ = await executor.execute_query(query, uuid=uuid, routing_='r')
        nodes = [entity_node_from_record(r) for r in records]
        if len(nodes) == 0:
            raise NodeNotFoundError(uuid)
        return nodes[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[EntityNode]:
        """Fetch all entity nodes whose UUID is in ``uuids``."""
        query = """
        MATCH (n:Entity)
        WHERE n.uuid IN $uuids
        RETURN
        """ + get_entity_node_return_query(GraphProvider.NEO4J)
        records, _, _ = await executor.execute_query(query, uuids=uuids, routing_='r')
        return [entity_node_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EntityNode]:
        """Page through entity nodes in the given groups.

        Results are keyset-paginated: ordered by UUID descending, with
        ``uuid_cursor`` excluding nodes at or above the cursor UUID.
        """
        cursor_clause = 'AND n.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Entity)
            WHERE n.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + get_entity_node_return_query(GraphProvider.NEO4J)
            + """
            ORDER BY n.uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
            routing_='r',
        )
        return [entity_node_from_record(r) for r in records]

    async def load_embeddings(
        self,
        executor: QueryExecutor,
        node: EntityNode,
    ) -> None:
        """Populate ``node.name_embedding`` from the stored graph property.

        Raises:
            NodeNotFoundError: if the node does not exist in the graph.
        """
        query = """
        MATCH (n:Entity {uuid: $uuid})
        RETURN n.name_embedding AS name_embedding
        """
        records, _, _ = await executor.execute_query(query, uuid=node.uuid, routing_='r')
        if len(records) == 0:
            raise NodeNotFoundError(node.uuid)
        node.name_embedding = records[0]['name_embedding']

    async def load_embeddings_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[EntityNode],
        batch_size: int = 100,
    ) -> None:
        """Populate ``name_embedding`` on each node found in the graph.

        Nodes missing from the graph are silently left untouched.
        ``batch_size`` is accepted for interface parity but unused (one query).
        """
        uuids = [n.uuid for n in nodes]
        query = """
        MATCH (n:Entity)
        WHERE n.uuid IN $uuids
        RETURN DISTINCT n.uuid AS uuid, n.name_embedding AS name_embedding
        """
        records, _, _ = await executor.execute_query(query, uuids=uuids, routing_='r')
        embedding_map = {r['uuid']: r['name_embedding'] for r in records}
        for node in nodes:
            if node.uuid in embedding_map:
                node.name_embedding = embedding_map[node.uuid]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neo4j/operations/entity_node_ops.py",
"license": "Apache License 2.0",
"lines": 216,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neo4j/operations/episode_node_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from datetime import datetime
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.episode_node_ops import EpisodeNodeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.driver.record_parsers import episodic_node_from_record
from graphiti_core.errors import NodeNotFoundError
from graphiti_core.models.nodes.node_db_queries import (
EPISODIC_NODE_RETURN,
get_episode_node_save_bulk_query,
get_episode_node_save_query,
)
from graphiti_core.nodes import EpisodicNode
logger = logging.getLogger(__name__)
class Neo4jEpisodeNodeOperations(EpisodeNodeOperations):
async def save(
self,
executor: QueryExecutor,
node: EpisodicNode,
tx: Transaction | None = None,
) -> None:
query = get_episode_node_save_query(GraphProvider.NEO4J)
params: dict[str, Any] = {
'uuid': node.uuid,
'name': node.name,
'group_id': node.group_id,
'source_description': node.source_description,
'content': node.content,
'entity_edges': node.entity_edges,
'created_at': node.created_at,
'valid_at': node.valid_at,
'source': node.source.value,
}
if tx is not None:
await tx.run(query, **params)
else:
await executor.execute_query(query, **params)
logger.debug(f'Saved Episode to Graph: {node.uuid}')
    async def save_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[EpisodicNode],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Save many episodic nodes in a single bulk query.

        ``batch_size`` is accepted for interface parity but unused here.
        """
        episodes = []
        for node in nodes:
            # Relies on the model iterating as (field, value) pairs so dict()
            # yields a plain field dict — presumably pydantic; confirm.
            ep = dict(node)
            # Store the source enum as its string value.
            ep['source'] = str(ep['source'].value)
            ep.pop('labels', None)
            episodes.append(ep)
        query = get_episode_node_save_bulk_query(GraphProvider.NEO4J)
        if tx is not None:
            await tx.run(query, episodes=episodes)
        else:
            await executor.execute_query(query, episodes=episodes)
async def delete(
self,
executor: QueryExecutor,
node: EpisodicNode,
tx: Transaction | None = None,
) -> None:
query = """
MATCH (n {uuid: $uuid})
WHERE n:Entity OR n:Episodic OR n:Community
OPTIONAL MATCH (n)-[r]-()
WITH collect(r.uuid) AS edge_uuids, n
DETACH DELETE n
RETURN edge_uuids
"""
if tx is not None:
await tx.run(query, uuid=node.uuid)
else:
await executor.execute_query(query, uuid=node.uuid)
logger.debug(f'Deleted Node: {node.uuid}')
async def delete_by_group_id(
self,
executor: QueryExecutor,
group_id: str,
tx: Transaction | None = None,
batch_size: int = 100,
) -> None:
query = """
MATCH (n:Episodic {group_id: $group_id})
CALL (n) {
DETACH DELETE n
} IN TRANSACTIONS OF $batch_size ROWS
"""
if tx is not None:
await tx.run(query, group_id=group_id, batch_size=batch_size)
else:
await executor.execute_query(query, group_id=group_id, batch_size=batch_size)
async def delete_by_uuids(
self,
executor: QueryExecutor,
uuids: list[str],
tx: Transaction | None = None,
batch_size: int = 100,
) -> None:
query = """
MATCH (n:Episodic)
WHERE n.uuid IN $uuids
CALL (n) {
DETACH DELETE n
} IN TRANSACTIONS OF $batch_size ROWS
"""
if tx is not None:
await tx.run(query, uuids=uuids, batch_size=batch_size)
else:
await executor.execute_query(query, uuids=uuids, batch_size=batch_size)
async def get_by_uuid(
self,
executor: QueryExecutor,
uuid: str,
) -> EpisodicNode:
query = (
"""
MATCH (e:Episodic {uuid: $uuid})
RETURN
"""
+ EPISODIC_NODE_RETURN
)
records, _, _ = await executor.execute_query(query, uuid=uuid, routing_='r')
episodes = [episodic_node_from_record(r) for r in records]
if len(episodes) == 0:
raise NodeNotFoundError(uuid)
return episodes[0]
async def get_by_uuids(
self,
executor: QueryExecutor,
uuids: list[str],
) -> list[EpisodicNode]:
query = (
"""
MATCH (e:Episodic)
WHERE e.uuid IN $uuids
RETURN DISTINCT
"""
+ EPISODIC_NODE_RETURN
)
records, _, _ = await executor.execute_query(query, uuids=uuids, routing_='r')
return [episodic_node_from_record(r) for r in records]
async def get_by_group_ids(
self,
executor: QueryExecutor,
group_ids: list[str],
limit: int | None = None,
uuid_cursor: str | None = None,
) -> list[EpisodicNode]:
cursor_clause = 'AND e.uuid < $uuid' if uuid_cursor else ''
limit_clause = 'LIMIT $limit' if limit is not None else ''
query = (
"""
MATCH (e:Episodic)
WHERE e.group_id IN $group_ids
"""
+ cursor_clause
+ """
RETURN DISTINCT
"""
+ EPISODIC_NODE_RETURN
+ """
ORDER BY uuid DESC
"""
+ limit_clause
)
records, _, _ = await executor.execute_query(
query,
group_ids=group_ids,
uuid=uuid_cursor,
limit=limit,
routing_='r',
)
return [episodic_node_from_record(r) for r in records]
async def get_by_entity_node_uuid(
self,
executor: QueryExecutor,
entity_node_uuid: str,
) -> list[EpisodicNode]:
query = (
"""
MATCH (e:Episodic)-[r:MENTIONS]->(n:Entity {uuid: $entity_node_uuid})
RETURN DISTINCT
"""
+ EPISODIC_NODE_RETURN
)
records, _, _ = await executor.execute_query(
query, entity_node_uuid=entity_node_uuid, routing_='r'
)
return [episodic_node_from_record(r) for r in records]
async def retrieve_episodes(
self,
executor: QueryExecutor,
reference_time: datetime,
last_n: int = 3,
group_ids: list[str] | None = None,
source: str | None = None,
saga: str | None = None,
) -> list[EpisodicNode]:
if saga is not None and group_ids is not None and len(group_ids) > 0:
source_clause = 'AND e.source = $source' if source else ''
query = (
"""
MATCH (s:Saga {name: $saga_name, group_id: $group_id})-[:HAS_EPISODE]->(e:Episodic)
WHERE e.valid_at <= $reference_time
"""
+ source_clause
+ """
RETURN
"""
+ EPISODIC_NODE_RETURN
+ """
ORDER BY e.valid_at DESC
LIMIT $num_episodes
"""
)
records, _, _ = await executor.execute_query(
query,
saga_name=saga,
group_id=group_ids[0],
reference_time=reference_time,
source=source,
num_episodes=last_n,
routing_='r',
)
else:
source_clause = 'AND e.source = $source' if source else ''
group_clause = 'AND e.group_id IN $group_ids' if group_ids else ''
query = (
"""
MATCH (e:Episodic)
WHERE e.valid_at <= $reference_time
"""
+ group_clause
+ source_clause
+ """
RETURN
"""
+ EPISODIC_NODE_RETURN
+ """
ORDER BY e.valid_at DESC
LIMIT $num_episodes
"""
)
records, _, _ = await executor.execute_query(
query,
reference_time=reference_time,
group_ids=group_ids,
source=source,
num_episodes=last_n,
routing_='r',
)
return [episodic_node_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neo4j/operations/episode_node_ops.py",
"license": "Apache License 2.0",
"lines": 266,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neo4j/operations/episodic_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.episodic_edge_ops import EpisodicEdgeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.edges import EpisodicEdge
from graphiti_core.errors import EdgeNotFoundError
from graphiti_core.helpers import parse_db_date
from graphiti_core.models.edges.edge_db_queries import (
EPISODIC_EDGE_RETURN,
EPISODIC_EDGE_SAVE,
get_episodic_edge_save_bulk_query,
)
logger = logging.getLogger(__name__)
def _episodic_edge_from_record(record: Any) -> EpisodicEdge:
    """Hydrate an EpisodicEdge model from a raw Neo4j record mapping."""
    fields: dict[str, Any] = {
        key: record[key]
        for key in ('uuid', 'group_id', 'source_node_uuid', 'target_node_uuid')
    }
    # created_at comes back as a driver date value; normalize to datetime.
    fields['created_at'] = parse_db_date(record['created_at'])
    return EpisodicEdge(**fields)
class Neo4jEpisodicEdgeOperations(EpisodicEdgeOperations):
    """Neo4j implementation of CRUD operations for MENTIONS (episodic) edges.

    Mutations run inside ``tx`` when one is supplied, otherwise via ``executor``.
    """

    async def save(
        self,
        executor: QueryExecutor,
        edge: EpisodicEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Upsert one episode->entity MENTIONS edge."""
        params: dict[str, Any] = {
            # Source is the episode, target is the mentioned entity.
            'episode_uuid': edge.source_node_uuid,
            'entity_uuid': edge.target_node_uuid,
            'uuid': edge.uuid,
            'group_id': edge.group_id,
            'created_at': edge.created_at,
        }
        if tx is not None:
            await tx.run(EPISODIC_EDGE_SAVE, **params)
        else:
            await executor.execute_query(EPISODIC_EDGE_SAVE, **params)
        logger.debug(f'Saved Edge to Graph: {edge.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        edges: list[EpisodicEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Upsert many MENTIONS edges with one bulk query.

        NOTE(review): ``batch_size`` is accepted but unused here — confirm.
        """
        query = get_episodic_edge_save_bulk_query(GraphProvider.NEO4J)
        edge_dicts = [e.model_dump() for e in edges]
        if tx is not None:
            await tx.run(query, episodic_edges=edge_dicts)
        else:
            await executor.execute_query(query, episodic_edges=edge_dicts)

    async def delete(
        self,
        executor: QueryExecutor,
        edge: EpisodicEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Delete the edge with this uuid (any of the three edge types)."""
        query = """
        MATCH (n)-[e:MENTIONS|RELATES_TO|HAS_MEMBER {uuid: $uuid}]->(m)
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuid=edge.uuid)
        else:
            await executor.execute_query(query, uuid=edge.uuid)
        logger.debug(f'Deleted Edge: {edge.uuid}')

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        """Delete all edges whose uuid is in ``uuids``."""
        query = """
        MATCH (n)-[e:MENTIONS|RELATES_TO|HAS_MEMBER]->(m)
        WHERE e.uuid IN $uuids
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> EpisodicEdge:
        """Fetch one MENTIONS edge by uuid; raises EdgeNotFoundError if absent."""
        query = (
            """
            MATCH (n:Episodic)-[e:MENTIONS {uuid: $uuid}]->(m:Entity)
            RETURN
            """
            + EPISODIC_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid, routing_='r')
        edges = [_episodic_edge_from_record(r) for r in records]
        if len(edges) == 0:
            raise EdgeNotFoundError(uuid)
        return edges[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[EpisodicEdge]:
        """Fetch all MENTIONS edges whose uuid is in ``uuids``."""
        query = (
            """
            MATCH (n:Episodic)-[e:MENTIONS]->(m:Entity)
            WHERE e.uuid IN $uuids
            RETURN
            """
            + EPISODIC_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids, routing_='r')
        return [_episodic_edge_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EpisodicEdge]:
        """Page MENTIONS edges by group ids, uuid-descending keyset pagination."""
        cursor_clause = 'AND e.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Episodic)-[e:MENTIONS]->(m:Entity)
            WHERE e.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + EPISODIC_EDGE_RETURN
            + """
            ORDER BY e.uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
            routing_='r',
        )
        return [_episodic_edge_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neo4j/operations/episodic_edge_ops.py",
"license": "Apache License 2.0",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neo4j/operations/graph_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.graph_ops import GraphMaintenanceOperations
from graphiti_core.driver.operations.graph_utils import Neighbor, label_propagation
from graphiti_core.driver.query_executor import QueryExecutor
from graphiti_core.driver.record_parsers import community_node_from_record, entity_node_from_record
from graphiti_core.graph_queries import get_fulltext_indices, get_range_indices
from graphiti_core.helpers import semaphore_gather
from graphiti_core.models.nodes.node_db_queries import (
COMMUNITY_NODE_RETURN,
get_entity_node_return_query,
)
from graphiti_core.nodes import CommunityNode, EntityNode, EpisodicNode
logger = logging.getLogger(__name__)
class Neo4jGraphMaintenanceOperations(GraphMaintenanceOperations):
    """Neo4j implementation of whole-graph maintenance and community operations."""

    async def clear_data(
        self,
        executor: QueryExecutor,
        group_ids: list[str] | None = None,
    ) -> None:
        """Delete everything (group_ids=None) or just the given groups' nodes."""
        if group_ids is None:
            await executor.execute_query('MATCH (n) DETACH DELETE n')
        else:
            for label in ['Entity', 'Episodic', 'Community']:
                await executor.execute_query(
                    f"""
                    MATCH (n:{label})
                    WHERE n.group_id IN $group_ids
                    DETACH DELETE n
                    """,
                    group_ids=group_ids,
                )

    async def build_indices_and_constraints(
        self,
        executor: QueryExecutor,
        delete_existing: bool = False,
    ) -> None:
        """Create range and fulltext indices, optionally dropping existing ones first."""
        if delete_existing:
            await self.delete_all_indexes(executor)
        range_indices = get_range_indices(GraphProvider.NEO4J)
        fulltext_indices = get_fulltext_indices(GraphProvider.NEO4J)
        index_queries = range_indices + fulltext_indices
        # Index creation statements are independent; run them concurrently.
        await semaphore_gather(*[executor.execute_query(q) for q in index_queries])

    async def delete_all_indexes(
        self,
        executor: QueryExecutor,
    ) -> None:
        """Drop every index in the database.

        NOTE(review): `db.indexes()` is deprecated in recent Neo4j versions and
        `DROP INDEX name` with a yielded variable may not parse there — confirm
        against the targeted Neo4j version (SHOW INDEXES is the newer form).
        """
        await executor.execute_query('CALL db.indexes() YIELD name DROP INDEX name')

    async def get_community_clusters(
        self,
        executor: QueryExecutor,
        group_ids: list[str] | None = None,
    ) -> list[Any]:
        """Cluster entity nodes per group via label propagation.

        Returns a list of clusters, each a list of EntityNode. When
        ``group_ids`` is None, all distinct group ids found in the graph
        are processed.
        """
        community_clusters: list[list[EntityNode]] = []
        if group_ids is None:
            group_id_values, _, _ = await executor.execute_query(
                """
                MATCH (n:Entity)
                WHERE n.group_id IS NOT NULL
                RETURN
                    collect(DISTINCT n.group_id) AS group_ids
                """
            )
            group_ids = group_id_values[0]['group_ids'] if group_id_values else []
        resolved_group_ids: list[str] = group_ids or []
        for group_id in resolved_group_ids:
            # projection maps each node uuid to its RELATES_TO neighbors plus
            # edge multiplicities; this is the input to label propagation.
            projection: dict[str, list[Neighbor]] = {}
            # Get all entity nodes for this group
            node_records, _, _ = await executor.execute_query(
                """
                MATCH (n:Entity)
                WHERE n.group_id IN $group_ids
                RETURN
                """
                + get_entity_node_return_query(GraphProvider.NEO4J),
                group_ids=[group_id],
                routing_='r',
            )
            nodes = [entity_node_from_record(r) for r in node_records]
            for node in nodes:
                # One query per node: count parallel RELATES_TO edges to each neighbor.
                records, _, _ = await executor.execute_query(
                    """
                    MATCH (n:Entity {group_id: $group_id, uuid: $uuid})-[e:RELATES_TO]-(m: Entity {group_id: $group_id})
                    WITH count(e) AS count, m.uuid AS uuid
                    RETURN
                        uuid,
                        count
                    """,
                    uuid=node.uuid,
                    group_id=group_id,
                )
                projection[node.uuid] = [
                    Neighbor(node_uuid=record['uuid'], edge_count=record['count'])
                    for record in records
                ]
            cluster_uuids = label_propagation(projection)
            # Fetch full node objects for each cluster
            for cluster in cluster_uuids:
                if not cluster:
                    continue
                cluster_records, _, _ = await executor.execute_query(
                    """
                    MATCH (n:Entity)
                    WHERE n.uuid IN $uuids
                    RETURN
                    """
                    + get_entity_node_return_query(GraphProvider.NEO4J),
                    uuids=cluster,
                    routing_='r',
                )
                community_clusters.append([entity_node_from_record(r) for r in cluster_records])
        return community_clusters

    async def remove_communities(
        self,
        executor: QueryExecutor,
    ) -> None:
        """Detach-delete every Community node."""
        await executor.execute_query(
            """
            MATCH (c:Community)
            DETACH DELETE c
            """
        )

    async def determine_entity_community(
        self,
        executor: QueryExecutor,
        entity: EntityNode,
    ) -> None:
        """Determine community membership for an entity.

        NOTE(review): the result of the second query is never used and the
        method always returns None — the neighbor-community lookup appears to
        be dead code or an unfinished implementation; confirm intent.
        """
        # Check if the node is already part of a community
        records, _, _ = await executor.execute_query(
            """
            MATCH (c:Community)-[:HAS_MEMBER]->(n:Entity {uuid: $entity_uuid})
            RETURN
            """
            + COMMUNITY_NODE_RETURN,
            entity_uuid=entity.uuid,
        )
        if len(records) > 0:
            return
        # If the node has no community, find the mode community of surrounding entities
        records, _, _ = await executor.execute_query(
            """
            MATCH (c:Community)-[:HAS_MEMBER]->(m:Entity)-[:RELATES_TO]-(n:Entity {uuid: $entity_uuid})
            RETURN
            """
            + COMMUNITY_NODE_RETURN,
            entity_uuid=entity.uuid,
        )

    async def get_mentioned_nodes(
        self,
        executor: QueryExecutor,
        episodes: list[EpisodicNode],
    ) -> list[EntityNode]:
        """Return the distinct entities mentioned by any of the given episodes."""
        episode_uuids = [episode.uuid for episode in episodes]
        records, _, _ = await executor.execute_query(
            """
            MATCH (episode:Episodic)-[:MENTIONS]->(n:Entity)
            WHERE episode.uuid IN $uuids
            RETURN DISTINCT
            """
            + get_entity_node_return_query(GraphProvider.NEO4J),
            uuids=episode_uuids,
            routing_='r',
        )
        return [entity_node_from_record(r) for r in records]

    async def get_communities_by_nodes(
        self,
        executor: QueryExecutor,
        nodes: list[EntityNode],
    ) -> list[CommunityNode]:
        """Return the distinct communities containing any of the given entities."""
        node_uuids = [node.uuid for node in nodes]
        records, _, _ = await executor.execute_query(
            """
            MATCH (c:Community)-[:HAS_MEMBER]->(m:Entity)
            WHERE m.uuid IN $uuids
            RETURN DISTINCT
            """
            + COMMUNITY_NODE_RETURN,
            uuids=node_uuids,
            routing_='r',
        )
        return [community_node_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neo4j/operations/graph_ops.py",
"license": "Apache License 2.0",
"lines": 194,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neo4j/operations/has_episode_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.operations.has_episode_edge_ops import HasEpisodeEdgeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.edges import HasEpisodeEdge
from graphiti_core.errors import EdgeNotFoundError
from graphiti_core.helpers import parse_db_date
from graphiti_core.models.edges.edge_db_queries import (
HAS_EPISODE_EDGE_RETURN,
HAS_EPISODE_EDGE_SAVE,
)
logger = logging.getLogger(__name__)
def _has_episode_edge_from_record(record: Any) -> HasEpisodeEdge:
    """Hydrate a HasEpisodeEdge model from a raw Neo4j record mapping."""
    fields: dict[str, Any] = {
        key: record[key]
        for key in ('uuid', 'group_id', 'source_node_uuid', 'target_node_uuid')
    }
    # created_at comes back as a driver date value; normalize to datetime.
    fields['created_at'] = parse_db_date(record['created_at'])
    return HasEpisodeEdge(**fields)
class Neo4jHasEpisodeEdgeOperations(HasEpisodeEdgeOperations):
    """Neo4j implementation of CRUD operations for Saga->Episodic HAS_EPISODE edges."""

    async def save(
        self,
        executor: QueryExecutor,
        edge: HasEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Upsert one saga->episode HAS_EPISODE edge."""
        params: dict[str, Any] = {
            # Source is the saga, target is the episode it owns.
            'saga_uuid': edge.source_node_uuid,
            'episode_uuid': edge.target_node_uuid,
            'uuid': edge.uuid,
            'group_id': edge.group_id,
            'created_at': edge.created_at,
        }
        if tx is not None:
            await tx.run(HAS_EPISODE_EDGE_SAVE, **params)
        else:
            await executor.execute_query(HAS_EPISODE_EDGE_SAVE, **params)
        logger.debug(f'Saved Edge to Graph: {edge.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        edges: list[HasEpisodeEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Save edges one at a time (no bulk query for this edge type yet).

        NOTE(review): ``batch_size`` is accepted but unused — one statement
        per edge is issued; confirm this is intentional.
        """
        for edge in edges:
            await self.save(executor, edge, tx=tx)

    async def delete(
        self,
        executor: QueryExecutor,
        edge: HasEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Delete the HAS_EPISODE edge with this uuid."""
        query = """
        MATCH (n:Saga)-[e:HAS_EPISODE {uuid: $uuid}]->(m:Episodic)
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuid=edge.uuid)
        else:
            await executor.execute_query(query, uuid=edge.uuid)
        logger.debug(f'Deleted Edge: {edge.uuid}')

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        """Delete all HAS_EPISODE edges whose uuid is in ``uuids``."""
        query = """
        MATCH (n:Saga)-[e:HAS_EPISODE]->(m:Episodic)
        WHERE e.uuid IN $uuids
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> HasEpisodeEdge:
        """Fetch one HAS_EPISODE edge by uuid; raises EdgeNotFoundError if absent."""
        query = (
            """
            MATCH (n:Saga)-[e:HAS_EPISODE {uuid: $uuid}]->(m:Episodic)
            RETURN
            """
            + HAS_EPISODE_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid, routing_='r')
        edges = [_has_episode_edge_from_record(r) for r in records]
        if len(edges) == 0:
            raise EdgeNotFoundError(uuid)
        return edges[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[HasEpisodeEdge]:
        """Fetch all HAS_EPISODE edges whose uuid is in ``uuids``."""
        query = (
            """
            MATCH (n:Saga)-[e:HAS_EPISODE]->(m:Episodic)
            WHERE e.uuid IN $uuids
            RETURN
            """
            + HAS_EPISODE_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids, routing_='r')
        return [_has_episode_edge_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[HasEpisodeEdge]:
        """Page HAS_EPISODE edges by group ids, uuid-descending keyset pagination."""
        cursor_clause = 'AND e.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Saga)-[e:HAS_EPISODE]->(m:Episodic)
            WHERE e.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + HAS_EPISODE_EDGE_RETURN
            + """
            ORDER BY e.uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
            routing_='r',
        )
        return [_has_episode_edge_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neo4j/operations/has_episode_edge_ops.py",
"license": "Apache License 2.0",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neo4j/operations/next_episode_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.operations.next_episode_edge_ops import NextEpisodeEdgeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.edges import NextEpisodeEdge
from graphiti_core.errors import EdgeNotFoundError
from graphiti_core.helpers import parse_db_date
from graphiti_core.models.edges.edge_db_queries import (
NEXT_EPISODE_EDGE_RETURN,
NEXT_EPISODE_EDGE_SAVE,
)
logger = logging.getLogger(__name__)
def _next_episode_edge_from_record(record: Any) -> NextEpisodeEdge:
    """Hydrate a NextEpisodeEdge model from a raw Neo4j record mapping."""
    fields: dict[str, Any] = {
        key: record[key]
        for key in ('uuid', 'group_id', 'source_node_uuid', 'target_node_uuid')
    }
    # created_at comes back as a driver date value; normalize to datetime.
    fields['created_at'] = parse_db_date(record['created_at'])
    return NextEpisodeEdge(**fields)
class Neo4jNextEpisodeEdgeOperations(NextEpisodeEdgeOperations):
    """Neo4j implementation of CRUD operations for Episodic->Episodic NEXT_EPISODE edges."""

    async def save(
        self,
        executor: QueryExecutor,
        edge: NextEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Upsert one episode->episode NEXT_EPISODE ordering edge."""
        params: dict[str, Any] = {
            'source_episode_uuid': edge.source_node_uuid,
            'target_episode_uuid': edge.target_node_uuid,
            'uuid': edge.uuid,
            'group_id': edge.group_id,
            'created_at': edge.created_at,
        }
        if tx is not None:
            await tx.run(NEXT_EPISODE_EDGE_SAVE, **params)
        else:
            await executor.execute_query(NEXT_EPISODE_EDGE_SAVE, **params)
        logger.debug(f'Saved Edge to Graph: {edge.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        edges: list[NextEpisodeEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Save edges one at a time (no bulk query for this edge type yet).

        NOTE(review): ``batch_size`` is accepted but unused — one statement
        per edge is issued; confirm this is intentional.
        """
        for edge in edges:
            await self.save(executor, edge, tx=tx)

    async def delete(
        self,
        executor: QueryExecutor,
        edge: NextEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Delete the NEXT_EPISODE edge with this uuid."""
        query = """
        MATCH (n:Episodic)-[e:NEXT_EPISODE {uuid: $uuid}]->(m:Episodic)
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuid=edge.uuid)
        else:
            await executor.execute_query(query, uuid=edge.uuid)
        logger.debug(f'Deleted Edge: {edge.uuid}')

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        """Delete all NEXT_EPISODE edges whose uuid is in ``uuids``."""
        query = """
        MATCH (n:Episodic)-[e:NEXT_EPISODE]->(m:Episodic)
        WHERE e.uuid IN $uuids
        DELETE e
        """
        if tx is not None:
            await tx.run(query, uuids=uuids)
        else:
            await executor.execute_query(query, uuids=uuids)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> NextEpisodeEdge:
        """Fetch one NEXT_EPISODE edge by uuid; raises EdgeNotFoundError if absent."""
        query = (
            """
            MATCH (n:Episodic)-[e:NEXT_EPISODE {uuid: $uuid}]->(m:Episodic)
            RETURN
            """
            + NEXT_EPISODE_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid, routing_='r')
        edges = [_next_episode_edge_from_record(r) for r in records]
        if len(edges) == 0:
            raise EdgeNotFoundError(uuid)
        return edges[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[NextEpisodeEdge]:
        """Fetch all NEXT_EPISODE edges whose uuid is in ``uuids``."""
        query = (
            """
            MATCH (n:Episodic)-[e:NEXT_EPISODE]->(m:Episodic)
            WHERE e.uuid IN $uuids
            RETURN
            """
            + NEXT_EPISODE_EDGE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids, routing_='r')
        return [_next_episode_edge_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[NextEpisodeEdge]:
        """Page NEXT_EPISODE edges by group ids, uuid-descending keyset pagination."""
        cursor_clause = 'AND e.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (n:Episodic)-[e:NEXT_EPISODE]->(m:Episodic)
            WHERE e.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + NEXT_EPISODE_EDGE_RETURN
            + """
            ORDER BY e.uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
            routing_='r',
        )
        return [_next_episode_edge_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neo4j/operations/next_episode_edge_ops.py",
"license": "Apache License 2.0",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neo4j/operations/saga_node_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.saga_node_ops import SagaNodeOperations
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.errors import NodeNotFoundError
from graphiti_core.helpers import parse_db_date
from graphiti_core.models.nodes.node_db_queries import SAGA_NODE_RETURN, get_saga_node_save_query
from graphiti_core.nodes import SagaNode
logger = logging.getLogger(__name__)
def _saga_node_from_record(record: Any) -> SagaNode:
    """Hydrate a SagaNode model from a raw Neo4j record mapping."""
    fields: dict[str, Any] = {key: record[key] for key in ('uuid', 'name', 'group_id')}
    # created_at comes back as a driver date value; normalize to datetime.
    fields['created_at'] = parse_db_date(record['created_at'])
    return SagaNode(**fields)
class Neo4jSagaNodeOperations(SagaNodeOperations):
    """Neo4j implementation of CRUD and query operations for Saga nodes."""

    async def save(
        self,
        executor: QueryExecutor,
        node: SagaNode,
        tx: Transaction | None = None,
    ) -> None:
        """Upsert a single saga node."""
        query = get_saga_node_save_query(GraphProvider.NEO4J)
        params: dict[str, Any] = {
            'uuid': node.uuid,
            'name': node.name,
            'group_id': node.group_id,
            'created_at': node.created_at,
        }
        if tx is not None:
            await tx.run(query, **params)
        else:
            await executor.execute_query(query, **params)
        logger.debug(f'Saved Saga Node to Graph: {node.uuid}')

    async def save_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[SagaNode],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Save nodes one at a time (no bulk query for sagas yet).

        NOTE(review): ``batch_size`` is accepted but unused — one statement
        per node is issued; confirm this is intentional.
        """
        for node in nodes:
            await self.save(executor, node, tx=tx)

    async def delete(
        self,
        executor: QueryExecutor,
        node: SagaNode,
        tx: Transaction | None = None,
    ) -> None:
        """Detach-delete the saga node with this uuid."""
        query = """
        MATCH (n:Saga {uuid: $uuid})
        DETACH DELETE n
        """
        if tx is not None:
            await tx.run(query, uuid=node.uuid)
        else:
            await executor.execute_query(query, uuid=node.uuid)
        logger.debug(f'Deleted Node: {node.uuid}')

    async def delete_by_group_id(
        self,
        executor: QueryExecutor,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Delete all sagas in a group, batched via CALL ... IN TRANSACTIONS."""
        query = """
        MATCH (n:Saga {group_id: $group_id})
        CALL (n) {
            DETACH DELETE n
        } IN TRANSACTIONS OF $batch_size ROWS
        """
        if tx is not None:
            await tx.run(query, group_id=group_id, batch_size=batch_size)
        else:
            await executor.execute_query(query, group_id=group_id, batch_size=batch_size)

    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Delete the sagas with the given uuids, batched per transaction."""
        query = """
        MATCH (n:Saga)
        WHERE n.uuid IN $uuids
        CALL (n) {
            DETACH DELETE n
        } IN TRANSACTIONS OF $batch_size ROWS
        """
        if tx is not None:
            await tx.run(query, uuids=uuids, batch_size=batch_size)
        else:
            await executor.execute_query(query, uuids=uuids, batch_size=batch_size)

    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> SagaNode:
        """Fetch one saga by uuid; raises NodeNotFoundError if absent."""
        query = (
            """
            MATCH (s:Saga {uuid: $uuid})
            RETURN
            """
            + SAGA_NODE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuid=uuid, routing_='r')
        nodes = [_saga_node_from_record(r) for r in records]
        if len(nodes) == 0:
            raise NodeNotFoundError(uuid)
        return nodes[0]

    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[SagaNode]:
        """Fetch all sagas whose uuid is in ``uuids``."""
        query = (
            """
            MATCH (s:Saga)
            WHERE s.uuid IN $uuids
            RETURN
            """
            + SAGA_NODE_RETURN
        )
        records, _, _ = await executor.execute_query(query, uuids=uuids, routing_='r')
        return [_saga_node_from_record(r) for r in records]

    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[SagaNode]:
        """Page sagas by group ids, uuid-descending keyset pagination."""
        cursor_clause = 'AND s.uuid < $uuid' if uuid_cursor else ''
        limit_clause = 'LIMIT $limit' if limit is not None else ''
        query = (
            """
            MATCH (s:Saga)
            WHERE s.group_id IN $group_ids
            """
            + cursor_clause
            + """
            RETURN
            """
            + SAGA_NODE_RETURN
            + """
            ORDER BY s.uuid DESC
            """
            + limit_clause
        )
        records, _, _ = await executor.execute_query(
            query,
            group_ids=group_ids,
            uuid=uuid_cursor,
            limit=limit,
            routing_='r',
        )
        return [_saga_node_from_record(r) for r in records]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neo4j/operations/saga_node_ops.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neo4j/operations/search_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.driver.operations.search_ops import SearchOperations
from graphiti_core.driver.query_executor import QueryExecutor
from graphiti_core.driver.record_parsers import (
community_node_from_record,
entity_edge_from_record,
entity_node_from_record,
episodic_node_from_record,
)
from graphiti_core.edges import EntityEdge
from graphiti_core.graph_queries import (
get_nodes_query,
get_relationships_query,
get_vector_cosine_func_query,
)
from graphiti_core.helpers import lucene_sanitize
from graphiti_core.models.edges.edge_db_queries import get_entity_edge_return_query
from graphiti_core.models.nodes.node_db_queries import (
COMMUNITY_NODE_RETURN,
EPISODIC_NODE_RETURN,
get_entity_node_return_query,
)
from graphiti_core.nodes import CommunityNode, EntityNode, EpisodicNode
from graphiti_core.search.search_filters import (
SearchFilters,
edge_search_filter_query_constructor,
node_search_filter_query_constructor,
)
logger = logging.getLogger(__name__)
MAX_QUERY_LENGTH = 128
def _build_neo4j_fulltext_query(
    query: str,
    group_ids: list[str] | None = None,
    max_query_length: int = MAX_QUERY_LENGTH,
) -> str:
    """Compose a Lucene fulltext query string, optionally scoped to group ids.

    Returns '' when the estimated term count (whitespace-split words plus one
    per group id) reaches ``max_query_length``.
    """
    prefix = ''
    if group_ids:
        # group_id:"a" OR group_id:"b" ... AND (<sanitized terms>)
        prefix = ' OR '.join(f'group_id:"{gid}"' for gid in group_ids) + ' AND '
    lucene_query = lucene_sanitize(query)
    if len(lucene_query.split(' ')) + len(group_ids or '') >= max_query_length:
        return ''
    return f'{prefix}({lucene_query})'
class Neo4jSearchOperations(SearchOperations):
    """Neo4j implementation of the driver search operations.

    Each method assembles a Cypher query (fulltext index lookup, vector
    cosine similarity, or BFS traversal), executes it through the given
    QueryExecutor with read routing, and parses the records back into
    graphiti node/edge objects.
    """

    # --- Node search ---
    async def node_fulltext_search(
        self,
        executor: QueryExecutor,
        query: str,
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EntityNode]:
        """Fulltext (Lucene) search over the entity name/summary index.

        Returns [] immediately when the built fulltext query is empty
        (i.e. rejected as too long by _build_neo4j_fulltext_query).
        """
        fuzzy_query = _build_neo4j_fulltext_query(query, group_ids)
        if fuzzy_query == '':
            return []
        filter_queries, filter_params = node_search_filter_query_constructor(
            search_filter, GraphProvider.NEO4J
        )
        if group_ids is not None:
            filter_queries.append('n.group_id IN $group_ids')
            filter_params['group_ids'] = group_ids
        filter_query = ''
        if filter_queries:
            filter_query = ' WHERE ' + (' AND '.join(filter_queries))
        cypher = (
            get_nodes_query(
                'node_name_and_summary', '$query', limit=limit, provider=GraphProvider.NEO4J
            )
            + 'YIELD node AS n, score'
            + filter_query
            + """
            WITH n, score
            ORDER BY score DESC
            LIMIT $limit
            RETURN
            """
            + get_entity_node_return_query(GraphProvider.NEO4J)
        )
        records, _, _ = await executor.execute_query(
            cypher,
            query=fuzzy_query,
            limit=limit,
            routing_='r',
            **filter_params,
        )
        return [entity_node_from_record(r) for r in records]

    async def node_similarity_search(
        self,
        executor: QueryExecutor,
        search_vector: list[float],
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
        min_score: float = 0.6,
    ) -> list[EntityNode]:
        """Cosine-similarity search over entity name embeddings.

        Keeps only nodes whose similarity with search_vector exceeds
        min_score, ordered best-first, capped at limit.
        """
        filter_queries, filter_params = node_search_filter_query_constructor(
            search_filter, GraphProvider.NEO4J
        )
        if group_ids is not None:
            filter_queries.append('n.group_id IN $group_ids')
            filter_params['group_ids'] = group_ids
        filter_query = ''
        if filter_queries:
            filter_query = ' WHERE ' + (' AND '.join(filter_queries))
        cypher = (
            'MATCH (n:Entity)'
            + filter_query
            + """
            WITH n, """
            + get_vector_cosine_func_query(
                'n.name_embedding', '$search_vector', GraphProvider.NEO4J
            )
            + """ AS score
            WHERE score > $min_score
            RETURN
            """
            + get_entity_node_return_query(GraphProvider.NEO4J)
            + """
            ORDER BY score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            search_vector=search_vector,
            limit=limit,
            min_score=min_score,
            routing_='r',
            **filter_params,
        )
        return [entity_node_from_record(r) for r in records]

    async def node_bfs_search(
        self,
        executor: QueryExecutor,
        origin_uuids: list[str],
        search_filter: SearchFilters,
        max_depth: int,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EntityNode]:
        """BFS from origin nodes: entities reachable within max_depth hops
        over RELATES_TO/MENTIONS, restricted to the origin's group_id.
        """
        if not origin_uuids or max_depth < 1:
            return []
        filter_queries, filter_params = node_search_filter_query_constructor(
            search_filter, GraphProvider.NEO4J
        )
        if group_ids is not None:
            filter_queries.append('n.group_id IN $group_ids')
            filter_queries.append('origin.group_id IN $group_ids')
            filter_params['group_ids'] = group_ids
        # ' AND ' (not ' WHERE ') because the base query already has a WHERE.
        filter_query = ''
        if filter_queries:
            filter_query = ' AND ' + (' AND '.join(filter_queries))
        # max_depth is interpolated into the variable-length pattern via
        # f-string — Cypher does not allow a parameter there.
        cypher = (
            f"""
            UNWIND $bfs_origin_node_uuids AS origin_uuid
            MATCH (origin {{uuid: origin_uuid}})-[:RELATES_TO|MENTIONS*1..{max_depth}]->(n:Entity)
            WHERE n.group_id = origin.group_id
            """
            + filter_query
            + """
            RETURN
            """
            + get_entity_node_return_query(GraphProvider.NEO4J)
            + """
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            bfs_origin_node_uuids=origin_uuids,
            limit=limit,
            routing_='r',
            **filter_params,
        )
        return [entity_node_from_record(r) for r in records]

    # --- Edge search ---
    async def edge_fulltext_search(
        self,
        executor: QueryExecutor,
        query: str,
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EntityEdge]:
        """Fulltext (Lucene) search over the edge name/fact index,
        re-matched to the RELATES_TO relationship to bind endpoints n, m.
        """
        fuzzy_query = _build_neo4j_fulltext_query(query, group_ids)
        if fuzzy_query == '':
            return []
        filter_queries, filter_params = edge_search_filter_query_constructor(
            search_filter, GraphProvider.NEO4J
        )
        if group_ids is not None:
            filter_queries.append('e.group_id IN $group_ids')
            filter_params['group_ids'] = group_ids
        filter_query = ''
        if filter_queries:
            filter_query = ' WHERE ' + (' AND '.join(filter_queries))
        cypher = (
            get_relationships_query('edge_name_and_fact', limit=limit, provider=GraphProvider.NEO4J)
            + """
            YIELD relationship AS rel, score
            MATCH (n:Entity)-[e:RELATES_TO {uuid: rel.uuid}]->(m:Entity)
            """
            + filter_query
            + """
            WITH e, score, n, m
            RETURN
            """
            + get_entity_edge_return_query(GraphProvider.NEO4J)
            + """
            ORDER BY score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            query=fuzzy_query,
            limit=limit,
            routing_='r',
            **filter_params,
        )
        return [entity_edge_from_record(r) for r in records]

    async def edge_similarity_search(
        self,
        executor: QueryExecutor,
        search_vector: list[float],
        source_node_uuid: str | None,
        target_node_uuid: str | None,
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
        min_score: float = 0.6,
    ) -> list[EntityEdge]:
        """Cosine-similarity search over edge fact embeddings, optionally
        pinned to a specific source and/or target node uuid.
        """
        filter_queries, filter_params = edge_search_filter_query_constructor(
            search_filter, GraphProvider.NEO4J
        )
        if group_ids is not None:
            filter_queries.append('e.group_id IN $group_ids')
            filter_params['group_ids'] = group_ids
        if source_node_uuid is not None:
            filter_params['source_uuid'] = source_node_uuid
            filter_queries.append('n.uuid = $source_uuid')
        if target_node_uuid is not None:
            filter_params['target_uuid'] = target_node_uuid
            filter_queries.append('m.uuid = $target_uuid')
        filter_query = ''
        if filter_queries:
            filter_query = ' WHERE ' + (' AND '.join(filter_queries))
        cypher = (
            'MATCH (n:Entity)-[e:RELATES_TO]->(m:Entity)'
            + filter_query
            + """
            WITH DISTINCT e, n, m, """
            + get_vector_cosine_func_query(
                'e.fact_embedding', '$search_vector', GraphProvider.NEO4J
            )
            + """ AS score
            WHERE score > $min_score
            RETURN
            """
            + get_entity_edge_return_query(GraphProvider.NEO4J)
            + """
            ORDER BY score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            search_vector=search_vector,
            limit=limit,
            min_score=min_score,
            routing_='r',
            **filter_params,
        )
        return [entity_edge_from_record(r) for r in records]

    async def edge_bfs_search(
        self,
        executor: QueryExecutor,
        origin_uuids: list[str],
        max_depth: int,
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EntityEdge]:
        """BFS from origin nodes, collecting the RELATES_TO edges on every
        path up to max_depth hops.
        """
        if not origin_uuids:
            return []
        filter_queries, filter_params = edge_search_filter_query_constructor(
            search_filter, GraphProvider.NEO4J
        )
        if group_ids is not None:
            filter_queries.append('e.group_id IN $group_ids')
            filter_params['group_ids'] = group_ids
        filter_query = ''
        if filter_queries:
            filter_query = ' WHERE ' + (' AND '.join(filter_queries))
        # max_depth is f-string-interpolated into the pattern (parameters are
        # not allowed in variable-length bounds).
        cypher = (
            f"""
            UNWIND $bfs_origin_node_uuids AS origin_uuid
            MATCH path = (origin {{uuid: origin_uuid}})-[:RELATES_TO|MENTIONS*1..{max_depth}]->(:Entity)
            UNWIND relationships(path) AS rel
            MATCH (n:Entity)-[e:RELATES_TO {{uuid: rel.uuid}}]-(m:Entity)
            """
            + filter_query
            + """
            RETURN DISTINCT
            """
            + get_entity_edge_return_query(GraphProvider.NEO4J)
            + """
            LIMIT $limit
            """
        )
        # NOTE(review): `depth` is sent as a query parameter but the query
        # text above never references $depth (it interpolates max_depth
        # directly) — appears redundant; confirm before removing.
        records, _, _ = await executor.execute_query(
            cypher,
            bfs_origin_node_uuids=origin_uuids,
            depth=max_depth,
            limit=limit,
            routing_='r',
            **filter_params,
        )
        return [entity_edge_from_record(r) for r in records]

    # --- Episode search ---
    async def episode_fulltext_search(
        self,
        executor: QueryExecutor,
        query: str,
        search_filter: SearchFilters,  # noqa: ARG002 — accepted for interface parity, unused here
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EpisodicNode]:
        """Fulltext (Lucene) search over the episode content index."""
        fuzzy_query = _build_neo4j_fulltext_query(query, group_ids)
        if fuzzy_query == '':
            return []
        filter_params: dict[str, Any] = {}
        group_filter_query = ''
        if group_ids is not None:
            group_filter_query += '\nAND e.group_id IN $group_ids'
            filter_params['group_ids'] = group_ids
        cypher = (
            get_nodes_query('episode_content', '$query', limit=limit, provider=GraphProvider.NEO4J)
            + """
            YIELD node AS episode, score
            MATCH (e:Episodic)
            WHERE e.uuid = episode.uuid
            """
            + group_filter_query
            + """
            RETURN
            """
            + EPISODIC_NODE_RETURN
            + """
            ORDER BY score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher, query=fuzzy_query, limit=limit, routing_='r', **filter_params
        )
        return [episodic_node_from_record(r) for r in records]

    # --- Community search ---
    async def community_fulltext_search(
        self,
        executor: QueryExecutor,
        query: str,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[CommunityNode]:
        """Fulltext (Lucene) search over the community name index."""
        fuzzy_query = _build_neo4j_fulltext_query(query, group_ids)
        if fuzzy_query == '':
            return []
        filter_params: dict[str, Any] = {}
        group_filter_query = ''
        if group_ids is not None:
            group_filter_query = 'WHERE c.group_id IN $group_ids'
            filter_params['group_ids'] = group_ids
        cypher = (
            get_nodes_query('community_name', '$query', limit=limit, provider=GraphProvider.NEO4J)
            + """
            YIELD node AS c, score
            WITH c, score
            """
            + group_filter_query
            + """
            RETURN
            """
            + COMMUNITY_NODE_RETURN
            + """
            ORDER BY score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher, query=fuzzy_query, limit=limit, routing_='r', **filter_params
        )
        return [community_node_from_record(r) for r in records]

    async def community_similarity_search(
        self,
        executor: QueryExecutor,
        search_vector: list[float],
        group_ids: list[str] | None = None,
        limit: int = 10,
        min_score: float = 0.6,
    ) -> list[CommunityNode]:
        """Cosine-similarity search over community name embeddings."""
        query_params: dict[str, Any] = {}
        group_filter_query = ''
        if group_ids is not None:
            group_filter_query += ' WHERE c.group_id IN $group_ids'
            query_params['group_ids'] = group_ids
        cypher = (
            'MATCH (c:Community)'
            + group_filter_query
            + """
            WITH c,
            """
            + get_vector_cosine_func_query(
                'c.name_embedding', '$search_vector', GraphProvider.NEO4J
            )
            + """ AS score
            WHERE score > $min_score
            RETURN
            """
            + COMMUNITY_NODE_RETURN
            + """
            ORDER BY score DESC
            LIMIT $limit
            """
        )
        records, _, _ = await executor.execute_query(
            cypher,
            search_vector=search_vector,
            limit=limit,
            min_score=min_score,
            routing_='r',
            **query_params,
        )
        return [community_node_from_record(r) for r in records]

    # --- Rerankers ---
    async def node_distance_reranker(
        self,
        executor: QueryExecutor,
        node_uuids: list[str],
        center_node_uuid: str,
        min_score: float = 0,
    ) -> list[EntityNode]:
        """Rerank candidate nodes by graph proximity to a center node.

        Direct RELATES_TO neighbours of the center get score 1, unreachable
        candidates get inf (so 1/score == 0), and the center itself — if it
        was among the candidates — is pinned first with score 0.1.
        Candidates whose inverse score falls below min_score are dropped.
        """
        filtered_uuids = [u for u in node_uuids if u != center_node_uuid]
        scores: dict[str, float] = {center_node_uuid: 0.0}
        cypher = """
        UNWIND $node_uuids AS node_uuid
        MATCH (center:Entity {uuid: $center_uuid})-[:RELATES_TO]-(n:Entity {uuid: node_uuid})
        RETURN 1 AS score, node_uuid AS uuid
        """
        results, _, _ = await executor.execute_query(
            cypher,
            node_uuids=filtered_uuids,
            center_uuid=center_node_uuid,
            routing_='r',
        )
        for result in results:
            scores[result['uuid']] = result['score']
        # Candidates not adjacent to the center are "infinitely far away".
        for uuid in filtered_uuids:
            if uuid not in scores:
                scores[uuid] = float('inf')
        filtered_uuids.sort(key=lambda cur_uuid: scores[cur_uuid])
        if center_node_uuid in node_uuids:
            scores[center_node_uuid] = 0.1
            filtered_uuids = [center_node_uuid] + filtered_uuids
        reranked_uuids = [u for u in filtered_uuids if (1 / scores[u]) >= min_score]
        if not reranked_uuids:
            return []
        # Fetch the actual EntityNode objects
        get_query = """
        MATCH (n:Entity)
        WHERE n.uuid IN $uuids
        RETURN
        """ + get_entity_node_return_query(GraphProvider.NEO4J)
        records, _, _ = await executor.execute_query(get_query, uuids=reranked_uuids, routing_='r')
        node_map = {r['uuid']: entity_node_from_record(r) for r in records}
        return [node_map[u] for u in reranked_uuids if u in node_map]

    async def episode_mentions_reranker(
        self,
        executor: QueryExecutor,
        node_uuids: list[str],
        min_score: float = 0,
    ) -> list[EntityNode]:
        """Rerank candidate nodes by episode mention count.

        Nodes with no MENTIONS edges are scored inf (sorted last but never
        dropped by the min_score filter, since inf >= min_score). Note the
        sort is ascending on the raw count, so fewest-mentioned come first.
        """
        if not node_uuids:
            return []
        scores: dict[str, float] = {}
        results, _, _ = await executor.execute_query(
            """
            UNWIND $node_uuids AS node_uuid
            MATCH (episode:Episodic)-[r:MENTIONS]->(n:Entity {uuid: node_uuid})
            RETURN count(*) AS score, n.uuid AS uuid
            """,
            node_uuids=node_uuids,
            routing_='r',
        )
        for result in results:
            scores[result['uuid']] = result['score']
        for uuid in node_uuids:
            if uuid not in scores:
                scores[uuid] = float('inf')
        sorted_uuids = list(node_uuids)
        sorted_uuids.sort(key=lambda cur_uuid: scores[cur_uuid])
        reranked_uuids = [u for u in sorted_uuids if scores[u] >= min_score]
        if not reranked_uuids:
            return []
        # Fetch the actual EntityNode objects
        get_query = """
        MATCH (n:Entity)
        WHERE n.uuid IN $uuids
        RETURN
        """ + get_entity_node_return_query(GraphProvider.NEO4J)
        records, _, _ = await executor.execute_query(get_query, uuids=reranked_uuids, routing_='r')
        node_map = {r['uuid']: entity_node_from_record(r) for r in records}
        return [node_map[u] for u in reranked_uuids if u in node_map]

    # --- Filter builders ---
    def build_node_search_filters(self, search_filters: SearchFilters) -> Any:
        """Return Neo4j node-filter clauses/params as a plain dict."""
        filter_queries, filter_params = node_search_filter_query_constructor(
            search_filters, GraphProvider.NEO4J
        )
        return {'filter_queries': filter_queries, 'filter_params': filter_params}

    def build_edge_search_filters(self, search_filters: SearchFilters) -> Any:
        """Return Neo4j edge-filter clauses/params as a plain dict."""
        filter_queries, filter_params = edge_search_filter_query_constructor(
            search_filters, GraphProvider.NEO4J
        )
        return {'filter_queries': filter_queries, 'filter_params': filter_params}

    # --- Fulltext query builder ---
    def build_fulltext_query(
        self,
        query: str,
        group_ids: list[str] | None = None,
        max_query_length: int = 8000,
    ) -> str:
        """Public wrapper around _build_neo4j_fulltext_query.

        NOTE(review): default here is 8000 while the module constant
        MAX_QUERY_LENGTH is 128 — confirm the discrepancy is intentional.
        """
        return _build_neo4j_fulltext_query(query, group_ids, max_query_length)
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neo4j/operations/search_ops.py",
"license": "Apache License 2.0",
"lines": 549,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/operations/community_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC, abstractmethod
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.edges import CommunityEdge
class CommunityEdgeOperations(ABC):
    """Abstract persistence interface for community edges.

    Concrete graph-database drivers implement these methods; every method
    executes through the supplied QueryExecutor and may optionally join an
    already-open Transaction.
    """

    # Persist (create or update) a single edge.
    @abstractmethod
    async def save(
        self,
        executor: QueryExecutor,
        edge: CommunityEdge,
        tx: Transaction | None = None,
    ) -> None: ...

    # Remove a single edge.
    @abstractmethod
    async def delete(
        self,
        executor: QueryExecutor,
        edge: CommunityEdge,
        tx: Transaction | None = None,
    ) -> None: ...

    # Remove all edges matching the given uuids.
    @abstractmethod
    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None: ...

    # Fetch one edge by uuid (implementations raise when not found).
    @abstractmethod
    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> CommunityEdge: ...

    # Fetch all edges matching the given uuids.
    @abstractmethod
    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[CommunityEdge]: ...

    # Fetch edges in the given groups; uuid_cursor presumably pages results
    # past that uuid — confirm against concrete implementations.
    @abstractmethod
    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[CommunityEdge]: ...
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/operations/community_edge_ops.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
getzep/graphiti:graphiti_core/driver/operations/community_node_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC, abstractmethod
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.nodes import CommunityNode
class CommunityNodeOperations(ABC):
    """Abstract persistence interface for community nodes.

    Concrete graph-database drivers implement these methods; every method
    executes through the supplied QueryExecutor and may optionally join an
    already-open Transaction.
    """

    # Persist (create or update) a single node.
    @abstractmethod
    async def save(
        self,
        executor: QueryExecutor,
        node: CommunityNode,
        tx: Transaction | None = None,
    ) -> None: ...

    # Persist many nodes, committing in batches of batch_size.
    @abstractmethod
    async def save_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[CommunityNode],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None: ...

    # Remove a single node.
    @abstractmethod
    async def delete(
        self,
        executor: QueryExecutor,
        node: CommunityNode,
        tx: Transaction | None = None,
    ) -> None: ...

    # Remove every node in a group, deleting in batches of batch_size.
    @abstractmethod
    async def delete_by_group_id(
        self,
        executor: QueryExecutor,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None: ...

    # Remove all nodes matching the given uuids, in batches.
    @abstractmethod
    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None: ...

    # Fetch one node by uuid (implementations raise when not found).
    @abstractmethod
    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> CommunityNode: ...

    # Fetch all nodes matching the given uuids.
    @abstractmethod
    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[CommunityNode]: ...

    # Fetch nodes in the given groups; uuid_cursor presumably pages results
    # past that uuid — confirm against concrete implementations.
    @abstractmethod
    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[CommunityNode]: ...

    # Populate the node's name embedding from storage (mutates node).
    @abstractmethod
    async def load_name_embedding(
        self,
        executor: QueryExecutor,
        node: CommunityNode,
    ) -> None: ...
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/operations/community_node_ops.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
getzep/graphiti:graphiti_core/driver/operations/entity_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC, abstractmethod
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.edges import EntityEdge
class EntityEdgeOperations(ABC):
    """Abstract persistence interface for entity edges.

    Concrete graph-database drivers implement these methods; every method
    executes through the supplied QueryExecutor and may optionally join an
    already-open Transaction.
    """

    # Persist (create or update) a single edge.
    @abstractmethod
    async def save(
        self,
        executor: QueryExecutor,
        edge: EntityEdge,
        tx: Transaction | None = None,
    ) -> None: ...

    # Persist many edges, committing in batches of batch_size.
    @abstractmethod
    async def save_bulk(
        self,
        executor: QueryExecutor,
        edges: list[EntityEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None: ...

    # Remove a single edge.
    @abstractmethod
    async def delete(
        self,
        executor: QueryExecutor,
        edge: EntityEdge,
        tx: Transaction | None = None,
    ) -> None: ...

    # Remove all edges matching the given uuids.
    @abstractmethod
    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None: ...

    # Fetch one edge by uuid (implementations raise when not found).
    @abstractmethod
    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> EntityEdge: ...

    # Fetch all edges matching the given uuids.
    @abstractmethod
    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[EntityEdge]: ...

    # Fetch edges in the given groups; uuid_cursor presumably pages results
    # past that uuid — confirm against concrete implementations.
    @abstractmethod
    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EntityEdge]: ...

    # Fetch edges connecting a specific source/target node pair.
    @abstractmethod
    async def get_between_nodes(
        self,
        executor: QueryExecutor,
        source_node_uuid: str,
        target_node_uuid: str,
    ) -> list[EntityEdge]: ...

    # Fetch every edge incident to the given node.
    @abstractmethod
    async def get_by_node_uuid(
        self,
        executor: QueryExecutor,
        node_uuid: str,
    ) -> list[EntityEdge]: ...

    # Populate the edge's embeddings from storage (mutates edge).
    @abstractmethod
    async def load_embeddings(
        self,
        executor: QueryExecutor,
        edge: EntityEdge,
    ) -> None: ...

    # Populate embeddings for many edges, reading in batches.
    @abstractmethod
    async def load_embeddings_bulk(
        self,
        executor: QueryExecutor,
        edges: list[EntityEdge],
        batch_size: int = 100,
    ) -> None: ...
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/operations/entity_edge_ops.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
getzep/graphiti:graphiti_core/driver/operations/entity_node_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC, abstractmethod
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.nodes import EntityNode
class EntityNodeOperations(ABC):
    """Abstract persistence interface for entity nodes.

    Concrete graph-database drivers implement these methods; every method
    executes through the supplied QueryExecutor and may optionally join an
    already-open Transaction.
    """

    # Persist (create or update) a single node.
    @abstractmethod
    async def save(
        self,
        executor: QueryExecutor,
        node: EntityNode,
        tx: Transaction | None = None,
    ) -> None: ...

    # Persist many nodes, committing in batches of batch_size.
    @abstractmethod
    async def save_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[EntityNode],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None: ...

    # Remove a single node.
    @abstractmethod
    async def delete(
        self,
        executor: QueryExecutor,
        node: EntityNode,
        tx: Transaction | None = None,
    ) -> None: ...

    # Remove every node in a group, deleting in batches of batch_size.
    @abstractmethod
    async def delete_by_group_id(
        self,
        executor: QueryExecutor,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None: ...

    # Remove all nodes matching the given uuids, in batches.
    @abstractmethod
    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None: ...

    # Fetch one node by uuid (implementations raise when not found).
    @abstractmethod
    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> EntityNode: ...

    # Fetch all nodes matching the given uuids.
    @abstractmethod
    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[EntityNode]: ...

    # Fetch nodes in the given groups; uuid_cursor presumably pages results
    # past that uuid — confirm against concrete implementations.
    @abstractmethod
    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EntityNode]: ...

    # Populate the node's embeddings from storage (mutates node).
    @abstractmethod
    async def load_embeddings(
        self,
        executor: QueryExecutor,
        node: EntityNode,
    ) -> None: ...

    # Populate embeddings for many nodes, reading in batches.
    @abstractmethod
    async def load_embeddings_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[EntityNode],
        batch_size: int = 100,
    ) -> None: ...
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/operations/entity_node_ops.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
getzep/graphiti:graphiti_core/driver/operations/episode_node_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC, abstractmethod
from datetime import datetime
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.nodes import EpisodicNode
class EpisodeNodeOperations(ABC):
    """Abstract persistence interface for episodic nodes.

    Concrete graph-database drivers implement these methods; every method
    executes through the supplied QueryExecutor and may optionally join an
    already-open Transaction.
    """

    # Persist (create or update) a single episode node.
    @abstractmethod
    async def save(
        self,
        executor: QueryExecutor,
        node: EpisodicNode,
        tx: Transaction | None = None,
    ) -> None: ...

    # Persist many episode nodes, committing in batches of batch_size.
    @abstractmethod
    async def save_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[EpisodicNode],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None: ...

    # Remove a single episode node.
    @abstractmethod
    async def delete(
        self,
        executor: QueryExecutor,
        node: EpisodicNode,
        tx: Transaction | None = None,
    ) -> None: ...

    # Remove every episode in a group, deleting in batches of batch_size.
    @abstractmethod
    async def delete_by_group_id(
        self,
        executor: QueryExecutor,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None: ...

    # Remove all episodes matching the given uuids, in batches.
    @abstractmethod
    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None: ...

    # Fetch one episode by uuid (implementations raise when not found).
    @abstractmethod
    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> EpisodicNode: ...

    # Fetch all episodes matching the given uuids.
    @abstractmethod
    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[EpisodicNode]: ...

    # Fetch episodes in the given groups; uuid_cursor presumably pages
    # results past that uuid — confirm against concrete implementations.
    @abstractmethod
    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EpisodicNode]: ...

    # Fetch every episode that references the given entity node.
    @abstractmethod
    async def get_by_entity_node_uuid(
        self,
        executor: QueryExecutor,
        entity_node_uuid: str,
    ) -> list[EpisodicNode]: ...

    # Fetch up to last_n episodes at/before reference_time, optionally
    # narrowed by group, source, and saga.
    @abstractmethod
    async def retrieve_episodes(
        self,
        executor: QueryExecutor,
        reference_time: datetime,
        last_n: int = 3,
        group_ids: list[str] | None = None,
        source: str | None = None,
        saga: str | None = None,
    ) -> list[EpisodicNode]: ...
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/operations/episode_node_ops.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
getzep/graphiti:graphiti_core/driver/operations/episodic_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC, abstractmethod
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.edges import EpisodicEdge
class EpisodicEdgeOperations(ABC):
    """Abstract persistence interface for episodic edges.

    Concrete graph-database drivers implement these methods; every method
    executes through the supplied QueryExecutor and may optionally join an
    already-open Transaction.
    """

    # Persist (create or update) a single edge.
    @abstractmethod
    async def save(
        self,
        executor: QueryExecutor,
        edge: EpisodicEdge,
        tx: Transaction | None = None,
    ) -> None: ...

    # Persist many edges, committing in batches of batch_size.
    @abstractmethod
    async def save_bulk(
        self,
        executor: QueryExecutor,
        edges: list[EpisodicEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None: ...

    # Remove a single edge.
    @abstractmethod
    async def delete(
        self,
        executor: QueryExecutor,
        edge: EpisodicEdge,
        tx: Transaction | None = None,
    ) -> None: ...

    # Remove all edges matching the given uuids.
    @abstractmethod
    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None: ...

    # Fetch one edge by uuid (implementations raise when not found).
    @abstractmethod
    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> EpisodicEdge: ...

    # Fetch all edges matching the given uuids.
    @abstractmethod
    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[EpisodicEdge]: ...

    # Fetch edges in the given groups; uuid_cursor presumably pages results
    # past that uuid — confirm against concrete implementations.
    @abstractmethod
    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EpisodicEdge]: ...
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/operations/episodic_edge_ops.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
getzep/graphiti:graphiti_core/driver/operations/graph_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC, abstractmethod
from typing import Any
from graphiti_core.driver.query_executor import QueryExecutor
from graphiti_core.nodes import CommunityNode, EntityNode, EpisodicNode
class GraphMaintenanceOperations(ABC):
    """Abstract interface for whole-graph maintenance tasks.

    Covers data clearing, index/constraint management, and
    community-related bookkeeping; implemented per graph-database driver.
    """

    # Delete graph data, optionally restricted to the given groups.
    @abstractmethod
    async def clear_data(
        self,
        executor: QueryExecutor,
        group_ids: list[str] | None = None,
    ) -> None: ...

    # Create indexes/constraints; optionally drop existing ones first.
    @abstractmethod
    async def build_indices_and_constraints(
        self,
        executor: QueryExecutor,
        delete_existing: bool = False,
    ) -> None: ...

    # Drop every index in the database.
    @abstractmethod
    async def delete_all_indexes(
        self,
        executor: QueryExecutor,
    ) -> None: ...

    # Compute community clusters (return shape is driver-defined: list[Any]).
    @abstractmethod
    async def get_community_clusters(
        self,
        executor: QueryExecutor,
        group_ids: list[str] | None = None,
    ) -> list[Any]: ...

    # Delete all community nodes/edges from the graph.
    @abstractmethod
    async def remove_communities(
        self,
        executor: QueryExecutor,
    ) -> None: ...

    # Assign the given entity to a community (side effects driver-defined).
    @abstractmethod
    async def determine_entity_community(
        self,
        executor: QueryExecutor,
        entity: EntityNode,
    ) -> None: ...

    # Fetch the entity nodes mentioned by the given episodes.
    @abstractmethod
    async def get_mentioned_nodes(
        self,
        executor: QueryExecutor,
        episodes: list[EpisodicNode],
    ) -> list[EntityNode]: ...

    # Fetch the communities that contain the given entity nodes.
    @abstractmethod
    async def get_communities_by_nodes(
        self,
        executor: QueryExecutor,
        nodes: list[EntityNode],
    ) -> list[CommunityNode]: ...
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/operations/graph_ops.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
getzep/graphiti:graphiti_core/driver/operations/has_episode_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC, abstractmethod
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.edges import HasEpisodeEdge
class HasEpisodeEdgeOperations(ABC):
    """Abstract persistence interface for has-episode edges.

    Concrete graph-database drivers implement these methods; every method
    executes through the supplied QueryExecutor and may optionally join an
    already-open Transaction.
    """

    # Persist (create or update) a single edge.
    @abstractmethod
    async def save(
        self,
        executor: QueryExecutor,
        edge: HasEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None: ...

    # Persist many edges, committing in batches of batch_size.
    @abstractmethod
    async def save_bulk(
        self,
        executor: QueryExecutor,
        edges: list[HasEpisodeEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None: ...

    # Remove a single edge.
    @abstractmethod
    async def delete(
        self,
        executor: QueryExecutor,
        edge: HasEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None: ...

    # Remove all edges matching the given uuids.
    @abstractmethod
    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None: ...

    # Fetch one edge by uuid (implementations raise when not found).
    @abstractmethod
    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> HasEpisodeEdge: ...

    # Fetch all edges matching the given uuids.
    @abstractmethod
    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[HasEpisodeEdge]: ...

    # Fetch edges in the given groups; uuid_cursor presumably pages results
    # past that uuid — confirm against concrete implementations.
    @abstractmethod
    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[HasEpisodeEdge]: ...
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/operations/has_episode_edge_ops.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
getzep/graphiti:graphiti_core/driver/operations/next_episode_edge_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC, abstractmethod
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.edges import NextEpisodeEdge
class NextEpisodeEdgeOperations(ABC):
    """Abstract driver-level CRUD interface for NEXT_EPISODE edges.

    Concrete graph drivers implement these operations; every method receives
    the ``QueryExecutor`` used to run queries, and mutating methods accept an
    optional ``Transaction`` to participate in a caller-managed transaction.
    """

    @abstractmethod
    async def save(
        self,
        executor: QueryExecutor,
        edge: NextEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Persist a single NEXT_EPISODE edge."""
        ...

    @abstractmethod
    async def save_bulk(
        self,
        executor: QueryExecutor,
        edges: list[NextEpisodeEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Persist many edges, writing in batches of ``batch_size``."""
        ...

    @abstractmethod
    async def delete(
        self,
        executor: QueryExecutor,
        edge: NextEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None:
        """Delete a single edge."""
        ...

    @abstractmethod
    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        """Delete all edges whose uuid is in ``uuids``."""
        ...

    @abstractmethod
    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> NextEpisodeEdge:
        """Fetch one edge by uuid (implementations raise if missing — confirm per driver)."""
        ...

    @abstractmethod
    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[NextEpisodeEdge]:
        """Fetch all edges matching the given uuids."""
        ...

    @abstractmethod
    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[NextEpisodeEdge]:
        """Fetch edges for the given groups, with optional limit and uuid-based pagination cursor."""
        ...
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/operations/next_episode_edge_ops.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
getzep/graphiti:graphiti_core/driver/operations/saga_node_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC, abstractmethod
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.nodes import SagaNode
class SagaNodeOperations(ABC):
    """Abstract driver-level CRUD interface for Saga nodes.

    Concrete graph drivers implement these operations; every method receives
    the ``QueryExecutor`` used to run queries, and mutating methods accept an
    optional ``Transaction`` to participate in a caller-managed transaction.
    Deletes that can touch many nodes are batched via ``batch_size``.
    """

    @abstractmethod
    async def save(
        self,
        executor: QueryExecutor,
        node: SagaNode,
        tx: Transaction | None = None,
    ) -> None:
        """Persist a single saga node."""
        ...

    @abstractmethod
    async def save_bulk(
        self,
        executor: QueryExecutor,
        nodes: list[SagaNode],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Persist many saga nodes, writing in batches of ``batch_size``."""
        ...

    @abstractmethod
    async def delete(
        self,
        executor: QueryExecutor,
        node: SagaNode,
        tx: Transaction | None = None,
    ) -> None:
        """Delete a single saga node."""
        ...

    @abstractmethod
    async def delete_by_group_id(
        self,
        executor: QueryExecutor,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Delete all saga nodes in a group, batched."""
        ...

    @abstractmethod
    async def delete_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        """Delete the saga nodes with the given uuids, batched."""
        ...

    @abstractmethod
    async def get_by_uuid(
        self,
        executor: QueryExecutor,
        uuid: str,
    ) -> SagaNode:
        """Fetch one saga node by uuid (implementations raise if missing — confirm per driver)."""
        ...

    @abstractmethod
    async def get_by_uuids(
        self,
        executor: QueryExecutor,
        uuids: list[str],
    ) -> list[SagaNode]:
        """Fetch all saga nodes matching the given uuids."""
        ...

    @abstractmethod
    async def get_by_group_ids(
        self,
        executor: QueryExecutor,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[SagaNode]:
        """Fetch saga nodes for the given groups, with optional limit and uuid-based cursor."""
        ...
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/operations/saga_node_ops.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
getzep/graphiti:graphiti_core/driver/operations/search_ops.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC, abstractmethod
from typing import Any
from graphiti_core.driver.query_executor import QueryExecutor
from graphiti_core.edges import EntityEdge
from graphiti_core.nodes import CommunityNode, EntityNode, EpisodicNode
from graphiti_core.search.search_filters import SearchFilters
class SearchOperations(ABC):
    """Abstract driver-level search interface over nodes, edges, episodes and communities.

    Groups fulltext, vector-similarity and BFS searches plus rerankers and the
    (synchronous) filter/query builders that drivers translate into their native
    query dialect.
    """

    # Node search
    @abstractmethod
    async def node_fulltext_search(
        self,
        executor: QueryExecutor,
        query: str,
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EntityNode]:
        """Keyword/fulltext search over entity nodes."""
        ...

    @abstractmethod
    async def node_similarity_search(
        self,
        executor: QueryExecutor,
        search_vector: list[float],
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
        min_score: float = 0.6,
    ) -> list[EntityNode]:
        """Vector-similarity search over entity nodes; hits below ``min_score`` are dropped."""
        ...

    @abstractmethod
    async def node_bfs_search(
        self,
        executor: QueryExecutor,
        origin_uuids: list[str],
        search_filter: SearchFilters,
        max_depth: int,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EntityNode]:
        """Breadth-first graph traversal from the origin nodes, up to ``max_depth`` hops."""
        ...

    # Edge search
    @abstractmethod
    async def edge_fulltext_search(
        self,
        executor: QueryExecutor,
        query: str,
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EntityEdge]:
        """Keyword/fulltext search over entity edges (facts)."""
        ...

    @abstractmethod
    async def edge_similarity_search(
        self,
        executor: QueryExecutor,
        search_vector: list[float],
        source_node_uuid: str | None,
        target_node_uuid: str | None,
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
        min_score: float = 0.6,
    ) -> list[EntityEdge]:
        """Vector-similarity search over edges, optionally pinned to a source/target node."""
        ...

    @abstractmethod
    async def edge_bfs_search(
        self,
        executor: QueryExecutor,
        origin_uuids: list[str],
        max_depth: int,
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EntityEdge]:
        """Breadth-first edge traversal from the origin nodes, up to ``max_depth`` hops."""
        ...

    # Episode search
    @abstractmethod
    async def episode_fulltext_search(
        self,
        executor: QueryExecutor,
        query: str,
        search_filter: SearchFilters,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[EpisodicNode]:
        """Keyword/fulltext search over episodic nodes."""
        ...

    # Community search
    @abstractmethod
    async def community_fulltext_search(
        self,
        executor: QueryExecutor,
        query: str,
        group_ids: list[str] | None = None,
        limit: int = 10,
    ) -> list[CommunityNode]:
        """Keyword/fulltext search over community nodes (no SearchFilters support)."""
        ...

    @abstractmethod
    async def community_similarity_search(
        self,
        executor: QueryExecutor,
        search_vector: list[float],
        group_ids: list[str] | None = None,
        limit: int = 10,
        min_score: float = 0.6,
    ) -> list[CommunityNode]:
        """Vector-similarity search over community nodes."""
        ...

    # Rerankers
    @abstractmethod
    async def node_distance_reranker(
        self,
        executor: QueryExecutor,
        node_uuids: list[str],
        center_node_uuid: str,
        min_score: float = 0,
    ) -> list[EntityNode]:
        """Rerank candidate nodes by graph distance from a center node."""
        ...

    @abstractmethod
    async def episode_mentions_reranker(
        self,
        executor: QueryExecutor,
        node_uuids: list[str],
        min_score: float = 0,
    ) -> list[EntityNode]:
        """Rerank candidate nodes by how often episodes mention them."""
        ...

    # Filter builders (sync)
    @abstractmethod
    def build_node_search_filters(self, search_filters: SearchFilters) -> Any:
        """Translate SearchFilters into the driver's native node-filter representation."""
        ...

    @abstractmethod
    def build_edge_search_filters(self, search_filters: SearchFilters) -> Any:
        """Translate SearchFilters into the driver's native edge-filter representation."""
        ...

    # Fulltext query builder
    @abstractmethod
    def build_fulltext_query(
        self,
        query: str,
        group_ids: list[str] | None = None,
        max_query_length: int = 8000,
    ) -> str:
        """Build the driver's fulltext query string, capped at ``max_query_length`` characters."""
        ...
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/operations/search_ops.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
getzep/graphiti:graphiti_core/driver/query_executor.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC, abstractmethod
from typing import Any
class Transaction(ABC):
    """Minimal transaction interface yielded by GraphDriver.transaction().

    For drivers with real transaction support (e.g., Neo4j), this wraps a native
    transaction with commit/rollback semantics. For drivers without transaction
    support, this is a thin wrapper where queries execute immediately.
    """

    @abstractmethod
    async def run(self, query: str, **kwargs: Any) -> Any:
        """Run a single query within this transaction; kwargs carry the query parameters."""
        ...
class QueryExecutor(ABC):
    """Slim interface for executing queries against a graph database.

    GraphDriver extends this. Operations ABCs depend only on QueryExecutor
    (not GraphDriver), which avoids circular imports.
    """

    @abstractmethod
    async def execute_query(self, cypher_query_: str, **kwargs: Any) -> Any:
        """Execute a query outside any explicit transaction; kwargs carry the query parameters."""
        ...
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/query_executor.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
getzep/graphiti:graphiti_core/driver/record_parsers.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Any
from graphiti_core.edges import EntityEdge
from graphiti_core.helpers import parse_db_date
from graphiti_core.nodes import CommunityNode, EntityNode, EpisodeType, EpisodicNode
def entity_node_from_record(record: Any) -> EntityNode:
    """Parse an entity node from a database record.

    Reserved node fields are stripped from the record's ``attributes`` map so
    that only user-defined attributes remain on the returned node.

    Args:
        record: Mapping-like database record with at least ``uuid``, ``name``,
            ``summary``, ``created_at`` and an ``attributes`` dict.

    Returns:
        The parsed EntityNode.
    """
    attributes = record['attributes']
    # Strip fields that are first-class EntityNode properties.
    for reserved in ('uuid', 'name', 'group_id', 'name_embedding', 'summary', 'created_at', 'labels'):
        attributes.pop(reserved, None)

    labels = record.get('labels', [])
    group_id = record.get('group_id')
    # The driver tags nodes with a dynamic per-group label ('Entity_' + group id
    # without dashes); hide it so callers only see semantic labels.
    # Guard against a missing group_id: calling .replace on None would raise
    # AttributeError before the model's own validation could report the problem.
    if group_id is not None:
        dynamic_label = 'Entity_' + group_id.replace('-', '')
        if dynamic_label in labels:
            labels.remove(dynamic_label)

    return EntityNode(
        uuid=record['uuid'],
        name=record['name'],
        name_embedding=record.get('name_embedding'),
        group_id=group_id,
        labels=labels,
        created_at=parse_db_date(record['created_at']),  # type: ignore[arg-type]
        summary=record['summary'],
        attributes=attributes,
    )
def entity_edge_from_record(record: Any) -> EntityEdge:
    """Parse an entity edge from a database record.

    Fields that are first-class EntityEdge properties are removed from the
    record's ``attributes`` map so only user-defined attributes remain.
    """
    reserved_fields = (
        'uuid',
        'source_node_uuid',
        'target_node_uuid',
        'fact',
        'fact_embedding',
        'name',
        'group_id',
        'episodes',
        'created_at',
        'expired_at',
        'valid_at',
        'invalid_at',
    )
    attributes = record['attributes']
    for field in reserved_fields:
        attributes.pop(field, None)

    return EntityEdge(
        uuid=record['uuid'],
        source_node_uuid=record['source_node_uuid'],
        target_node_uuid=record['target_node_uuid'],
        fact=record['fact'],
        fact_embedding=record.get('fact_embedding'),
        name=record['name'],
        group_id=record['group_id'],
        episodes=record['episodes'],
        created_at=parse_db_date(record['created_at']),  # type: ignore[arg-type]
        expired_at=parse_db_date(record['expired_at']),
        valid_at=parse_db_date(record['valid_at']),
        invalid_at=parse_db_date(record['invalid_at']),
        attributes=attributes,
    )
def episodic_node_from_record(record: Any) -> EpisodicNode:
    """Parse an episodic node from a database record.

    Raises:
        ValueError: if ``created_at`` or ``valid_at`` cannot be parsed to a
            non-None datetime — both are mandatory on episodes.
    """
    episode_id = record.get('uuid', 'unknown')

    created_at = parse_db_date(record['created_at'])
    if created_at is None:
        raise ValueError(f'created_at cannot be None for episode {episode_id}')

    valid_at = parse_db_date(record['valid_at'])
    if valid_at is None:
        raise ValueError(f'valid_at cannot be None for episode {episode_id}')

    return EpisodicNode(
        content=record['content'],
        created_at=created_at,
        valid_at=valid_at,
        uuid=record['uuid'],
        group_id=record['group_id'],
        source=EpisodeType.from_str(record['source']),
        name=record['name'],
        source_description=record['source_description'],
        entity_edges=record['entity_edges'],
    )
def community_node_from_record(record: Any) -> CommunityNode:
    """Parse a community node from a database record.

    Uses ``record.get('name_embedding')`` — consistent with the entity node and
    edge parsers above — so records fetched without their embedding (embeddings
    are loaded separately in bulk) don't raise KeyError.
    """
    return CommunityNode(
        uuid=record['uuid'],
        name=record['name'],
        group_id=record['group_id'],
        name_embedding=record.get('name_embedding'),
        created_at=parse_db_date(record['created_at']),  # type: ignore[arg-type]
        summary=record['summary'],
    )
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/record_parsers.py",
"license": "Apache License 2.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
getzep/graphiti:graphiti_core/namespaces/edges.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from graphiti_core.driver.driver import GraphDriver
from graphiti_core.driver.operations.community_edge_ops import CommunityEdgeOperations
from graphiti_core.driver.operations.entity_edge_ops import EntityEdgeOperations
from graphiti_core.driver.operations.episodic_edge_ops import EpisodicEdgeOperations
from graphiti_core.driver.operations.has_episode_edge_ops import HasEpisodeEdgeOperations
from graphiti_core.driver.operations.next_episode_edge_ops import NextEpisodeEdgeOperations
from graphiti_core.driver.query_executor import Transaction
from graphiti_core.edges import (
CommunityEdge,
EntityEdge,
EpisodicEdge,
HasEpisodeEdge,
NextEpisodeEdge,
)
from graphiti_core.embedder import EmbedderClient
class EntityEdgeNamespace:
    """Namespace for entity edge operations. Accessed as ``graphiti.edges.entity``.

    Thin facade that binds the driver (which doubles as the QueryExecutor) and
    the embedder to the driver's EntityEdgeOperations implementation.
    """

    def __init__(
        self,
        driver: GraphDriver,
        ops: EntityEdgeOperations,
        embedder: EmbedderClient,
    ):
        self._driver = driver
        self._ops = ops
        # Needed to generate fact embeddings in save(); bulk save does not embed here.
        self._embedder = embedder

    async def save(
        self,
        edge: EntityEdge,
        tx: Transaction | None = None,
    ) -> EntityEdge:
        """Embed the edge's fact, persist it, and return the (mutated) edge."""
        await edge.generate_embedding(self._embedder)
        await self._ops.save(self._driver, edge, tx=tx)
        return edge

    async def save_bulk(
        self,
        edges: list[EntityEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        # NOTE: unlike save(), no embedding is generated here — callers are
        # expected to have embedded the edges already.
        await self._ops.save_bulk(self._driver, edges, tx=tx, batch_size=batch_size)

    async def delete(
        self,
        edge: EntityEdge,
        tx: Transaction | None = None,
    ) -> None:
        await self._ops.delete(self._driver, edge, tx=tx)

    async def delete_by_uuids(
        self,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        await self._ops.delete_by_uuids(self._driver, uuids, tx=tx)

    async def get_by_uuid(self, uuid: str) -> EntityEdge:
        return await self._ops.get_by_uuid(self._driver, uuid)

    async def get_by_uuids(self, uuids: list[str]) -> list[EntityEdge]:
        return await self._ops.get_by_uuids(self._driver, uuids)

    async def get_by_group_ids(
        self,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EntityEdge]:
        return await self._ops.get_by_group_ids(self._driver, group_ids, limit, uuid_cursor)

    async def get_between_nodes(
        self,
        source_node_uuid: str,
        target_node_uuid: str,
    ) -> list[EntityEdge]:
        """Fetch all entity edges connecting the two given nodes."""
        return await self._ops.get_between_nodes(self._driver, source_node_uuid, target_node_uuid)

    async def get_by_node_uuid(self, node_uuid: str) -> list[EntityEdge]:
        """Fetch all entity edges touching the given node."""
        return await self._ops.get_by_node_uuid(self._driver, node_uuid)

    async def load_embeddings(self, edge: EntityEdge) -> None:
        """Populate the edge's fact embedding in place from the database."""
        await self._ops.load_embeddings(self._driver, edge)

    async def load_embeddings_bulk(
        self,
        edges: list[EntityEdge],
        batch_size: int = 100,
    ) -> None:
        """Populate fact embeddings for many edges, batched."""
        await self._ops.load_embeddings_bulk(self._driver, edges, batch_size)
class EpisodicEdgeNamespace:
    """Namespace for episodic edge operations. Accessed as ``graphiti.edges.episodic``.

    Thin facade delegating to the driver's EpisodicEdgeOperations; the driver
    itself is passed as the QueryExecutor on every call.
    """

    def __init__(self, driver: GraphDriver, ops: EpisodicEdgeOperations):
        self._driver = driver
        self._ops = ops

    async def save(
        self,
        edge: EpisodicEdge,
        tx: Transaction | None = None,
    ) -> EpisodicEdge:
        """Persist the edge and return it (no embedding step for episodic edges)."""
        await self._ops.save(self._driver, edge, tx=tx)
        return edge

    async def save_bulk(
        self,
        edges: list[EpisodicEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        await self._ops.save_bulk(self._driver, edges, tx=tx, batch_size=batch_size)

    async def delete(
        self,
        edge: EpisodicEdge,
        tx: Transaction | None = None,
    ) -> None:
        await self._ops.delete(self._driver, edge, tx=tx)

    async def delete_by_uuids(
        self,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        await self._ops.delete_by_uuids(self._driver, uuids, tx=tx)

    async def get_by_uuid(self, uuid: str) -> EpisodicEdge:
        return await self._ops.get_by_uuid(self._driver, uuid)

    async def get_by_uuids(self, uuids: list[str]) -> list[EpisodicEdge]:
        return await self._ops.get_by_uuids(self._driver, uuids)

    async def get_by_group_ids(
        self,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EpisodicEdge]:
        return await self._ops.get_by_group_ids(self._driver, group_ids, limit, uuid_cursor)
class CommunityEdgeNamespace:
    """Namespace for community edge operations. Accessed as ``graphiti.edges.community``.

    Thin facade delegating to the driver's CommunityEdgeOperations.
    Note: unlike the other edge namespaces, there is no ``save_bulk`` here.
    """

    def __init__(self, driver: GraphDriver, ops: CommunityEdgeOperations):
        self._driver = driver
        self._ops = ops

    async def save(
        self,
        edge: CommunityEdge,
        tx: Transaction | None = None,
    ) -> CommunityEdge:
        """Persist the edge and return it."""
        await self._ops.save(self._driver, edge, tx=tx)
        return edge

    async def delete(
        self,
        edge: CommunityEdge,
        tx: Transaction | None = None,
    ) -> None:
        await self._ops.delete(self._driver, edge, tx=tx)

    async def delete_by_uuids(
        self,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        await self._ops.delete_by_uuids(self._driver, uuids, tx=tx)

    async def get_by_uuid(self, uuid: str) -> CommunityEdge:
        return await self._ops.get_by_uuid(self._driver, uuid)

    async def get_by_uuids(self, uuids: list[str]) -> list[CommunityEdge]:
        return await self._ops.get_by_uuids(self._driver, uuids)

    async def get_by_group_ids(
        self,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[CommunityEdge]:
        return await self._ops.get_by_group_ids(self._driver, group_ids, limit, uuid_cursor)
class HasEpisodeEdgeNamespace:
    """Namespace for has_episode edge operations. Accessed as ``graphiti.edges.has_episode``.

    Thin facade delegating to the driver's HasEpisodeEdgeOperations.
    """

    def __init__(self, driver: GraphDriver, ops: HasEpisodeEdgeOperations):
        self._driver = driver
        self._ops = ops

    async def save(
        self,
        edge: HasEpisodeEdge,
        tx: Transaction | None = None,
    ) -> HasEpisodeEdge:
        """Persist the edge and return it."""
        await self._ops.save(self._driver, edge, tx=tx)
        return edge

    async def save_bulk(
        self,
        edges: list[HasEpisodeEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        await self._ops.save_bulk(self._driver, edges, tx=tx, batch_size=batch_size)

    async def delete(
        self,
        edge: HasEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None:
        await self._ops.delete(self._driver, edge, tx=tx)

    async def delete_by_uuids(
        self,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        await self._ops.delete_by_uuids(self._driver, uuids, tx=tx)

    async def get_by_uuid(self, uuid: str) -> HasEpisodeEdge:
        return await self._ops.get_by_uuid(self._driver, uuid)

    async def get_by_uuids(self, uuids: list[str]) -> list[HasEpisodeEdge]:
        return await self._ops.get_by_uuids(self._driver, uuids)

    async def get_by_group_ids(
        self,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[HasEpisodeEdge]:
        return await self._ops.get_by_group_ids(self._driver, group_ids, limit, uuid_cursor)
class NextEpisodeEdgeNamespace:
    """Namespace for next_episode edge operations. Accessed as ``graphiti.edges.next_episode``.

    Thin facade delegating to the driver's NextEpisodeEdgeOperations.
    """

    def __init__(self, driver: GraphDriver, ops: NextEpisodeEdgeOperations):
        self._driver = driver
        self._ops = ops

    async def save(
        self,
        edge: NextEpisodeEdge,
        tx: Transaction | None = None,
    ) -> NextEpisodeEdge:
        """Persist the edge and return it."""
        await self._ops.save(self._driver, edge, tx=tx)
        return edge

    async def save_bulk(
        self,
        edges: list[NextEpisodeEdge],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        await self._ops.save_bulk(self._driver, edges, tx=tx, batch_size=batch_size)

    async def delete(
        self,
        edge: NextEpisodeEdge,
        tx: Transaction | None = None,
    ) -> None:
        await self._ops.delete(self._driver, edge, tx=tx)

    async def delete_by_uuids(
        self,
        uuids: list[str],
        tx: Transaction | None = None,
    ) -> None:
        await self._ops.delete_by_uuids(self._driver, uuids, tx=tx)

    async def get_by_uuid(self, uuid: str) -> NextEpisodeEdge:
        return await self._ops.get_by_uuid(self._driver, uuid)

    async def get_by_uuids(self, uuids: list[str]) -> list[NextEpisodeEdge]:
        return await self._ops.get_by_uuids(self._driver, uuids)

    async def get_by_group_ids(
        self,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[NextEpisodeEdge]:
        return await self._ops.get_by_group_ids(self._driver, group_ids, limit, uuid_cursor)
class EdgeNamespace:
    """Namespace for all edge operations. Accessed as ``graphiti.edges``.

    Sub-namespaces are set only when the driver provides the corresponding
    operations implementation. Accessing an unset attribute raises
    ``NotImplementedError`` with a clear message.
    """

    entity: EntityEdgeNamespace
    episodic: EpisodicEdgeNamespace
    community: CommunityEdgeNamespace
    has_episode: HasEpisodeEdgeNamespace
    next_episode: NextEpisodeEdgeNamespace
    _driver_name: str

    def __init__(self, driver: GraphDriver, embedder: EmbedderClient):
        # Captured for the error message raised in __getattr__.
        self._driver_name = type(driver).__name__
        entity_edge_ops = driver.entity_edge_ops
        if entity_edge_ops is not None:
            # Only entity edges need the embedder (fact embeddings on save).
            self.entity = EntityEdgeNamespace(driver, entity_edge_ops, embedder)
        episodic_edge_ops = driver.episodic_edge_ops
        if episodic_edge_ops is not None:
            self.episodic = EpisodicEdgeNamespace(driver, episodic_edge_ops)
        community_edge_ops = driver.community_edge_ops
        if community_edge_ops is not None:
            self.community = CommunityEdgeNamespace(driver, community_edge_ops)
        has_episode_edge_ops = driver.has_episode_edge_ops
        if has_episode_edge_ops is not None:
            self.has_episode = HasEpisodeEdgeNamespace(driver, has_episode_edge_ops)
        next_episode_edge_ops = driver.next_episode_edge_ops
        if next_episode_edge_ops is not None:
            self.next_episode = NextEpisodeEdgeNamespace(driver, next_episode_edge_ops)

    def __getattr__(self, name: str) -> object:
        # __getattr__ only fires when normal lookup fails, i.e. when the
        # sub-namespace was never assigned because the driver lacks those ops.
        if name in ('entity', 'episodic', 'community', 'has_episode', 'next_episode'):
            raise NotImplementedError(f'{self._driver_name} does not implement {name}_edge_ops')
        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/namespaces/edges.py",
"license": "Apache License 2.0",
"lines": 286,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/namespaces/nodes.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
from graphiti_core.driver.driver import GraphDriver
from graphiti_core.driver.operations.community_node_ops import CommunityNodeOperations
from graphiti_core.driver.operations.entity_node_ops import EntityNodeOperations
from graphiti_core.driver.operations.episode_node_ops import EpisodeNodeOperations
from graphiti_core.driver.operations.saga_node_ops import SagaNodeOperations
from graphiti_core.driver.query_executor import Transaction
from graphiti_core.embedder import EmbedderClient
from graphiti_core.nodes import CommunityNode, EntityNode, EpisodicNode, SagaNode
class EntityNodeNamespace:
    """Namespace for entity node operations. Accessed as ``graphiti.nodes.entity``.

    Thin facade that binds the driver (which doubles as the QueryExecutor) and
    the embedder to the driver's EntityNodeOperations implementation.
    """

    def __init__(
        self,
        driver: GraphDriver,
        ops: EntityNodeOperations,
        embedder: EmbedderClient,
    ):
        self._driver = driver
        self._ops = ops
        # Needed to generate name embeddings in save(); bulk save does not embed here.
        self._embedder = embedder

    async def save(
        self,
        node: EntityNode,
        tx: Transaction | None = None,
    ) -> EntityNode:
        """Embed the node's name, persist it, and return the (mutated) node."""
        await node.generate_name_embedding(self._embedder)
        await self._ops.save(self._driver, node, tx=tx)
        return node

    async def save_bulk(
        self,
        nodes: list[EntityNode],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        # NOTE: no embedding step here — callers must embed beforehand.
        await self._ops.save_bulk(self._driver, nodes, tx=tx, batch_size=batch_size)

    async def delete(
        self,
        node: EntityNode,
        tx: Transaction | None = None,
    ) -> None:
        await self._ops.delete(self._driver, node, tx=tx)

    async def delete_by_group_id(
        self,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        await self._ops.delete_by_group_id(self._driver, group_id, tx=tx, batch_size=batch_size)

    async def delete_by_uuids(
        self,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        await self._ops.delete_by_uuids(self._driver, uuids, tx=tx, batch_size=batch_size)

    async def get_by_uuid(self, uuid: str) -> EntityNode:
        return await self._ops.get_by_uuid(self._driver, uuid)

    async def get_by_uuids(self, uuids: list[str]) -> list[EntityNode]:
        return await self._ops.get_by_uuids(self._driver, uuids)

    async def get_by_group_ids(
        self,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EntityNode]:
        return await self._ops.get_by_group_ids(self._driver, group_ids, limit, uuid_cursor)

    async def load_embeddings(self, node: EntityNode) -> None:
        """Populate the node's name embedding in place from the database."""
        await self._ops.load_embeddings(self._driver, node)

    async def load_embeddings_bulk(
        self,
        nodes: list[EntityNode],
        batch_size: int = 100,
    ) -> None:
        """Populate name embeddings for many nodes, batched."""
        await self._ops.load_embeddings_bulk(self._driver, nodes, batch_size)
class EpisodeNodeNamespace:
    """Namespace for episode node operations. Accessed as ``graphiti.nodes.episode``.

    Thin facade delegating to the driver's EpisodeNodeOperations.
    """

    def __init__(self, driver: GraphDriver, ops: EpisodeNodeOperations):
        self._driver = driver
        self._ops = ops

    async def save(
        self,
        node: EpisodicNode,
        tx: Transaction | None = None,
    ) -> EpisodicNode:
        """Persist the node and return it (episodes carry no embedding step)."""
        await self._ops.save(self._driver, node, tx=tx)
        return node

    async def save_bulk(
        self,
        nodes: list[EpisodicNode],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        await self._ops.save_bulk(self._driver, nodes, tx=tx, batch_size=batch_size)

    async def delete(
        self,
        node: EpisodicNode,
        tx: Transaction | None = None,
    ) -> None:
        await self._ops.delete(self._driver, node, tx=tx)

    async def delete_by_group_id(
        self,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        await self._ops.delete_by_group_id(self._driver, group_id, tx=tx, batch_size=batch_size)

    async def delete_by_uuids(
        self,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        await self._ops.delete_by_uuids(self._driver, uuids, tx=tx, batch_size=batch_size)

    async def get_by_uuid(self, uuid: str) -> EpisodicNode:
        return await self._ops.get_by_uuid(self._driver, uuid)

    async def get_by_uuids(self, uuids: list[str]) -> list[EpisodicNode]:
        return await self._ops.get_by_uuids(self._driver, uuids)

    async def get_by_group_ids(
        self,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[EpisodicNode]:
        return await self._ops.get_by_group_ids(self._driver, group_ids, limit, uuid_cursor)

    async def get_by_entity_node_uuid(
        self,
        entity_node_uuid: str,
    ) -> list[EpisodicNode]:
        """Fetch episodes linked to the given entity node."""
        return await self._ops.get_by_entity_node_uuid(self._driver, entity_node_uuid)

    async def retrieve_episodes(
        self,
        reference_time: datetime,
        last_n: int = 3,
        group_ids: list[str] | None = None,
        source: str | None = None,
        saga: str | None = None,
    ) -> list[EpisodicNode]:
        """Fetch the most recent ``last_n`` episodes relative to ``reference_time``,
        optionally filtered by group, source, and saga."""
        return await self._ops.retrieve_episodes(
            self._driver, reference_time, last_n, group_ids, source, saga
        )
class CommunityNodeNamespace:
    """Namespace for community node operations. Accessed as ``graphiti.nodes.community``.

    Thin facade that binds the driver and embedder to the driver's
    CommunityNodeOperations implementation.
    """

    def __init__(
        self,
        driver: GraphDriver,
        ops: CommunityNodeOperations,
        embedder: EmbedderClient,
    ):
        self._driver = driver
        self._ops = ops
        # Needed to generate name embeddings in save(); bulk save does not embed here.
        self._embedder = embedder

    async def save(
        self,
        node: CommunityNode,
        tx: Transaction | None = None,
    ) -> CommunityNode:
        """Embed the community's name, persist the node, and return it."""
        await node.generate_name_embedding(self._embedder)
        await self._ops.save(self._driver, node, tx=tx)
        return node

    async def save_bulk(
        self,
        nodes: list[CommunityNode],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        # NOTE: no embedding step here — callers must embed beforehand.
        await self._ops.save_bulk(self._driver, nodes, tx=tx, batch_size=batch_size)

    async def delete(
        self,
        node: CommunityNode,
        tx: Transaction | None = None,
    ) -> None:
        await self._ops.delete(self._driver, node, tx=tx)

    async def delete_by_group_id(
        self,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        await self._ops.delete_by_group_id(self._driver, group_id, tx=tx, batch_size=batch_size)

    async def delete_by_uuids(
        self,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        await self._ops.delete_by_uuids(self._driver, uuids, tx=tx, batch_size=batch_size)

    async def get_by_uuid(self, uuid: str) -> CommunityNode:
        return await self._ops.get_by_uuid(self._driver, uuid)

    async def get_by_uuids(self, uuids: list[str]) -> list[CommunityNode]:
        return await self._ops.get_by_uuids(self._driver, uuids)

    async def get_by_group_ids(
        self,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[CommunityNode]:
        return await self._ops.get_by_group_ids(self._driver, group_ids, limit, uuid_cursor)

    async def load_name_embedding(self, node: CommunityNode) -> None:
        """Populate the node's name embedding in place from the database."""
        await self._ops.load_name_embedding(self._driver, node)
class SagaNodeNamespace:
    """Namespace for saga node operations. Accessed as ``graphiti.nodes.saga``.

    Thin facade delegating to the driver's SagaNodeOperations (no embedder —
    saga nodes carry no embeddings at this layer).
    """

    def __init__(self, driver: GraphDriver, ops: SagaNodeOperations):
        self._driver = driver
        self._ops = ops

    async def save(
        self,
        node: SagaNode,
        tx: Transaction | None = None,
    ) -> SagaNode:
        """Persist the node and return it."""
        await self._ops.save(self._driver, node, tx=tx)
        return node

    async def save_bulk(
        self,
        nodes: list[SagaNode],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        await self._ops.save_bulk(self._driver, nodes, tx=tx, batch_size=batch_size)

    async def delete(
        self,
        node: SagaNode,
        tx: Transaction | None = None,
    ) -> None:
        await self._ops.delete(self._driver, node, tx=tx)

    async def delete_by_group_id(
        self,
        group_id: str,
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        await self._ops.delete_by_group_id(self._driver, group_id, tx=tx, batch_size=batch_size)

    async def delete_by_uuids(
        self,
        uuids: list[str],
        tx: Transaction | None = None,
        batch_size: int = 100,
    ) -> None:
        await self._ops.delete_by_uuids(self._driver, uuids, tx=tx, batch_size=batch_size)

    async def get_by_uuid(self, uuid: str) -> SagaNode:
        return await self._ops.get_by_uuid(self._driver, uuid)

    async def get_by_uuids(self, uuids: list[str]) -> list[SagaNode]:
        return await self._ops.get_by_uuids(self._driver, uuids)

    async def get_by_group_ids(
        self,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[SagaNode]:
        return await self._ops.get_by_group_ids(self._driver, group_ids, limit, uuid_cursor)
class NodeNamespace:
    """Namespace for all node operations. Accessed as ``graphiti.nodes``.

    Sub-namespaces are set only when the driver provides the corresponding
    operations implementation. Accessing an unset attribute raises
    ``NotImplementedError`` with a clear message.
    """

    entity: EntityNodeNamespace
    episode: EpisodeNodeNamespace
    community: CommunityNodeNamespace
    saga: SagaNodeNamespace
    _driver_name: str

    def __init__(self, driver: GraphDriver, embedder: EmbedderClient):
        # Remembered only for the error message raised by __getattr__.
        self._driver_name = type(driver).__name__
        if (entity_ops := driver.entity_node_ops) is not None:
            self.entity = EntityNodeNamespace(driver, entity_ops, embedder)
        if (episode_ops := driver.episode_node_ops) is not None:
            self.episode = EpisodeNodeNamespace(driver, episode_ops)
        if (community_ops := driver.community_node_ops) is not None:
            self.community = CommunityNodeNamespace(driver, community_ops, embedder)
        if (saga_ops := driver.saga_node_ops) is not None:
            self.saga = SagaNodeNamespace(driver, saga_ops)

    def __getattr__(self, name: str) -> object:
        # Known sub-namespace names get the driver-capability error; anything
        # else is a genuine missing attribute.
        if name in {'entity', 'episode', 'community', 'saga'}:
            raise NotImplementedError(f'{self._driver_name} does not implement {name}_node_ops')
        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/namespaces/nodes.py",
"license": "Apache License 2.0",
"lines": 290,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/llm_client/token_tracker.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from dataclasses import dataclass
from threading import Lock
@dataclass
class TokenUsage:
    """Token usage for a single LLM call."""

    # Prompt-side and completion-side token counts for one call.
    input_tokens: int = 0
    output_tokens: int = 0

    @property
    def total_tokens(self) -> int:
        """Combined input + output token count."""
        return self.output_tokens + self.input_tokens
@dataclass
class PromptTokenUsage:
    """Accumulated token usage for a specific prompt type."""

    prompt_name: str
    call_count: int = 0
    total_input_tokens: int = 0
    total_output_tokens: int = 0

    @property
    def total_tokens(self) -> int:
        """Combined input + output tokens across every recorded call."""
        return self.total_output_tokens + self.total_input_tokens

    @property
    def avg_input_tokens(self) -> float:
        """Mean input tokens per call; 0 when nothing has been recorded."""
        if self.call_count <= 0:
            return 0
        return self.total_input_tokens / self.call_count

    @property
    def avg_output_tokens(self) -> float:
        """Mean output tokens per call; 0 when nothing has been recorded."""
        if self.call_count <= 0:
            return 0
        return self.total_output_tokens / self.call_count
class TokenUsageTracker:
    """Thread-safe tracker for LLM token usage by prompt type."""

    def __init__(self):
        # Accumulators keyed by prompt name; every access goes through the lock.
        self._usage: dict[str, PromptTokenUsage] = {}
        self._lock = Lock()

    def record(self, prompt_name: str | None, input_tokens: int, output_tokens: int) -> None:
        """Record token usage for a prompt.

        Args:
            prompt_name: Name of the prompt (e.g., 'extract_nodes.extract_message')
            input_tokens: Number of input tokens used
            output_tokens: Number of output tokens generated
        """
        key = prompt_name or 'unknown'
        with self._lock:
            entry = self._usage.setdefault(key, PromptTokenUsage(prompt_name=key))
            entry.call_count += 1
            entry.total_input_tokens += input_tokens
            entry.total_output_tokens += output_tokens

    def get_usage(self) -> dict[str, PromptTokenUsage]:
        """Get a copy of current token usage by prompt type."""
        with self._lock:
            # Deep-ish copy: new PromptTokenUsage instances so callers cannot
            # mutate the tracker's internal state.
            snapshot: dict[str, PromptTokenUsage] = {}
            for key, entry in self._usage.items():
                snapshot[key] = PromptTokenUsage(
                    prompt_name=entry.prompt_name,
                    call_count=entry.call_count,
                    total_input_tokens=entry.total_input_tokens,
                    total_output_tokens=entry.total_output_tokens,
                )
            return snapshot

    def get_total_usage(self) -> TokenUsage:
        """Get total token usage across all prompts."""
        with self._lock:
            input_total = 0
            output_total = 0
            for entry in self._usage.values():
                input_total += entry.total_input_tokens
                output_total += entry.total_output_tokens
        return TokenUsage(input_tokens=input_total, output_tokens=output_total)

    def reset(self) -> None:
        """Reset all tracked usage."""
        with self._lock:
            self._usage.clear()

    def print_summary(self, sort_by: str = 'total_tokens') -> None:
        """Print a formatted summary of token usage.

        Args:
            sort_by: Sort key - 'total_tokens', 'input_tokens', 'output_tokens', 'call_count', or 'prompt_name'
        """
        usage = self.get_usage()
        if not usage:
            print('No token usage recorded.')
            return
        # Resolve the sort key; unrecognized values fall back to total_tokens.
        sort_keys = {
            'total_tokens': lambda item: item[1].total_tokens,
            'input_tokens': lambda item: item[1].total_input_tokens,
            'output_tokens': lambda item: item[1].total_output_tokens,
            'call_count': lambda item: item[1].call_count,
            'prompt_name': lambda item: item[0],
        }
        key_fn = sort_keys.get(sort_by, sort_keys['total_tokens'])
        # Names sort ascending; every numeric key sorts descending.
        rows = sorted(usage.items(), key=key_fn, reverse=(sort_by != 'prompt_name'))
        divider = '=' * 100
        # Header
        print('\n' + divider)
        print('TOKEN USAGE SUMMARY')
        print(divider)
        print(
            f'{"Prompt Type":<45} {"Calls":>8} {"Input":>12} {"Output":>12} {"Total":>12} {"Avg In":>10} {"Avg Out":>10}'
        )
        print('-' * 100)
        # One row per prompt type
        for prompt_name, prompt_usage in rows:
            print(
                f'{prompt_name:<45} {prompt_usage.call_count:>8} {prompt_usage.total_input_tokens:>12,} '
                f'{prompt_usage.total_output_tokens:>12,} {prompt_usage.total_tokens:>12,} '
                f'{prompt_usage.avg_input_tokens:>10,.1f} {prompt_usage.avg_output_tokens:>10,.1f}'
            )
        # Totals footer
        total = self.get_total_usage()
        total_calls = sum(entry.call_count for entry in usage.values())
        print('-' * 100)
        print(
            f'{"TOTAL":<45} {total_calls:>8} {total.input_tokens:>12,} '
            f'{total.output_tokens:>12,} {total.total_tokens:>12,}'
        )
        print(divider + '\n')
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/llm_client/token_tracker.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:tests/llm_client/test_token_tracker.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from concurrent.futures import ThreadPoolExecutor
from graphiti_core.llm_client.token_tracker import (
PromptTokenUsage,
TokenUsage,
TokenUsageTracker,
)
class TestTokenUsage:
    def test_total_tokens(self):
        """Test that total_tokens correctly sums input and output tokens."""
        assert TokenUsage(input_tokens=100, output_tokens=50).total_tokens == 150

    def test_default_values(self):
        """Test that default values are zero."""
        default_usage = TokenUsage()
        assert default_usage.input_tokens == 0
        assert default_usage.output_tokens == 0
        assert default_usage.total_tokens == 0
class TestPromptTokenUsage:
    @staticmethod
    def _make_usage(calls: int, input_total: int, output_total: int) -> PromptTokenUsage:
        """Build a PromptTokenUsage with the given counters for the tests below."""
        return PromptTokenUsage(
            prompt_name='test',
            call_count=calls,
            total_input_tokens=input_total,
            total_output_tokens=output_total,
        )

    def test_total_tokens(self):
        """Test that total_tokens correctly sums input and output tokens."""
        assert self._make_usage(5, 1000, 500).total_tokens == 1500

    def test_avg_input_tokens(self):
        """Test average input tokens calculation."""
        assert self._make_usage(4, 1000, 500).avg_input_tokens == 250.0

    def test_avg_output_tokens(self):
        """Test average output tokens calculation."""
        assert self._make_usage(4, 1000, 500).avg_output_tokens == 125.0

    def test_avg_tokens_zero_calls(self):
        """Test that average returns 0 when call_count is zero."""
        empty = self._make_usage(0, 0, 0)
        assert empty.avg_input_tokens == 0
        assert empty.avg_output_tokens == 0
class TestTokenUsageTracker:
    def test_record_new_prompt(self):
        """Test recording usage for a new prompt."""
        tracker = TokenUsageTracker()
        tracker.record('extract_nodes', 100, 50)
        recorded = tracker.get_usage()
        assert 'extract_nodes' in recorded
        entry = recorded['extract_nodes']
        assert entry.call_count == 1
        assert entry.total_input_tokens == 100
        assert entry.total_output_tokens == 50

    def test_record_existing_prompt(self):
        """Test that multiple calls accumulate correctly."""
        tracker = TokenUsageTracker()
        tracker.record('extract_nodes', 100, 50)
        tracker.record('extract_nodes', 200, 100)
        entry = tracker.get_usage()['extract_nodes']
        assert entry.call_count == 2
        assert entry.total_input_tokens == 300
        assert entry.total_output_tokens == 150

    def test_record_none_prompt_name(self):
        """Test that None prompt_name is recorded as 'unknown'."""
        tracker = TokenUsageTracker()
        tracker.record(None, 100, 50)
        recorded = tracker.get_usage()
        assert 'unknown' in recorded
        assert recorded['unknown'].call_count == 1

    def test_record_multiple_prompts(self):
        """Test recording usage for multiple different prompts."""
        tracker = TokenUsageTracker()
        samples = [
            ('extract_nodes', 100, 50),
            ('dedupe_nodes', 200, 100),
            ('extract_edges', 150, 75),
        ]
        for name, input_tokens, output_tokens in samples:
            tracker.record(name, input_tokens, output_tokens)
        recorded = tracker.get_usage()
        assert len(recorded) == 3
        for name, _, _ in samples:
            assert name in recorded

    def test_get_usage_returns_copy(self):
        """Test that get_usage returns a copy, not the internal dict."""
        tracker = TokenUsageTracker()
        tracker.record('test', 100, 50)
        snapshot = tracker.get_usage()
        snapshot['test'].total_input_tokens = 9999
        # Mutating the snapshot must not leak back into the tracker.
        assert tracker.get_usage()['test'].total_input_tokens == 100

    def test_get_total_usage(self):
        """Test getting total usage across all prompts."""
        tracker = TokenUsageTracker()
        tracker.record('extract_nodes', 100, 50)
        tracker.record('dedupe_nodes', 200, 100)
        tracker.record('extract_edges', 150, 75)
        total = tracker.get_total_usage()
        assert total.input_tokens == 450
        assert total.output_tokens == 225
        assert total.total_tokens == 675

    def test_get_total_usage_empty(self):
        """Test getting total usage when no records exist."""
        total = TokenUsageTracker().get_total_usage()
        assert total.input_tokens == 0
        assert total.output_tokens == 0

    def test_reset(self):
        """Test that reset clears all tracked usage."""
        tracker = TokenUsageTracker()
        tracker.record('extract_nodes', 100, 50)
        tracker.record('dedupe_nodes', 200, 100)
        tracker.reset()
        assert len(tracker.get_usage()) == 0
        assert tracker.get_total_usage().total_tokens == 0

    def test_thread_safety(self):
        """Test that concurrent access from multiple threads is safe."""
        tracker = TokenUsageTracker()
        num_threads = 10
        calls_per_thread = 100

        def record_tokens(thread_id):
            for _ in range(calls_per_thread):
                tracker.record(f'prompt_{thread_id}', 10, 5)

        with ThreadPoolExecutor(max_workers=num_threads) as pool:
            pending = [pool.submit(record_tokens, i) for i in range(num_threads)]
            for future in pending:
                future.result()
        assert len(tracker.get_usage()) == num_threads
        total = tracker.get_total_usage()
        assert total.input_tokens == num_threads * calls_per_thread * 10
        assert total.output_tokens == num_threads * calls_per_thread * 5

    def test_concurrent_same_prompt(self):
        """Test concurrent access to the same prompt name."""
        tracker = TokenUsageTracker()
        num_threads = 10
        calls_per_thread = 100

        def record_tokens():
            for _ in range(calls_per_thread):
                tracker.record('shared_prompt', 10, 5)

        with ThreadPoolExecutor(max_workers=num_threads) as pool:
            pending = [pool.submit(record_tokens) for _ in range(num_threads)]
            for future in pending:
                future.result()
        entry = tracker.get_usage()['shared_prompt']
        expected_calls = num_threads * calls_per_thread
        assert entry.call_count == expected_calls
        assert entry.total_input_tokens == expected_calls * 10
        assert entry.total_output_tokens == expected_calls * 5
| {
"repo_id": "getzep/graphiti",
"file_path": "tests/llm_client/test_token_tracker.py",
"license": "Apache License 2.0",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:tests/test_add_triplet.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
from unittest.mock import AsyncMock, Mock, patch
import pytest
from graphiti_core.cross_encoder.client import CrossEncoderClient
from graphiti_core.edges import EntityEdge
from graphiti_core.graphiti import Graphiti
from graphiti_core.llm_client import LLMClient
from graphiti_core.nodes import EntityNode
from tests.helpers_test import group_id
pytest_plugins = ('pytest_asyncio', 'tests.helpers_test')
@pytest.fixture
def mock_llm_client():
    """Create a mock LLM"""
    llm = Mock(spec=LLMClient)
    llm.config = Mock()
    llm.model = 'test-model'
    llm.small_model = 'test-small-model'
    llm.temperature = 0.0
    llm.max_tokens = 1000
    llm.cache_enabled = False
    llm.cache_dir = None
    # Stub the public method that's actually called; the default payload keeps
    # dedupe/invalidate responses empty.
    llm.generate_response = AsyncMock(
        return_value={
            'duplicate_facts': [],
            'invalidate_facts': [],
        }
    )
    return llm
@pytest.fixture
def mock_cross_encoder_client():
    """Create a mock cross encoder"""
    cross_encoder = Mock(spec=CrossEncoderClient)
    cross_encoder.config = Mock()
    # Async stub that reranks to an empty result list by default.
    cross_encoder.rerank = AsyncMock(return_value=[])
    return cross_encoder
@pytest.mark.asyncio
async def test_add_triplet_merges_attributes(
    graph_driver, mock_llm_client, mock_embedder, mock_cross_encoder_client
):
    """Test that attributes are merged (not replaced) when adding a triplet."""
    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    now = datetime.now()
    # Create an existing node with some attributes
    existing_source = EntityNode(
        name='Alice',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Existing summary',
        attributes={'age': 30, 'city': 'New York'},
    )
    await existing_source.generate_name_embedding(mock_embedder)
    await existing_source.save(graph_driver)
    # Create a user-provided node with additional attributes
    user_source = EntityNode(
        uuid=existing_source.uuid,  # Same UUID to trigger direct lookup
        name='Alice',
        group_id=group_id,
        labels=['Person', 'Employee'],
        created_at=now,
        summary='Updated summary',
        attributes={'age': 31, 'department': 'Engineering'},  # age updated, department added
    )
    # Create target node
    user_target = EntityNode(
        name='Bob',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Bob summary',
        attributes={'age': 25},
    )
    # Create edge
    edge = EntityEdge(
        source_node_uuid=user_source.uuid,
        target_node_uuid=user_target.uuid,
        name='WORKS_WITH',
        fact='Alice works with Bob',
        group_id=group_id,
        created_at=now,
    )
    # Mock the search functions to return empty results
    # NOTE(review): search and resolve_extracted_edge are patched out, so only
    # the attribute-merge path of add_triplet is exercised here.
    with (
        patch('graphiti_core.graphiti.search') as mock_search,
        patch('graphiti_core.graphiti.resolve_extracted_edge') as mock_resolve_edge,
    ):
        mock_search.return_value = Mock(edges=[])
        mock_resolve_edge.return_value = (edge, [], [])
        await graphiti.add_triplet(user_source, edge, user_target)
    # Verify attributes were merged (not replaced)
    # The resolved node should have both existing and new attributes
    retrieved_source = await EntityNode.get_by_uuid(graph_driver, existing_source.uuid)
    assert 'age' in retrieved_source.attributes
    assert retrieved_source.attributes['age'] == 31  # Updated value
    assert retrieved_source.attributes['city'] == 'New York'  # Preserved
    assert retrieved_source.attributes['department'] == 'Engineering'  # Added
    assert retrieved_source.summary == 'Updated summary'
@pytest.mark.asyncio
async def test_add_triplet_updates_summary(
    graph_driver, mock_llm_client, mock_embedder, mock_cross_encoder_client
):
    """Test that summary is updated when provided by user."""
    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    now = datetime.now()
    # Create an existing node with a summary
    existing_target = EntityNode(
        name='Bob',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Old summary',
        attributes={},
    )
    await existing_target.generate_name_embedding(mock_embedder)
    await existing_target.save(graph_driver)
    # Create user-provided nodes
    user_source = EntityNode(
        name='Alice',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Alice summary',
        attributes={},
    )
    # Reuses the existing node's UUID so add_triplet resolves to it directly.
    user_target = EntityNode(
        uuid=existing_target.uuid,
        name='Bob',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='New summary for Bob',
        attributes={},
    )
    edge = EntityEdge(
        source_node_uuid=user_source.uuid,
        target_node_uuid=user_target.uuid,
        name='KNOWS',
        fact='Alice knows Bob',
        group_id=group_id,
        created_at=now,
    )
    # NOTE(review): search/resolve_extracted_edge are stubbed; only the
    # summary-update path is exercised.
    with (
        patch('graphiti_core.graphiti.search') as mock_search,
        patch('graphiti_core.graphiti.resolve_extracted_edge') as mock_resolve_edge,
    ):
        mock_search.return_value = Mock(edges=[])
        mock_resolve_edge.return_value = (edge, [], [])
        await graphiti.add_triplet(user_source, edge, user_target)
    # Verify summary was updated
    retrieved_target = await EntityNode.get_by_uuid(graph_driver, existing_target.uuid)
    assert retrieved_target.summary == 'New summary for Bob'
@pytest.mark.asyncio
async def test_add_triplet_updates_labels(
    graph_driver, mock_llm_client, mock_embedder, mock_cross_encoder_client
):
    """Test that labels are updated when provided by user."""
    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    now = datetime.now()
    # Create an existing node with labels
    existing_source = EntityNode(
        name='Alice',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='',
        attributes={},
    )
    await existing_source.generate_name_embedding(mock_embedder)
    await existing_source.save(graph_driver)
    # Create user-provided node with different labels
    user_source = EntityNode(
        uuid=existing_source.uuid,
        name='Alice',
        group_id=group_id,
        labels=['Person', 'Employee', 'Manager'],
        created_at=now,
        summary='',
        attributes={},
    )
    user_target = EntityNode(
        name='Bob',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='',
        attributes={},
    )
    edge = EntityEdge(
        source_node_uuid=user_source.uuid,
        target_node_uuid=user_target.uuid,
        name='MANAGES',
        fact='Alice manages Bob',
        group_id=group_id,
        created_at=now,
    )
    # NOTE(review): search/resolve_extracted_edge are stubbed out here.
    with (
        patch('graphiti_core.graphiti.search') as mock_search,
        patch('graphiti_core.graphiti.resolve_extracted_edge') as mock_resolve_edge,
    ):
        mock_search.return_value = Mock(edges=[])
        mock_resolve_edge.return_value = (edge, [], [])
        await graphiti.add_triplet(user_source, edge, user_target)
    # Verify labels were updated
    retrieved_source = await EntityNode.get_by_uuid(graph_driver, existing_source.uuid)
    # Labels should be set to user-provided labels (not merged)
    assert set(retrieved_source.labels) == {'Person', 'Employee', 'Manager'}
@pytest.mark.asyncio
async def test_add_triplet_with_new_nodes_no_uuid(
    graph_driver, mock_llm_client, mock_embedder, mock_cross_encoder_client
):
    """Test add_triplet with nodes that don't have UUIDs (will be resolved)."""
    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    now = datetime.now()
    # Create user-provided nodes without UUIDs
    # NOTE(review): EntityNode presumably auto-generates a uuid on construction
    # (edge below references user_source.uuid) — "without UUIDs" means no
    # pre-existing node in the DB, not a missing field.
    user_source = EntityNode(
        name='Alice',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Alice summary',
        attributes={'age': 30},
    )
    user_target = EntityNode(
        name='Bob',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Bob summary',
        attributes={'age': 25},
    )
    edge = EntityEdge(
        source_node_uuid=user_source.uuid,
        target_node_uuid=user_target.uuid,
        name='KNOWS',
        fact='Alice knows Bob',
        group_id=group_id,
        created_at=now,
    )
    with patch('graphiti_core.graphiti.search') as mock_search:
        mock_search.return_value = Mock(edges=[])
        with patch('graphiti_core.graphiti.resolve_extracted_edge') as mock_resolve_edge:
            mock_resolve_edge.return_value = (edge, [], [])
            result = await graphiti.add_triplet(user_source, edge, user_target)
    # Verify nodes were created with user-provided attributes
    assert len(result.nodes) >= 2
    # Find the nodes in the result
    source_in_result = next((n for n in result.nodes if n.name == 'Alice'), None)
    target_in_result = next((n for n in result.nodes if n.name == 'Bob'), None)
    if source_in_result:
        assert source_in_result.attributes.get('age') == 30
        assert source_in_result.summary == 'Alice summary'
    if target_in_result:
        assert target_in_result.attributes.get('age') == 25
        assert target_in_result.summary == 'Bob summary'
@pytest.mark.asyncio
async def test_add_triplet_preserves_existing_attributes(
    graph_driver, mock_llm_client, mock_embedder, mock_cross_encoder_client
):
    """Test that existing attributes are preserved when merging new ones."""
    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    now = datetime.now()
    # Create an existing node with multiple attributes
    existing_source = EntityNode(
        name='Alice',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Existing summary',
        attributes={
            'age': 30,
            'city': 'New York',
            'country': 'USA',
            'email': 'alice@example.com',
        },
    )
    await existing_source.generate_name_embedding(mock_embedder)
    await existing_source.save(graph_driver)
    # Create user-provided node with only some attributes
    user_source = EntityNode(
        uuid=existing_source.uuid,
        name='Alice',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Updated summary',
        attributes={'age': 31, 'city': 'Boston'},  # Only updating age and city
    )
    user_target = EntityNode(
        name='Bob',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='',
        attributes={},
    )
    edge = EntityEdge(
        source_node_uuid=user_source.uuid,
        target_node_uuid=user_target.uuid,
        name='KNOWS',
        fact='Alice knows Bob',
        group_id=group_id,
        created_at=now,
    )
    # NOTE(review): search/resolve_extracted_edge are stubbed out here.
    with (
        patch('graphiti_core.graphiti.search') as mock_search,
        patch('graphiti_core.graphiti.resolve_extracted_edge') as mock_resolve_edge,
    ):
        mock_search.return_value = Mock(edges=[])
        mock_resolve_edge.return_value = (edge, [], [])
        await graphiti.add_triplet(user_source, edge, user_target)
    # Verify all attributes are preserved/updated correctly
    retrieved_source = await EntityNode.get_by_uuid(graph_driver, existing_source.uuid)
    assert retrieved_source.attributes['age'] == 31  # Updated
    assert retrieved_source.attributes['city'] == 'Boston'  # Updated
    assert retrieved_source.attributes['country'] == 'USA'  # Preserved
    assert retrieved_source.attributes['email'] == 'alice@example.com'  # Preserved
    assert retrieved_source.summary == 'Updated summary'
@pytest.mark.asyncio
async def test_add_triplet_empty_attributes_preserved(
    graph_driver, mock_llm_client, mock_embedder, mock_cross_encoder_client
):
    """Test that nodes with empty attributes don't overwrite existing attributes."""
    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    now = datetime.now()
    # Create an existing node with attributes
    existing_source = EntityNode(
        name='Alice',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Existing summary',
        attributes={'age': 30, 'city': 'New York'},
    )
    await existing_source.generate_name_embedding(mock_embedder)
    await existing_source.save(graph_driver)
    # Create user-provided node with empty attributes
    user_source = EntityNode(
        uuid=existing_source.uuid,
        name='Alice',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='',  # Empty summary should not overwrite
        attributes={},  # Empty attributes should not overwrite
    )
    user_target = EntityNode(
        name='Bob',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='',
        attributes={},
    )
    edge = EntityEdge(
        source_node_uuid=user_source.uuid,
        target_node_uuid=user_target.uuid,
        name='KNOWS',
        fact='Alice knows Bob',
        group_id=group_id,
        created_at=now,
    )
    # NOTE(review): search/resolve_extracted_edge are stubbed out here.
    with (
        patch('graphiti_core.graphiti.search') as mock_search,
        patch('graphiti_core.graphiti.resolve_extracted_edge') as mock_resolve_edge,
    ):
        mock_search.return_value = Mock(edges=[])
        mock_resolve_edge.return_value = (edge, [], [])
        await graphiti.add_triplet(user_source, edge, user_target)
    # Verify existing attributes are preserved when user provides empty dict
    retrieved_source = await EntityNode.get_by_uuid(graph_driver, existing_source.uuid)
    # Empty attributes dict should not clear existing attributes
    assert 'age' in retrieved_source.attributes
    assert 'city' in retrieved_source.attributes
    # Empty summary should not overwrite existing summary
    assert retrieved_source.summary == 'Existing summary'
@pytest.mark.asyncio
async def test_add_triplet_invalid_source_uuid(
    graph_driver, mock_llm_client, mock_embedder, mock_cross_encoder_client
):
    """Test that ValueError is raised when source_node has a UUID that doesn't exist."""
    from uuid import uuid4

    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    now = datetime.now()
    # Create a node with a UUID that doesn't exist in the database
    invalid_uuid = str(uuid4())
    user_source = EntityNode(
        uuid=invalid_uuid,
        name='Alice',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Alice summary',
        attributes={'age': 30},
    )
    user_target = EntityNode(
        name='Bob',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Bob summary',
        attributes={'age': 25},
    )
    edge = EntityEdge(
        source_node_uuid=user_source.uuid,
        target_node_uuid=user_target.uuid,
        name='KNOWS',
        fact='Alice knows Bob',
        group_id=group_id,
        created_at=now,
    )
    # Should raise ValueError for invalid source UUID
    with pytest.raises(ValueError, match=f'Node with UUID {invalid_uuid} not found'):
        await graphiti.add_triplet(user_source, edge, user_target)
@pytest.mark.asyncio
async def test_add_triplet_invalid_target_uuid(
    graph_driver, mock_llm_client, mock_embedder, mock_cross_encoder_client
):
    """Test that ValueError is raised when target_node has a UUID that doesn't exist."""
    from uuid import uuid4

    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    now = datetime.now()
    # Create an existing source node
    existing_source = EntityNode(
        name='Alice',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Alice summary',
        attributes={'age': 30},
    )
    await existing_source.generate_name_embedding(mock_embedder)
    await existing_source.save(graph_driver)
    # Create a target node with a UUID that doesn't exist in the database
    invalid_uuid = str(uuid4())
    user_source = EntityNode(
        uuid=existing_source.uuid,
        name='Alice',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Alice summary',
        attributes={'age': 30},
    )
    user_target = EntityNode(
        uuid=invalid_uuid,
        name='Bob',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Bob summary',
        attributes={'age': 25},
    )
    edge = EntityEdge(
        source_node_uuid=user_source.uuid,
        target_node_uuid=user_target.uuid,
        name='KNOWS',
        fact='Alice knows Bob',
        group_id=group_id,
        created_at=now,
    )
    # Should raise ValueError for invalid target UUID
    with pytest.raises(ValueError, match=f'Node with UUID {invalid_uuid} not found'):
        await graphiti.add_triplet(user_source, edge, user_target)
@pytest.mark.asyncio
async def test_add_triplet_invalid_both_uuids(
    graph_driver, mock_llm_client, mock_embedder, mock_cross_encoder_client
):
    """Test that ValueError is raised for source_node first when both UUIDs are invalid."""
    from uuid import uuid4

    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    now = datetime.now()
    # Create nodes with UUIDs that don't exist in the database
    invalid_source_uuid = str(uuid4())
    invalid_target_uuid = str(uuid4())
    user_source = EntityNode(
        uuid=invalid_source_uuid,
        name='Alice',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Alice summary',
        attributes={'age': 30},
    )
    user_target = EntityNode(
        uuid=invalid_target_uuid,
        name='Bob',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Bob summary',
        attributes={'age': 25},
    )
    edge = EntityEdge(
        source_node_uuid=user_source.uuid,
        target_node_uuid=user_target.uuid,
        name='KNOWS',
        fact='Alice knows Bob',
        group_id=group_id,
        created_at=now,
    )
    # Should raise ValueError for source UUID first (source is checked before target)
    with pytest.raises(ValueError, match=f'Node with UUID {invalid_source_uuid} not found'):
        await graphiti.add_triplet(user_source, edge, user_target)
@pytest.mark.asyncio
async def test_add_triplet_edge_uuid_with_different_nodes_creates_new_edge(
    graph_driver, mock_llm_client, mock_embedder, mock_cross_encoder_client
):
    """Test that providing an edge UUID with different src/dst nodes creates a new edge."""
    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    now = datetime.now()
    # Create existing nodes: Alice and Bob
    alice = EntityNode(
        name='Alice',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Alice summary',
        attributes={},
    )
    await alice.generate_name_embedding(mock_embedder)
    await alice.save(graph_driver)
    bob = EntityNode(
        name='Bob',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Bob summary',
        attributes={},
    )
    await bob.generate_name_embedding(mock_embedder)
    await bob.save(graph_driver)
    # Create a third node: Charlie
    charlie = EntityNode(
        name='Charlie',
        group_id=group_id,
        labels=['Person'],
        created_at=now,
        summary='Charlie summary',
        attributes={},
    )
    await charlie.generate_name_embedding(mock_embedder)
    await charlie.save(graph_driver)
    # Create an existing edge between Alice and Bob
    existing_edge = EntityEdge(
        source_node_uuid=alice.uuid,
        target_node_uuid=bob.uuid,
        name='KNOWS',
        fact='Alice knows Bob',
        group_id=group_id,
        created_at=now,
    )
    await existing_edge.generate_embedding(mock_embedder)
    await existing_edge.save(graph_driver)
    # Now try to add a triplet using the existing edge UUID but with different nodes (Alice -> Charlie)
    new_edge_with_same_uuid = EntityEdge(
        uuid=existing_edge.uuid,  # Reuse the existing edge's UUID
        source_node_uuid=alice.uuid,
        target_node_uuid=charlie.uuid,  # Different target!
        name='KNOWS',
        fact='Alice knows Charlie',
        group_id=group_id,
        created_at=now,
    )
    with (
        patch('graphiti_core.graphiti.search') as mock_search,
        patch('graphiti_core.graphiti.resolve_extracted_edge') as mock_resolve_edge,
    ):
        mock_search.return_value = Mock(edges=[])
        # Return the edge as-is (simulating no deduplication)
        mock_resolve_edge.return_value = (new_edge_with_same_uuid, [], [])
        result = await graphiti.add_triplet(alice, new_edge_with_same_uuid, charlie)
    # The original edge (Alice -> Bob) should still exist
    original_edge = await EntityEdge.get_by_uuid(graph_driver, existing_edge.uuid)
    assert original_edge.source_node_uuid == alice.uuid
    assert original_edge.target_node_uuid == bob.uuid
    assert original_edge.fact == 'Alice knows Bob'
    # The new edge should have a different UUID
    # (add_triplet must mint a fresh edge rather than repoint the existing one)
    new_edge = result.edges[0]
    assert new_edge.uuid != existing_edge.uuid
    assert new_edge.source_node_uuid == alice.uuid
    assert new_edge.target_node_uuid == charlie.uuid
@pytest.mark.asyncio
async def test_add_triplet_edge_uuid_with_same_nodes_updates_edge(
    graph_driver, mock_llm_client, mock_embedder, mock_cross_encoder_client
):
    """Test that providing an edge UUID with same src/dst nodes allows updating the edge."""
    graphiti_client = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti_client.build_indices_and_constraints()

    timestamp = datetime.now()

    # Persist the two endpoint nodes that the edge connects.
    alice_node = EntityNode(
        name='Alice',
        group_id=group_id,
        labels=['Person'],
        created_at=timestamp,
        summary='Alice summary',
        attributes={},
    )
    await alice_node.generate_name_embedding(mock_embedder)
    await alice_node.save(graph_driver)

    bob_node = EntityNode(
        name='Bob',
        group_id=group_id,
        labels=['Person'],
        created_at=timestamp,
        summary='Bob summary',
        attributes={},
    )
    await bob_node.generate_name_embedding(mock_embedder)
    await bob_node.save(graph_driver)

    # Persist the edge that the triplet call should later update in place.
    stored_edge = EntityEdge(
        source_node_uuid=alice_node.uuid,
        target_node_uuid=bob_node.uuid,
        name='KNOWS',
        fact='Alice knows Bob',
        group_id=group_id,
        created_at=timestamp,
    )
    await stored_edge.generate_embedding(mock_embedder)
    await stored_edge.save(graph_driver)

    # Same UUID and same endpoints as the stored edge, but new name/fact.
    replacement_edge = EntityEdge(
        uuid=stored_edge.uuid,  # Reuse the existing edge's UUID
        source_node_uuid=alice_node.uuid,
        target_node_uuid=bob_node.uuid,  # Same target
        name='WORKS_WITH',
        fact='Alice works with Bob',  # Updated fact
        group_id=group_id,
        created_at=timestamp,
    )

    with (
        patch('graphiti_core.graphiti.search') as mock_search,
        patch('graphiti_core.graphiti.resolve_extracted_edge') as mock_resolve_edge,
    ):
        mock_search.return_value = Mock(edges=[])
        mock_resolve_edge.return_value = (replacement_edge, [], [])
        outcome = await graphiti_client.add_triplet(alice_node, replacement_edge, bob_node)

    # Because the endpoints are unchanged, the UUID is retained (update allowed).
    persisted_edge = outcome.edges[0]
    assert persisted_edge.uuid == stored_edge.uuid
| {
"repo_id": "getzep/graphiti",
"file_path": "tests/test_add_triplet.py",
"license": "Apache License 2.0",
"lines": 719,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:examples/quickstart/dense_vs_normal_ingestion.py | """
Copyright 2025, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Dense vs Normal Episode Ingestion Example
-----------------------------------------
This example demonstrates how Graphiti handles different types of content:
1. Normal Content (prose, narrative, conversations):
- Lower entity density (few entities per token)
- Processed in a single LLM call
- Examples: meeting transcripts, news articles, documentation
2. Dense Content (structured data with many entities):
- High entity density (many entities per token)
- Automatically chunked for reliable extraction
- Examples: bulk data imports, cost reports, entity-dense JSON
The chunking behavior is controlled by environment variables:
- CHUNK_MIN_TOKENS: Minimum tokens before considering chunking (default: 1000)
- CHUNK_DENSITY_THRESHOLD: Entity density threshold (default: 0.15)
- CHUNK_TOKEN_SIZE: Target size per chunk (default: 3000)
- CHUNK_OVERLAP_TOKENS: Overlap between chunks (default: 200)
"""
import asyncio
import json
import logging
import os
from datetime import datetime, timezone
from logging import INFO
from dotenv import load_dotenv
from graphiti_core import Graphiti
from graphiti_core.nodes import EpisodeType
#################################################
# CONFIGURATION
#################################################
# Console logging for the example run.
logging.basicConfig(
    level=INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)
logger = logging.getLogger(__name__)

# Neo4j connection settings, read from the environment (.env is loaded first).
load_dotenv()
neo4j_uri = os.environ.get('NEO4J_URI', 'bolt://localhost:7687')
neo4j_user = os.environ.get('NEO4J_USER', 'neo4j')
neo4j_password = os.environ.get('NEO4J_PASSWORD', 'password')
if not neo4j_uri or not neo4j_user or not neo4j_password:
    # Only reachable when a variable is explicitly set to an empty string,
    # since each lookup above supplies a non-empty default.
    raise ValueError('NEO4J_URI, NEO4J_USER, and NEO4J_PASSWORD must be set')
#################################################
# EXAMPLE DATA
#################################################
# Normal content: A meeting transcript (low entity density)
# This is prose/narrative content with few entities per token.
# It will NOT trigger chunking - processed in a single LLM call.
NORMAL_EPISODE_CONTENT = """
Meeting Notes - Q4 Planning Session
Alice opened the meeting by reviewing our progress on the mobile app redesign.
She mentioned that the user research phase went well and highlighted key findings
from the customer interviews conducted last month.
Bob then presented the engineering timeline. He explained that the backend API
refactoring is about 60% complete and should be finished by end of November.
The team has resolved most of the performance issues identified in the load tests.
Carol raised concerns about the holiday freeze period affecting our deployment
schedule. She suggested we move the beta launch to early December to give the
QA team enough time for regression testing before the code freeze.
David agreed with Carol's assessment and proposed allocating two additional
engineers from the platform team to help with the testing effort. He also
mentioned that the documentation needs to be updated before the release.
Action items:
- Alice will finalize the design specs by Friday
- Bob will coordinate with the platform team on resource allocation
- Carol will update the project timeline in Jira
- David will schedule a follow-up meeting for next Tuesday
The meeting concluded at 3:30 PM with agreement to reconvene next week.
"""
# Dense content: AWS cost data (high entity density)
# This is structured data with many entities per token.
# It WILL trigger chunking - processed in multiple LLM calls.
DENSE_EPISODE_CONTENT = {
'report_type': 'AWS Cost Breakdown',
'months': [
{
'period': '2025-01',
'services': [
{'name': 'Amazon S3', 'cost': 2487.97},
{'name': 'Amazon RDS', 'cost': 1071.74},
{'name': 'Amazon ECS', 'cost': 853.74},
{'name': 'Amazon OpenSearch', 'cost': 389.74},
{'name': 'AWS Secrets Manager', 'cost': 265.77},
{'name': 'CloudWatch', 'cost': 232.34},
{'name': 'Amazon VPC', 'cost': 238.39},
{'name': 'EC2 Other', 'cost': 226.82},
{'name': 'Amazon EC2 Compute', 'cost': 78.27},
{'name': 'Amazon DocumentDB', 'cost': 65.40},
{'name': 'Amazon ECR', 'cost': 29.00},
{'name': 'Amazon ELB', 'cost': 37.53},
],
},
{
'period': '2025-02',
'services': [
{'name': 'Amazon S3', 'cost': 2721.04},
{'name': 'Amazon RDS', 'cost': 1035.77},
{'name': 'Amazon ECS', 'cost': 779.49},
{'name': 'Amazon OpenSearch', 'cost': 357.90},
{'name': 'AWS Secrets Manager', 'cost': 268.57},
{'name': 'CloudWatch', 'cost': 224.57},
{'name': 'Amazon VPC', 'cost': 215.15},
{'name': 'EC2 Other', 'cost': 213.86},
{'name': 'Amazon EC2 Compute', 'cost': 70.70},
{'name': 'Amazon DocumentDB', 'cost': 59.07},
{'name': 'Amazon ECR', 'cost': 33.92},
{'name': 'Amazon ELB', 'cost': 33.89},
],
},
{
'period': '2025-03',
'services': [
{'name': 'Amazon S3', 'cost': 2952.31},
{'name': 'Amazon RDS', 'cost': 1198.79},
{'name': 'Amazon ECS', 'cost': 869.78},
{'name': 'Amazon OpenSearch', 'cost': 389.75},
{'name': 'AWS Secrets Manager', 'cost': 271.33},
{'name': 'CloudWatch', 'cost': 233.00},
{'name': 'Amazon VPC', 'cost': 238.31},
{'name': 'EC2 Other', 'cost': 227.78},
{'name': 'Amazon EC2 Compute', 'cost': 78.21},
{'name': 'Amazon DocumentDB', 'cost': 65.40},
{'name': 'Amazon ECR', 'cost': 33.75},
{'name': 'Amazon ELB', 'cost': 37.54},
],
},
{
'period': '2025-04',
'services': [
{'name': 'Amazon S3', 'cost': 3189.62},
{'name': 'Amazon RDS', 'cost': 1102.30},
{'name': 'Amazon ECS', 'cost': 848.19},
{'name': 'Amazon OpenSearch', 'cost': 379.14},
{'name': 'AWS Secrets Manager', 'cost': 270.89},
{'name': 'CloudWatch', 'cost': 230.64},
{'name': 'Amazon VPC', 'cost': 230.54},
{'name': 'EC2 Other', 'cost': 220.18},
{'name': 'Amazon EC2 Compute', 'cost': 75.70},
{'name': 'Amazon DocumentDB', 'cost': 63.29},
{'name': 'Amazon ECR', 'cost': 35.21},
{'name': 'Amazon ELB', 'cost': 36.30},
],
},
{
'period': '2025-05',
'services': [
{'name': 'Amazon S3', 'cost': 3423.07},
{'name': 'Amazon RDS', 'cost': 1014.50},
{'name': 'Amazon ECS', 'cost': 874.75},
{'name': 'Amazon OpenSearch', 'cost': 389.71},
{'name': 'AWS Secrets Manager', 'cost': 274.91},
{'name': 'CloudWatch', 'cost': 233.28},
{'name': 'Amazon VPC', 'cost': 238.53},
{'name': 'EC2 Other', 'cost': 227.27},
{'name': 'Amazon EC2 Compute', 'cost': 78.27},
{'name': 'Amazon DocumentDB', 'cost': 65.40},
{'name': 'Amazon ECR', 'cost': 37.42},
{'name': 'Amazon ELB', 'cost': 37.52},
],
},
{
'period': '2025-06',
'services': [
{'name': 'Amazon S3', 'cost': 3658.14},
{'name': 'Amazon RDS', 'cost': 963.60},
{'name': 'Amazon ECS', 'cost': 942.45},
{'name': 'Amazon OpenSearch', 'cost': 379.06},
{'name': 'AWS Secrets Manager', 'cost': 282.41},
{'name': 'CloudWatch', 'cost': 230.69},
{'name': 'Amazon VPC', 'cost': 230.64},
{'name': 'EC2 Other', 'cost': 220.11},
{'name': 'Amazon EC2 Compute', 'cost': 75.74},
{'name': 'Amazon DocumentDB', 'cost': 63.30},
{'name': 'Amazon ECR', 'cost': 39.49},
{'name': 'Amazon ELB', 'cost': 36.30},
],
},
],
}
# Message content: A conversation (moderate entity density)
# This demonstrates message-type episodes with speaker patterns.
MESSAGE_EPISODE_CONTENT = """User: I need to set up a new development environment for the analytics project.
Assistant: I can help with that. Which cloud provider are you using - AWS, GCP, or Azure?
User: We're using AWS. The project requires PostgreSQL, Redis, and Elasticsearch.
Assistant: Great choice. For PostgreSQL, I recommend Amazon RDS with the db.r5.large instance type. For Redis, you can use Amazon ElastiCache. For Elasticsearch, Amazon OpenSearch Service would be ideal.
User: What about the compute layer? We need to run Python data pipelines.
Assistant: For Python data pipelines, I suggest Amazon ECS with Fargate for serverless containers, or AWS Lambda for smaller, event-driven tasks. If you need more control, EC2 instances with Auto Scaling would work well.
User: Let's go with ECS Fargate. Can you also recommend a CI/CD setup?
Assistant: For CI/CD with ECS Fargate, I recommend AWS CodePipeline integrated with CodeBuild for building container images, and CodeDeploy for blue-green deployments. You can store your container images in Amazon ECR.
"""
def _print_banner(title: str) -> None:
    """Print a '=' banner section header in the example's console format."""
    print('=' * 60)
    print(title)
    print('=' * 60)


def _print_content_stats(content: str, expected_behavior: str) -> None:
    """Print size stats and the expected chunking behavior for an episode body."""
    print(f'Content length: {len(content)} characters')
    print(f'Estimated tokens: ~{len(content) // 4}')
    print(f'Expected behavior: {expected_behavior}')
    print()


async def _search_and_report(graphiti: Graphiti, query: str) -> None:
    """Run a graph search for *query* and print up to three matching facts."""
    print(f"\nSearching for: '{query}'")
    results = await graphiti.search(query)
    print(f'Found {len(results)} results')
    for r in results[:3]:
        print(f' - {r.fact}')


async def main():
    """Ingest one normal, one dense, and one message episode, then verify via search.

    Demonstrates the three ingestion paths:
      1. Normal prose  -> single LLM call (no chunking)
      2. Dense JSON    -> automatic chunking across multiple LLM calls
      3. Conversation  -> chunking decision depends on the density threshold
    """
    graphiti = Graphiti(neo4j_uri, neo4j_user, neo4j_password)
    try:
        # Example 1: prose content with low entity density -> single LLM call.
        _print_banner('EXAMPLE 1: Normal Content (Meeting Transcript)')
        _print_content_stats(NORMAL_EPISODE_CONTENT, 'Single LLM call (no chunking)')
        await graphiti.add_episode(
            name='Q4 Planning Meeting',
            episode_body=NORMAL_EPISODE_CONTENT,
            source=EpisodeType.text,
            source_description='Meeting transcript',
            reference_time=datetime.now(timezone.utc),
        )
        print('Successfully added normal episode\n')

        # Example 2: entity-dense structured data -> chunking triggered.
        _print_banner('EXAMPLE 2: Dense Content (AWS Cost Report)')
        dense_json = json.dumps(DENSE_EPISODE_CONTENT)
        _print_content_stats(dense_json, 'Multiple LLM calls (chunking enabled)')
        await graphiti.add_episode(
            name='AWS Cost Report 2025 H1',
            episode_body=dense_json,
            source=EpisodeType.json,
            source_description='AWS cost breakdown by service',
            reference_time=datetime.now(timezone.utc),
        )
        print('Successfully added dense episode\n')

        # Example 3: conversation; chunking preserves message boundaries.
        _print_banner('EXAMPLE 3: Message Content (Conversation)')
        _print_content_stats(MESSAGE_EPISODE_CONTENT, 'Depends on density threshold')
        await graphiti.add_episode(
            name='Dev Environment Setup Chat',
            episode_body=MESSAGE_EPISODE_CONTENT,
            source=EpisodeType.message,
            source_description='Support conversation',
            reference_time=datetime.now(timezone.utc),
        )
        print('Successfully added message episode\n')

        # Verify the ingested episodes produced searchable facts.
        _print_banner('SEARCH: Verifying extracted entities')
        await _search_and_report(graphiti, 'Q4 planning meeting participants')
        await _search_and_report(graphiti, 'AWS S3 costs')
        await _search_and_report(graphiti, 'ECS Fargate recommendations')
    finally:
        await graphiti.close()
        print('\nConnection closed')


if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "getzep/graphiti",
"file_path": "examples/quickstart/dense_vs_normal_ingestion.py",
"license": "Apache License 2.0",
"lines": 297,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/utils/content_chunking.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import logging
import random
import re
from itertools import combinations
from math import comb
from typing import TypeVar
from graphiti_core.helpers import (
CHUNK_DENSITY_THRESHOLD,
CHUNK_MIN_TOKENS,
CHUNK_OVERLAP_TOKENS,
CHUNK_TOKEN_SIZE,
)
from graphiti_core.nodes import EpisodeType
logger = logging.getLogger(__name__)
# Approximate characters per token (conservative estimate)
CHARS_PER_TOKEN = 4


def estimate_tokens(text: str) -> int:
    """Cheaply approximate how many tokens *text* contains.

    Divides the character count by CHARS_PER_TOKEN (~4 chars/token). This
    avoids real tokenization entirely, so the estimate is fast and identical
    across every LLM provider.

    Args:
        text: The text to estimate tokens for.

    Returns:
        Estimated token count (floor division, so short strings may be 0).
    """
    return len(text) // CHARS_PER_TOKEN
def _tokens_to_chars(tokens: int) -> int:
    """Translate a token budget into its approximate character equivalent."""
    return CHARS_PER_TOKEN * tokens
def should_chunk(content: str, episode_type: EpisodeType) -> bool:
    """Decide whether *content* warrants chunked extraction.

    Chunking is reserved for inputs that are simultaneously large
    (>= CHUNK_MIN_TOKENS) and entity-dense. Small or narrative content is
    left whole: it extracts reliably in one LLM call, and chunking it would
    only cost context.

    Args:
        content: The content to evaluate.
        episode_type: Type of episode (json, message, text).

    Returns:
        True only for content that is both large and high density.
    """
    token_count = estimate_tokens(content)
    # Short inputs always process fine in a single pass.
    return token_count >= CHUNK_MIN_TOKENS and _estimate_high_density(
        content, episode_type, token_count
    )
def _estimate_high_density(content: str, episode_type: EpisodeType, tokens: int) -> bool:
    """Route the density estimate to the JSON or text heuristic.

    High-density content (many entities per token) benefits from chunking,
    while prose loses context when chunked — the two content kinds need
    different heuristics.

    Args:
        content: The content to analyze.
        episode_type: Type of episode.
        tokens: Pre-computed token count for *content*.

    Returns:
        True if the content appears to have high entity density.
    """
    if episode_type == EpisodeType.json:
        return _json_likely_dense(content, tokens)
    return _text_likely_dense(content, tokens)
def _json_likely_dense(content: str, tokens: int) -> bool:
    """Heuristically decide whether JSON content is entity-dense.

    Arrays are scored by element count and objects by shallow key count,
    both normalized to items per 1000 tokens and compared against the scaled
    density threshold. Unparseable input falls back to the text heuristic.

    Args:
        content: JSON string content.
        tokens: Token count for *content*.

    Returns:
        True if the JSON appears to have high entity density.
    """
    try:
        parsed = json.loads(content)
    except json.JSONDecodeError:
        # Not valid JSON after all; score it like prose instead.
        return _text_likely_dense(content, tokens)

    if isinstance(parsed, list):
        # Each array element typically carries at least one entity.
        item_count = len(parsed)
    elif isinstance(parsed, dict):
        # Shallow key count (two levels) approximates the data-point count.
        item_count = _count_json_keys(parsed, max_depth=2)
    else:
        # Scalar payloads never need chunking.
        return False

    density = (item_count / tokens) * 1000 if tokens > 0 else 0
    return density > CHUNK_DENSITY_THRESHOLD * 1000  # Scale threshold
def _count_json_keys(data: dict, max_depth: int = 2, current_depth: int = 0) -> int:
"""Count keys in a JSON object up to a certain depth.
Args:
data: Dictionary to count keys in
max_depth: Maximum depth to traverse
current_depth: Current recursion depth
Returns:
Count of keys
"""
if current_depth >= max_depth:
return 0
count = len(data)
for value in data.values():
if isinstance(value, dict):
count += _count_json_keys(value, max_depth, current_depth + 1)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
count += _count_json_keys(item, max_depth, current_depth + 1)
return count
def _text_likely_dense(content: str, tokens: int) -> bool:
    """Heuristically decide whether prose is entity-dense.

    Mid-sentence capitalized words serve as a proxy for named entities
    (people, places, organizations, products); their rate per 1000 tokens is
    compared to half the JSON density threshold.

    Args:
        content: Text content.
        tokens: Token count for *content*.

    Returns:
        True if the text appears to have high entity density.
    """
    if tokens == 0:
        return False
    words = content.split()
    if not words:
        return False

    entity_like = 0
    for idx in range(1, len(words)):
        # Skip likely sentence starters: any word right after . ! or ?
        # (index 0 is excluded from the range for the same reason).
        if words[idx - 1].rstrip()[-1:] in '.!?':
            continue
        core = words[idx].strip('.,!?;:\'"()[]{}')
        # Count TitleCase words, excluding ALL-CAPS acronyms/emphasis.
        if core and core[0].isupper() and not core.isupper():
            entity_like += 1

    # Capitalized words per 1000 tokens.
    density = (entity_like / tokens) * 1000 if tokens > 0 else 0
    # Prose threshold is half the JSON one: a well-written article naturally
    # mentions some names (~5-10% of words).
    return density > CHUNK_DENSITY_THRESHOLD * 500
def chunk_json_content(
    content: str,
    chunk_size_tokens: int | None = None,
    overlap_tokens: int | None = None,
) -> list[str]:
    """Split JSON content into chunks while preserving structure.

    Arrays are split at element boundaries (objects stay whole); objects are
    split at top-level key boundaries. Content that fails to parse is chunked
    as plain text instead.

    Args:
        content: JSON string to chunk.
        chunk_size_tokens: Target size per chunk in tokens (default from env).
        overlap_tokens: Overlap between chunks in tokens (default from env).

    Returns:
        List of JSON string chunks.
    """
    size_tokens = chunk_size_tokens or CHUNK_TOKEN_SIZE
    ovl_tokens = overlap_tokens or CHUNK_OVERLAP_TOKENS
    size_chars = _tokens_to_chars(size_tokens)
    ovl_chars = _tokens_to_chars(ovl_tokens)

    try:
        parsed = json.loads(content)
    except json.JSONDecodeError:
        logger.warning('Failed to parse JSON, falling back to text chunking')
        return chunk_text_content(content, size_tokens, ovl_tokens)

    if isinstance(parsed, list):
        return _chunk_json_array(parsed, size_chars, ovl_chars)
    if isinstance(parsed, dict):
        return _chunk_json_object(parsed, size_chars, ovl_chars)
    # Scalar value, return as-is
    return [content]
def _chunk_json_array(
    data: list,
    chunk_size_chars: int,
    overlap_chars: int,
) -> list[str]:
    """Chunk a JSON array by splitting at element boundaries.

    Elements are never split: each chunk is a JSON-serialized sub-array whose
    serialized length stays near chunk_size_chars. Each new chunk starts with
    the trailing elements of the previous one (up to overlap_chars of
    serialized text) so entities near a boundary appear in both chunks.

    Args:
        data: Parsed JSON array to split.
        chunk_size_chars: Approximate maximum characters per serialized chunk.
        overlap_chars: Approximate serialized overlap between chunks.

    Returns:
        List of JSON strings, each encoding a sub-array of `data`.
    """
    if not data:
        return ['[]']
    chunks: list[str] = []
    current_elements: list = []
    current_size = 2  # Account for '[]'
    for element in data:
        element_json = json.dumps(element)
        element_size = len(element_json) + 2  # Account for comma and space
        # Check if adding this element would exceed chunk size
        if current_elements and current_size + element_size > chunk_size_chars:
            # Save current chunk
            chunks.append(json.dumps(current_elements))
            # Start new chunk with overlap (include last few elements)
            overlap_elements = _get_overlap_elements(current_elements, overlap_chars)
            current_elements = overlap_elements
            # Re-measure: the carried-over elements are serialized afresh.
            current_size = len(json.dumps(current_elements)) if current_elements else 2
        current_elements.append(element)
        current_size += element_size
    # Don't forget the last chunk
    if current_elements:
        chunks.append(json.dumps(current_elements))
    return chunks if chunks else ['[]']
def _get_overlap_elements(elements: list, overlap_chars: int) -> list:
"""Get elements from the end of a list that fit within overlap_chars."""
if not elements:
return []
overlap_elements: list = []
current_size = 2 # Account for '[]'
for element in reversed(elements):
element_json = json.dumps(element)
element_size = len(element_json) + 2
if current_size + element_size > overlap_chars:
break
overlap_elements.insert(0, element)
current_size += element_size
return overlap_elements
def _chunk_json_object(
    data: dict,
    chunk_size_chars: int,
    overlap_chars: int,
) -> list[str]:
    """Chunk a JSON object by splitting at top-level key boundaries.

    Key/value pairs are kept intact: each chunk is a JSON-serialized
    sub-object near chunk_size_chars in length. Each new chunk starts with
    the trailing keys of the previous chunk (up to overlap_chars of
    serialized text) to cover boundary entities.

    Args:
        data: Parsed JSON object to split.
        chunk_size_chars: Approximate maximum characters per serialized chunk.
        overlap_chars: Approximate serialized overlap between chunks.

    Returns:
        List of JSON strings, each encoding a sub-object of `data`.
    """
    if not data:
        return ['{}']
    chunks: list[str] = []
    current_keys: list[str] = []  # Insertion order of keys in current_dict
    current_dict: dict = {}
    current_size = 2  # Account for '{}'
    for key, value in data.items():
        entry_json = json.dumps({key: value})
        entry_size = len(entry_json)
        # Check if adding this entry would exceed chunk size
        if current_dict and current_size + entry_size > chunk_size_chars:
            # Save current chunk
            chunks.append(json.dumps(current_dict))
            # Start new chunk with overlap (include last few keys)
            overlap_dict = _get_overlap_dict(current_dict, current_keys, overlap_chars)
            current_dict = overlap_dict
            current_keys = list(overlap_dict.keys())
            current_size = len(json.dumps(current_dict)) if current_dict else 2
        current_dict[key] = value
        current_keys.append(key)
        current_size += entry_size
    # Don't forget the last chunk
    if current_dict:
        chunks.append(json.dumps(current_dict))
    return chunks if chunks else ['{}']
def _get_overlap_dict(data: dict, keys: list[str], overlap_chars: int) -> dict:
"""Get key-value pairs from the end of a dict that fit within overlap_chars."""
if not data or not keys:
return {}
overlap_dict: dict = {}
current_size = 2 # Account for '{}'
for key in reversed(keys):
if key not in data:
continue
entry_json = json.dumps({key: data[key]})
entry_size = len(entry_json)
if current_size + entry_size > overlap_chars:
break
overlap_dict[key] = data[key]
current_size += entry_size
# Reverse to maintain original order
return dict(reversed(list(overlap_dict.items())))
def chunk_text_content(
    content: str,
    chunk_size_tokens: int | None = None,
    overlap_tokens: int | None = None,
) -> list[str]:
    """Split prose into chunks at natural boundaries (paragraphs, sentences).

    Paragraphs are packed greedily into chunks; the tail of each finished
    chunk is carried forward as overlap so entities straddling a boundary
    are seen twice. A paragraph larger than a whole chunk is handed to the
    sentence-level splitter.

    Args:
        content: Text to chunk.
        chunk_size_tokens: Target size per chunk in tokens (default from env).
        overlap_tokens: Overlap between chunks in tokens (default from env).

    Returns:
        List of text chunks.
    """
    size_tokens = chunk_size_tokens or CHUNK_TOKEN_SIZE
    ovl_tokens = overlap_tokens or CHUNK_OVERLAP_TOKENS
    size_chars = _tokens_to_chars(size_tokens)
    ovl_chars = _tokens_to_chars(ovl_tokens)

    if len(content) <= size_chars:
        return [content]

    chunks: list[str] = []
    pending: list[str] = []
    pending_len = 0

    # Paragraphs are runs of text separated by blank lines.
    for raw_paragraph in re.split(r'\n\s*\n', content):
        paragraph = raw_paragraph.strip()
        if not paragraph:
            continue

        if len(paragraph) > size_chars:
            # Oversized paragraph: flush what we have, then split by sentence.
            if pending:
                chunks.append('\n\n'.join(pending))
                pending = []
                pending_len = 0
            chunks.extend(_chunk_by_sentences(paragraph, size_chars, ovl_chars))
            continue

        if pending and pending_len + len(paragraph) + 2 > size_chars:
            # Flush the current chunk and seed the next one with overlap text.
            joined = '\n\n'.join(pending)
            chunks.append(joined)
            carried = _get_overlap_text(joined, ovl_chars)
            pending = [carried] if carried else []
            pending_len = len(carried) if carried else 0

        pending.append(paragraph)
        pending_len += len(paragraph) + 2  # Allowance for the '\n\n' joiner

    if pending:
        chunks.append('\n\n'.join(pending))

    return chunks if chunks else [content]
def _chunk_by_sentences(
    text: str,
    chunk_size_chars: int,
    overlap_chars: int,
) -> list[str]:
    """Split text by sentence boundaries.

    Sentences (split after ., ! or ? followed by whitespace) are packed
    greedily into chunks of at most chunk_size_chars, carrying a
    word-boundary overlap between consecutive chunks. A single sentence
    longer than chunk_size_chars is split by fixed size as a last resort.

    Args:
        text: Paragraph text to split.
        chunk_size_chars: Maximum characters per chunk.
        overlap_chars: Character overlap carried between chunks.

    Returns:
        List of text chunks.
    """
    # Split on sentence-ending punctuation followed by whitespace
    sentence_pattern = r'(?<=[.!?])\s+'
    sentences = re.split(sentence_pattern, text)
    chunks: list[str] = []
    current_chunk: list[str] = []
    current_size = 0
    for sentence in sentences:
        sentence = sentence.strip()
        if not sentence:
            continue
        sent_size = len(sentence)
        # If a single sentence is too large, split it by fixed size
        if sent_size > chunk_size_chars:
            if current_chunk:
                chunks.append(' '.join(current_chunk))
                current_chunk = []
                current_size = 0
            # Split by fixed size as last resort
            fixed_chunks = _chunk_by_size(sentence, chunk_size_chars, overlap_chars)
            chunks.extend(fixed_chunks)
            continue
        # Check if adding this sentence would exceed chunk size
        if current_chunk and current_size + sent_size + 1 > chunk_size_chars:
            chunks.append(' '.join(current_chunk))
            # Start new chunk with overlap
            overlap_text = _get_overlap_text(' '.join(current_chunk), overlap_chars)
            if overlap_text:
                current_chunk = [overlap_text]
                current_size = len(overlap_text)
            else:
                current_chunk = []
                current_size = 0
        current_chunk.append(sentence)
        current_size += sent_size + 1  # +1 for the joining space
    if current_chunk:
        chunks.append(' '.join(current_chunk))
    return chunks
def _chunk_by_size(
text: str,
chunk_size_chars: int,
overlap_chars: int,
) -> list[str]:
"""Split text by fixed character size (last resort)."""
chunks: list[str] = []
start = 0
while start < len(text):
end = min(start + chunk_size_chars, len(text))
# Try to break at word boundary
if end < len(text):
space_idx = text.rfind(' ', start, end)
if space_idx > start:
end = space_idx
chunks.append(text[start:end].strip())
# Move start forward, ensuring progress even if overlap >= chunk_size
# Always advance by at least (chunk_size - overlap) or 1 char minimum
min_progress = max(1, chunk_size_chars - overlap_chars)
start = max(start + min_progress, end - overlap_chars)
return chunks
def _get_overlap_text(text: str, overlap_chars: int) -> str:
"""Get the last overlap_chars characters of text, breaking at word boundary."""
if len(text) <= overlap_chars:
return text
overlap_start = len(text) - overlap_chars
# Find the next word boundary after overlap_start
space_idx = text.find(' ', overlap_start)
if space_idx != -1:
return text[space_idx + 1 :]
return text[overlap_start:]
def chunk_message_content(
    content: str,
    chunk_size_tokens: int | None = None,
    overlap_tokens: int | None = None,
) -> list[str]:
    """Split conversation content into chunks, never cutting a message in half.

    Tries, in order: a JSON array of message objects, a 'Speaker: message'
    transcript, and finally plain line-based chunking.

    Args:
        content: Conversation content to chunk.
        chunk_size_tokens: Target size per chunk in tokens (default from env).
        overlap_tokens: Overlap between chunks in tokens (default from env).

    Returns:
        List of conversation chunks.
    """
    size_tokens = chunk_size_tokens or CHUNK_TOKEN_SIZE
    ovl_tokens = overlap_tokens or CHUNK_OVERLAP_TOKENS
    size_chars = _tokens_to_chars(size_tokens)
    ovl_chars = _tokens_to_chars(ovl_tokens)

    if len(content) <= size_chars:
        return [content]

    # Format 1: a JSON array of message objects.
    try:
        parsed = json.loads(content)
    except json.JSONDecodeError:
        parsed = None
    if isinstance(parsed, list):
        return _chunk_message_array(parsed, size_chars, ovl_chars)

    # Format 2: 'Speaker: message' transcript lines.
    speaker_pattern = r'^([A-Za-z_][A-Za-z0-9_\s]*):(.+?)(?=^[A-Za-z_][A-Za-z0-9_\s]*:|$)'
    if re.search(speaker_pattern, content, re.MULTILINE | re.DOTALL):
        return _chunk_speaker_messages(content, size_chars, ovl_chars)

    # Fallback to line-based chunking
    return _chunk_by_lines(content, size_chars, ovl_chars)
def _chunk_message_array(
    messages: list,
    chunk_size_chars: int,
    overlap_chars: int,
) -> list[str]:
    """Chunk a JSON array of message objects by delegating to the JSON array chunker."""
    return _chunk_json_array(messages, chunk_size_chars, overlap_chars)
def _chunk_speaker_messages(
    content: str,
    chunk_size_chars: int,
    overlap_chars: int,
) -> list[str]:
    """Chunk messages in 'Speaker: message' format.

    Splits the transcript at speaker labels so no message is ever cut in
    half, then packs whole messages into chunks of at most chunk_size_chars,
    repeating trailing messages as overlap. A single message larger than
    chunk_size_chars becomes its own chunk.

    Args:
        content: Transcript text with 'Speaker: ...' segments.
        chunk_size_chars: Maximum characters per chunk.
        overlap_chars: Character budget for repeated trailing messages.

    Returns:
        List of transcript chunks.
    """
    # Split on speaker patterns
    pattern = r'(?=^[A-Za-z_][A-Za-z0-9_\s]*:)'
    messages = re.split(pattern, content, flags=re.MULTILINE)
    messages = [m.strip() for m in messages if m.strip()]
    if not messages:
        return [content]
    chunks: list[str] = []
    current_messages: list[str] = []
    current_size = 0
    for message in messages:
        msg_size = len(message)
        # If a single message is too large, include it as its own chunk
        if msg_size > chunk_size_chars:
            if current_messages:
                chunks.append('\n'.join(current_messages))
                current_messages = []
                current_size = 0
            chunks.append(message)
            continue
        if current_messages and current_size + msg_size + 1 > chunk_size_chars:
            chunks.append('\n'.join(current_messages))
            # Get overlap (last message(s) that fit)
            overlap_messages = _get_overlap_messages(current_messages, overlap_chars)
            current_messages = overlap_messages
            # Joined length of the overlap; evaluates to -1 when the overlap
            # is empty, which the subsequent `+= msg_size + 1` cancels exactly.
            current_size = sum(len(m) for m in current_messages) + len(current_messages) - 1
        current_messages.append(message)
        current_size += msg_size + 1
    if current_messages:
        chunks.append('\n'.join(current_messages))
    return chunks if chunks else [content]
def _get_overlap_messages(messages: list[str], overlap_chars: int) -> list[str]:
"""Get messages from the end that fit within overlap_chars."""
if not messages:
return []
overlap: list[str] = []
current_size = 0
for msg in reversed(messages):
msg_size = len(msg) + 1
if current_size + msg_size > overlap_chars:
break
overlap.insert(0, msg)
current_size += msg_size
return overlap
def _chunk_by_lines(
    content: str,
    chunk_size_chars: int,
    overlap_chars: int,
) -> list[str]:
    """Chunk content at line boundaries with character-based overlap."""
    chunks: list[str] = []
    buffer: list[str] = []
    buffer_size = 0
    for line in content.split('\n'):
        cost = len(line) + 1  # +1 for the newline that joins lines
        if buffer and buffer_size + cost > chunk_size_chars:
            chunks.append('\n'.join(buffer))
            # Carry the tail of the flushed chunk forward as overlap.
            carried = _get_overlap_text('\n'.join(buffer), overlap_chars)
            if carried:
                buffer = carried.split('\n')
                buffer_size = len(carried)
            else:
                buffer = []
                buffer_size = 0
        buffer.append(line)
        buffer_size += cost
    if buffer:
        chunks.append('\n'.join(buffer))
    return chunks if chunks else [content]
# Generic item type handled by generate_covering_chunks.
T = TypeVar('T')

# Upper bound on candidate chunks examined per greedy round; beyond this,
# random sampling replaces exhaustive enumeration of C(n, k) combinations.
MAX_COMBINATIONS_TO_EVALUATE = 1000


def _random_combination(n: int, k: int) -> tuple[int, ...]:
    """Generate a random combination of k items from range(n)."""
    return tuple(sorted(random.sample(range(n), k)))


def _count_newly_covered(
    chunk_indices: tuple[int, ...],
    uncovered_pairs: set[frozenset[int]],
) -> int:
    """Count how many pairs in *uncovered_pairs* the candidate chunk covers."""
    return sum(
        1
        for i, idx_i in enumerate(chunk_indices)
        for idx_j in chunk_indices[i + 1 :]
        if frozenset([idx_i, idx_j]) in uncovered_pairs
    )


def generate_covering_chunks(items: list[T], k: int) -> list[tuple[list[T], list[int]]]:
    """Generate chunks of items that cover all pairs using a greedy approach.

    Based on the Handshake Flights Problem / Covering Design problem.
    Each chunk of K items covers C(K,2) = K(K-1)/2 pairs. We greedily select
    chunks to maximize coverage of uncovered pairs, minimizing the total number
    of chunks needed to ensure every pair of items appears in at least one chunk.

    For large inputs where C(n,k) > MAX_COMBINATIONS_TO_EVALUATE, random sampling
    is used instead of exhaustive search to maintain performance.

    Lower bound (Schönheim): F >= ceil(N/K * ceil((N-1)/(K-1)))

    Args:
        items: List of items to partition into covering chunks
        k: Maximum number of items per chunk

    Returns:
        List of tuples (chunk_items, global_indices) where global_indices maps
        each position in chunk_items to its index in the original items list.
    """
    n = len(items)
    if n <= k:
        # Everything fits in one chunk, which trivially covers all pairs.
        return [(items, list(range(n)))]
    # Track uncovered pairs as frozensets of index pairs.
    uncovered_pairs: set[frozenset[int]] = {
        frozenset([i, j]) for i in range(n) for j in range(i + 1, n)
    }
    chunks: list[tuple[list[T], list[int]]] = []
    # Decide once whether exhaustive enumeration of C(n, k) is affordable.
    use_sampling = comb(n, k) > MAX_COMBINATIONS_TO_EVALUATE
    while uncovered_pairs:
        # Greedy selection: find the chunk that covers the most uncovered pairs.
        best_chunk_indices: tuple[int, ...] | None = None
        best_covered_count = 0
        if use_sampling:
            # Evaluate random distinct combinations; cap total draws
            # (including duplicates) so the loop cannot spin forever.
            seen_combinations: set[tuple[int, ...]] = set()
            max_total_attempts = MAX_COMBINATIONS_TO_EVALUATE * 3
            total_attempts = 0
            samples_evaluated = 0
            while samples_evaluated < MAX_COMBINATIONS_TO_EVALUATE:
                total_attempts += 1
                if total_attempts > max_total_attempts:
                    break
                chunk_indices = _random_combination(n, k)
                if chunk_indices in seen_combinations:
                    continue
                seen_combinations.add(chunk_indices)
                samples_evaluated += 1
                covered_count = _count_newly_covered(chunk_indices, uncovered_pairs)
                if covered_count > best_covered_count:
                    best_covered_count = covered_count
                    best_chunk_indices = chunk_indices
        else:
            # Exhaustive greedy step over all candidate chunks.
            for chunk_indices in combinations(range(n), k):
                covered_count = _count_newly_covered(chunk_indices, uncovered_pairs)
                if covered_count > best_covered_count:
                    best_covered_count = covered_count
                    best_chunk_indices = chunk_indices
        if best_chunk_indices is None or best_covered_count == 0:
            # Greedy search couldn't find a chunk covering uncovered pairs
            # (possible under random sampling). Fall through to the direct
            # pair-covering fallback below.
            break
        # Mark every pair inside the chosen chunk as covered.
        for i, idx_i in enumerate(best_chunk_indices):
            for idx_j in best_chunk_indices[i + 1 :]:
                uncovered_pairs.discard(frozenset([idx_i, idx_j]))
        chunks.append(([items[idx] for idx in best_chunk_indices], list(best_chunk_indices)))
    # Guarantee coverage: emit a size-2 chunk for any pair the greedy loop missed.
    for pair in uncovered_pairs:
        pair_indices = sorted(pair)
        chunks.append(([items[idx] for idx in pair_indices], pair_indices))
    return chunks
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/utils/content_chunking.py",
"license": "Apache License 2.0",
"lines": 635,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:tests/utils/maintenance/test_entity_extraction.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from unittest.mock import AsyncMock, MagicMock
import pytest
from graphiti_core.edges import EntityEdge
from graphiti_core.graphiti_types import GraphitiClients
from graphiti_core.nodes import EntityNode, EpisodeType, EpisodicNode
from graphiti_core.utils.datetime_utils import utc_now
from graphiti_core.utils.maintenance.node_operations import (
_build_entity_types_context,
_extract_entity_summaries_batch,
extract_nodes,
)
def _make_clients():
    """Build a GraphitiClients stub plus the AsyncMock backing its LLM calls."""
    llm_generate = AsyncMock()
    llm_client = MagicMock()
    llm_client.generate_response = llm_generate
    # model_construct bypasses pydantic validation so MagicMock doubles are accepted.
    clients = GraphitiClients.model_construct(
        driver=MagicMock(),
        embedder=MagicMock(),
        cross_encoder=MagicMock(),
        llm_client=llm_client,
    )
    return clients, llm_generate
def _make_episode(
    content: str = 'Test content',
    source: EpisodeType = EpisodeType.text,
    group_id: str = 'group',
) -> EpisodicNode:
    """Build an EpisodicNode with sensible test defaults."""
    fields = {
        'name': 'test_episode',
        'group_id': group_id,
        'source': source,
        'source_description': 'test',
        'content': content,
        'valid_at': utc_now(),
    }
    return EpisodicNode(**fields)
class TestExtractNodesSmallInput:
    """extract_nodes behavior for inputs small enough to skip chunking."""

    @pytest.mark.asyncio
    async def test_small_input_single_llm_call(self, monkeypatch):
        """Small inputs should use a single LLM call without chunking."""
        clients, llm_generate = _make_clients()
        # Mock LLM response
        llm_generate.return_value = {
            'extracted_entities': [
                {'name': 'Alice', 'entity_type_id': 0},
                {'name': 'Bob', 'entity_type_id': 0},
            ]
        }
        # Small content (below threshold)
        episode = _make_episode(content='Alice talked to Bob.')
        nodes = await extract_nodes(
            clients,
            episode,
            previous_episodes=[],
        )
        # Verify results
        assert len(nodes) == 2
        assert {n.name for n in nodes} == {'Alice', 'Bob'}
        # LLM should be called exactly once
        llm_generate.assert_awaited_once()
    @pytest.mark.asyncio
    async def test_extracts_entity_types(self, monkeypatch):
        """Entity type classification should work correctly."""
        clients, llm_generate = _make_clients()
        from pydantic import BaseModel
        class Person(BaseModel):
            """A human person."""
            pass
        # entity_type_id indexes the entity-types context (0 = default Entity).
        llm_generate.return_value = {
            'extracted_entities': [
                {'name': 'Alice', 'entity_type_id': 1}, # Person
                {'name': 'Acme Corp', 'entity_type_id': 0}, # Default Entity
            ]
        }
        episode = _make_episode(content='Alice works at Acme Corp.')
        nodes = await extract_nodes(
            clients,
            episode,
            previous_episodes=[],
            entity_types={'Person': Person},
        )
        # Alice should have Person label
        alice = next(n for n in nodes if n.name == 'Alice')
        assert 'Person' in alice.labels
        # Acme should have Entity label
        acme = next(n for n in nodes if n.name == 'Acme Corp')
        assert 'Entity' in acme.labels
    @pytest.mark.asyncio
    async def test_excludes_entity_types(self, monkeypatch):
        """Excluded entity types should not appear in results."""
        clients, llm_generate = _make_clients()
        from pydantic import BaseModel
        class User(BaseModel):
            """A user of the system."""
            pass
        llm_generate.return_value = {
            'extracted_entities': [
                {'name': 'Alice', 'entity_type_id': 1}, # User (excluded)
                {'name': 'Project X', 'entity_type_id': 0}, # Entity
            ]
        }
        episode = _make_episode(content='Alice created Project X.')
        nodes = await extract_nodes(
            clients,
            episode,
            previous_episodes=[],
            entity_types={'User': User},
            excluded_entity_types=['User'],
        )
        # Alice should be excluded
        assert len(nodes) == 1
        assert nodes[0].name == 'Project X'
    @pytest.mark.asyncio
    async def test_filters_empty_names(self, monkeypatch):
        """Entities with empty names should be filtered out."""
        clients, llm_generate = _make_clients()
        llm_generate.return_value = {
            'extracted_entities': [
                {'name': 'Alice', 'entity_type_id': 0},
                {'name': '', 'entity_type_id': 0},
                {'name': ' ', 'entity_type_id': 0},
            ]
        }
        episode = _make_episode(content='Alice is here.')
        nodes = await extract_nodes(
            clients,
            episode,
            previous_episodes=[],
        )
        assert len(nodes) == 1
        assert nodes[0].name == 'Alice'
class TestExtractNodesPromptSelection:
    """extract_nodes must pick the extraction prompt matching the episode source."""

    @pytest.mark.asyncio
    async def test_uses_text_prompt_for_text_episodes(self, monkeypatch):
        """Text episodes should use extract_text prompt."""
        clients, llm_generate = _make_clients()
        llm_generate.return_value = {'extracted_entities': []}
        episode = _make_episode(source=EpisodeType.text)
        await extract_nodes(clients, episode, previous_episodes=[])
        # Check prompt_name parameter
        call_kwargs = llm_generate.call_args[1]
        assert call_kwargs.get('prompt_name') == 'extract_nodes.extract_text'
    @pytest.mark.asyncio
    async def test_uses_json_prompt_for_json_episodes(self, monkeypatch):
        """JSON episodes should use extract_json prompt."""
        clients, llm_generate = _make_clients()
        llm_generate.return_value = {'extracted_entities': []}
        episode = _make_episode(content='{}', source=EpisodeType.json)
        await extract_nodes(clients, episode, previous_episodes=[])
        # Inspect the keyword arguments of the recorded LLM call.
        call_kwargs = llm_generate.call_args[1]
        assert call_kwargs.get('prompt_name') == 'extract_nodes.extract_json'
    @pytest.mark.asyncio
    async def test_uses_message_prompt_for_message_episodes(self, monkeypatch):
        """Message episodes should use extract_message prompt."""
        clients, llm_generate = _make_clients()
        llm_generate.return_value = {'extracted_entities': []}
        episode = _make_episode(source=EpisodeType.message)
        await extract_nodes(clients, episode, previous_episodes=[])
        call_kwargs = llm_generate.call_args[1]
        assert call_kwargs.get('prompt_name') == 'extract_nodes.extract_message'
class TestBuildEntityTypesContext:
    """_build_entity_types_context: default Entity gets id 0; custom types get sequential ids."""

    def test_default_entity_type_always_included(self):
        """Default Entity type should always be at index 0."""
        context = _build_entity_types_context(None)
        assert len(context) == 1
        assert context[0]['entity_type_id'] == 0
        assert context[0]['entity_type_name'] == 'Entity'
    def test_custom_types_added_after_default(self):
        """Custom entity types should be added with sequential IDs."""
        from pydantic import BaseModel
        class Person(BaseModel):
            """A human person."""
            pass
        class Organization(BaseModel):
            """A business or organization."""
            pass
        context = _build_entity_types_context(
            {
                'Person': Person,
                'Organization': Organization,
            }
        )
        # One default entry plus one entry per custom type, in insertion order.
        assert len(context) == 3
        assert context[0]['entity_type_name'] == 'Entity'
        assert context[1]['entity_type_name'] == 'Person'
        assert context[1]['entity_type_id'] == 1
        assert context[2]['entity_type_name'] == 'Organization'
        assert context[2]['entity_type_id'] == 2
def _make_entity_node(
    name: str,
    summary: str = '',
    group_id: str = 'group',
    uuid: str | None = None,
) -> EntityNode:
    """Build an EntityNode for tests, optionally pinning its uuid."""
    attrs = {
        'name': name,
        'group_id': group_id,
        'labels': ['Entity'],
        'summary': summary,
        'created_at': utc_now(),
    }
    node = EntityNode(**attrs)
    # Only override the generated uuid when the caller supplies one.
    if uuid is not None:
        node.uuid = uuid
    return node
def _make_entity_edge(
    source_uuid: str,
    target_uuid: str,
    fact: str,
) -> EntityEdge:
    """Build an EntityEdge for tests linking two node uuids with a fact."""
    edge_attrs = {
        'source_node_uuid': source_uuid,
        'target_node_uuid': target_uuid,
        'name': 'TEST_RELATION',
        'fact': fact,
        'group_id': 'group',
        'created_at': utc_now(),
    }
    return EntityEdge(**edge_attrs)
class TestExtractEntitySummariesBatch:
    """_extract_entity_summaries_batch: when the LLM is invoked and how results map back to nodes."""

    @pytest.mark.asyncio
    async def test_no_nodes_needing_summarization(self):
        """When no nodes need summarization, no LLM call should be made."""
        llm_client = MagicMock()
        llm_generate = AsyncMock()
        llm_client.generate_response = llm_generate
        # Node with short summary that doesn't need LLM
        node = _make_entity_node('Alice', summary='Alice is a person.')
        nodes = [node]
        await _extract_entity_summaries_batch(
            llm_client,
            nodes,
            episode=None,
            previous_episodes=None,
            should_summarize_node=None,
            edges_by_node={},
        )
        # LLM should not be called
        llm_generate.assert_not_awaited()
        # Summary should remain unchanged
        assert nodes[0].summary == 'Alice is a person.'
    @pytest.mark.asyncio
    async def test_short_summary_with_edge_facts(self):
        """Nodes with short summaries should have edge facts appended without LLM."""
        llm_client = MagicMock()
        llm_generate = AsyncMock()
        llm_client.generate_response = llm_generate
        node = _make_entity_node('Alice', summary='Alice is a person.', uuid='alice-uuid')
        edge = _make_entity_edge('alice-uuid', 'bob-uuid', 'Alice works with Bob.')
        edges_by_node = {
            'alice-uuid': [edge],
        }
        await _extract_entity_summaries_batch(
            llm_client,
            [node],
            episode=None,
            previous_episodes=None,
            should_summarize_node=None,
            edges_by_node=edges_by_node,
        )
        # LLM should not be called
        llm_generate.assert_not_awaited()
        # Summary should include edge fact
        assert 'Alice is a person.' in node.summary
        assert 'Alice works with Bob.' in node.summary
    @pytest.mark.asyncio
    async def test_long_summary_needs_llm(self):
        """Nodes with long summaries should trigger LLM summarization."""
        llm_client = MagicMock()
        llm_generate = AsyncMock()
        llm_generate.return_value = {
            'summaries': [
                {'name': 'Alice', 'summary': 'Alice is a software engineer at Acme Corp.'}
            ]
        }
        llm_client.generate_response = llm_generate
        # Create a node with a very long summary (over MAX_SUMMARY_CHARS * 4)
        long_summary = 'Alice is a person. ' * 200 # ~3800 chars
        node = _make_entity_node('Alice', summary=long_summary)
        await _extract_entity_summaries_batch(
            llm_client,
            [node],
            episode=_make_episode(),
            previous_episodes=[],
            should_summarize_node=None,
            edges_by_node={},
        )
        # LLM should be called
        llm_generate.assert_awaited_once()
        # Summary should be updated from LLM response
        assert node.summary == 'Alice is a software engineer at Acme Corp.'
    @pytest.mark.asyncio
    async def test_should_summarize_filter(self):
        """Nodes filtered by should_summarize_node should be skipped."""
        llm_client = MagicMock()
        llm_generate = AsyncMock()
        llm_client.generate_response = llm_generate
        node = _make_entity_node('Alice', summary='')
        # Filter that rejects all nodes
        async def reject_all(n):
            return False
        await _extract_entity_summaries_batch(
            llm_client,
            [node],
            episode=_make_episode(),
            previous_episodes=[],
            should_summarize_node=reject_all,
            edges_by_node={},
        )
        # LLM should not be called
        llm_generate.assert_not_awaited()
    @pytest.mark.asyncio
    async def test_batch_multiple_nodes(self):
        """Multiple nodes needing summarization should be batched into one call."""
        llm_client = MagicMock()
        llm_generate = AsyncMock()
        llm_generate.return_value = {
            'summaries': [
                {'name': 'Alice', 'summary': 'Alice summary.'},
                {'name': 'Bob', 'summary': 'Bob summary.'},
            ]
        }
        llm_client.generate_response = llm_generate
        # Create nodes with long summaries
        long_summary = 'X ' * 1500 # Long enough to need LLM
        alice = _make_entity_node('Alice', summary=long_summary)
        bob = _make_entity_node('Bob', summary=long_summary)
        await _extract_entity_summaries_batch(
            llm_client,
            [alice, bob],
            episode=_make_episode(),
            previous_episodes=[],
            should_summarize_node=None,
            edges_by_node={},
        )
        # LLM should be called exactly once (batch call)
        llm_generate.assert_awaited_once()
        # Both nodes should have updated summaries
        assert alice.summary == 'Alice summary.'
        assert bob.summary == 'Bob summary.'
    @pytest.mark.asyncio
    async def test_unknown_entity_in_response(self):
        """LLM returning unknown entity names should be logged but not crash."""
        llm_client = MagicMock()
        llm_generate = AsyncMock()
        llm_generate.return_value = {
            'summaries': [
                {'name': 'UnknownEntity', 'summary': 'Should be ignored.'},
                {'name': 'Alice', 'summary': 'Alice summary.'},
            ]
        }
        llm_client.generate_response = llm_generate
        long_summary = 'X ' * 1500
        alice = _make_entity_node('Alice', summary=long_summary)
        await _extract_entity_summaries_batch(
            llm_client,
            [alice],
            episode=_make_episode(),
            previous_episodes=[],
            should_summarize_node=None,
            edges_by_node={},
        )
        # Alice should have updated summary
        assert alice.summary == 'Alice summary.'
    @pytest.mark.asyncio
    async def test_no_episode_and_no_summary(self):
        """Nodes with no summary and no episode should be skipped."""
        llm_client = MagicMock()
        llm_generate = AsyncMock()
        llm_client.generate_response = llm_generate
        node = _make_entity_node('Alice', summary='')
        await _extract_entity_summaries_batch(
            llm_client,
            [node],
            episode=None,
            previous_episodes=None,
            should_summarize_node=None,
            edges_by_node={},
        )
        # LLM should not be called - no content to summarize
        llm_generate.assert_not_awaited()
        assert node.summary == ''
    @pytest.mark.asyncio
    async def test_flight_partitioning(self, monkeypatch):
        """Nodes should be partitioned into flights of MAX_NODES."""
        # Set MAX_NODES to a small value for testing
        monkeypatch.setattr('graphiti_core.utils.maintenance.node_operations.MAX_NODES', 2)
        llm_client = MagicMock()
        call_count = 0
        call_args_list = []
        async def mock_generate(*args, **kwargs):
            nonlocal call_count
            call_count += 1
            # Extract entity names from the context
            # NOTE(review): assumes args[0] is the prompt message list and
            # index 1 is the user message — confirm against the prompt builder.
            context = args[0][1].content if args else ''
            call_args_list.append(context)
            return {'summaries': []}
        llm_client.generate_response = mock_generate
        # Create 5 nodes with long summaries (need LLM)
        long_summary = 'X ' * 1500
        nodes = [_make_entity_node(f'Entity{i}', summary=long_summary) for i in range(5)]
        await _extract_entity_summaries_batch(
            llm_client,
            nodes,
            episode=_make_episode(),
            previous_episodes=[],
            should_summarize_node=None,
            edges_by_node={},
        )
        # With MAX_NODES=2 and 5 nodes, we should have 3 flights (2+2+1)
        assert call_count == 3
    @pytest.mark.asyncio
    async def test_case_insensitive_name_matching(self):
        """LLM response names should match case-insensitively."""
        llm_client = MagicMock()
        llm_generate = AsyncMock()
        # LLM returns name with different casing
        llm_generate.return_value = {
            'summaries': [
                {'name': 'ALICE', 'summary': 'Alice summary from LLM.'},
            ]
        }
        llm_client.generate_response = llm_generate
        # Node has lowercase name
        long_summary = 'X ' * 1500
        node = _make_entity_node('alice', summary=long_summary)
        await _extract_entity_summaries_batch(
            llm_client,
            [node],
            episode=_make_episode(),
            previous_episodes=[],
            should_summarize_node=None,
            edges_by_node={},
        )
        # Should match despite case difference
        assert node.summary == 'Alice summary from LLM.'
| {
"repo_id": "getzep/graphiti",
"file_path": "tests/utils/maintenance/test_entity_extraction.py",
"license": "Apache License 2.0",
"lines": 452,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:tests/utils/test_content_chunking.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from graphiti_core.nodes import EpisodeType
from graphiti_core.utils.content_chunking import (
CHARS_PER_TOKEN,
_count_json_keys,
_json_likely_dense,
_text_likely_dense,
chunk_json_content,
chunk_message_content,
chunk_text_content,
estimate_tokens,
generate_covering_chunks,
should_chunk,
)
class TestEstimateTokens:
    """Token estimation derives a token count from character length."""

    def test_empty_string(self):
        assert estimate_tokens('') == 0

    def test_short_string(self):
        # One token per 4 characters.
        for text, expected in (('abcd', 1), ('abcdefgh', 2)):
            assert estimate_tokens(text) == expected

    def test_long_string(self):
        assert estimate_tokens('a' * 400) == 100

    def test_uses_chars_per_token_constant(self):
        sample = 'x' * (CHARS_PER_TOKEN * 10)
        assert estimate_tokens(sample) == 10
class TestChunkJsonArray:
    """JSON array chunking splits at element boundaries and loses nothing."""

    def test_small_array_no_chunking(self):
        payload = [{'name': 'Alice'}, {'name': 'Bob'}]
        chunks = chunk_json_content(json.dumps(payload), chunk_size_tokens=1000)
        assert len(chunks) == 1
        assert json.loads(chunks[0]) == payload

    def test_empty_array(self):
        assert chunk_json_content('[]', chunk_size_tokens=100) == ['[]']

    def test_array_splits_at_element_boundaries(self):
        # Build an array large enough to force multiple chunks.
        payload = [{'id': i, 'data': 'x' * 100} for i in range(20)]
        chunks = chunk_json_content(json.dumps(payload), chunk_size_tokens=100, overlap_tokens=20)
        for chunk in chunks:
            decoded = json.loads(chunk)
            assert isinstance(decoded, list)
            # Every element must arrive as a complete, unsplit object.
            for element in decoded:
                assert 'id' in element
                assert 'data' in element

    def test_array_preserves_all_elements(self):
        payload = [{'id': i} for i in range(10)]
        chunks = chunk_json_content(json.dumps(payload), chunk_size_tokens=50, overlap_tokens=10)
        # Union the ids seen across chunks (overlap duplicates are fine).
        seen = {element['id'] for chunk in chunks for element in json.loads(chunk)}
        assert seen == set(range(10))
class TestChunkJsonObject:
    """JSON object chunking splits at key boundaries and preserves keys."""

    def test_small_object_no_chunking(self):
        payload = {'name': 'Alice', 'age': 30}
        chunks = chunk_json_content(json.dumps(payload), chunk_size_tokens=1000)
        assert len(chunks) == 1
        assert json.loads(chunks[0]) == payload

    def test_empty_object(self):
        assert chunk_json_content('{}', chunk_size_tokens=100) == ['{}']

    def test_object_splits_at_key_boundaries(self):
        # Build an object large enough to force multiple chunks.
        payload = {f'key_{i}': 'x' * 100 for i in range(20)}
        chunks = chunk_json_content(json.dumps(payload), chunk_size_tokens=100, overlap_tokens=20)
        for chunk in chunks:
            decoded = json.loads(chunk)
            assert isinstance(decoded, dict)
            # Each key/value pair must survive intact.
            for key in decoded:
                assert key.startswith('key_')

    def test_object_preserves_all_keys(self):
        payload = {f'key_{i}': f'value_{i}' for i in range(10)}
        chunks = chunk_json_content(json.dumps(payload), chunk_size_tokens=50, overlap_tokens=10)
        seen = set()
        for chunk in chunks:
            seen.update(json.loads(chunk).keys())
        assert seen == {f'key_{i}' for i in range(10)}
class TestChunkJsonInvalid:
    """Non-JSON and scalar inputs are handled without raising."""

    def test_invalid_json_falls_back_to_text(self):
        malformed = 'not valid json {'
        chunks = chunk_json_content(malformed, chunk_size_tokens=1000)
        # Text-chunking fallback keeps the raw content.
        assert len(chunks) >= 1
        assert malformed in chunks[0]

    def test_scalar_value_returns_as_is(self):
        for literal in ('"string"', '123', 'true', 'null'):
            assert chunk_json_content(literal, chunk_size_tokens=1000) == [literal]
class TestChunkTextContent:
    """Text chunking: paragraph/sentence boundary handling and completeness."""

    def test_small_text_no_chunking(self):
        """Content below the chunk size is returned as a single chunk."""
        text = 'This is a short text.'
        chunks = chunk_text_content(text, chunk_size_tokens=1000)
        assert len(chunks) == 1
        assert chunks[0] == text

    def test_splits_at_paragraph_boundaries(self):
        """Forced splitting keeps paragraphs intact (no trailing spaces)."""
        paragraphs = ['Paragraph one.', 'Paragraph two.', 'Paragraph three.']
        text = '\n\n'.join(paragraphs)
        # Use small chunk size to force splitting
        chunks = chunk_text_content(text, chunk_size_tokens=10, overlap_tokens=5)
        # Each chunk should contain complete paragraphs (possibly with overlap)
        for chunk in chunks:
            # Should not have partial words cut off mid-paragraph
            assert not chunk.endswith(' ')

    def test_splits_at_sentence_boundaries_for_large_paragraphs(self):
        """A single oversized paragraph is split into multiple chunks."""
        # Create a single long paragraph with multiple sentences
        sentences = ['This is sentence number ' + str(i) + '.' for i in range(20)]
        long_paragraph = ' '.join(sentences)
        chunks = chunk_text_content(long_paragraph, chunk_size_tokens=50, overlap_tokens=10)
        # Should have multiple chunks
        assert len(chunks) > 1
        # NOTE: exact sentence-boundary placement is intentionally not
        # asserted; the chunker is allowed flexibility at boundaries. A
        # previous version asserted `chunk[-1] in '.!? ' or True`, which
        # was vacuous (always true) and has been removed as dead code.

    def test_preserves_text_completeness(self):
        """Every word of the input appears in at least one chunk."""
        text = 'Alpha beta gamma delta epsilon zeta eta theta.'
        chunks = chunk_text_content(text, chunk_size_tokens=10, overlap_tokens=2)
        # All words should appear in at least one chunk
        all_words = set(text.replace('.', '').split())
        found_words = set()
        for chunk in chunks:
            found_words.update(chunk.replace('.', '').split())
        assert all_words <= found_words
class TestChunkMessageContent:
    """Message chunking keeps speaker/message structure intact."""

    def test_small_message_no_chunking(self):
        transcript = 'Alice: Hello!\nBob: Hi there!'
        chunks = chunk_message_content(transcript, chunk_size_tokens=1000)
        assert len(chunks) == 1
        assert chunks[0] == transcript

    def test_preserves_speaker_message_format(self):
        transcript = '\n'.join(f'Speaker{i}: This is message number {i}.' for i in range(10))
        chunks = chunk_message_content(transcript, chunk_size_tokens=50, overlap_tokens=10)
        for chunk in chunks:
            # Every non-blank line keeps its 'Speaker:' prefix.
            for line in filter(str.strip, chunk.split('\n')):
                assert ':' in line

    def test_json_message_array_format(self):
        payload = [{'role': 'user', 'content': f'Message {i}'} for i in range(10)]
        chunks = chunk_message_content(json.dumps(payload), chunk_size_tokens=50, overlap_tokens=10)
        for chunk in chunks:
            decoded = json.loads(chunk)
            assert isinstance(decoded, list)
            for message in decoded:
                assert 'role' in message
                assert 'content' in message
class TestChunkOverlap:
    """Adjacent chunks share boundary content when overlap is requested."""

    def test_json_array_overlap_captures_boundary_elements(self):
        payload = [{'id': i, 'name': f'Entity {i}'} for i in range(10)]
        # Settings chosen so splitting (and thus overlap) can occur.
        chunks = chunk_json_content(json.dumps(payload), chunk_size_tokens=80, overlap_tokens=30)
        if len(chunks) > 1:
            for left, right in zip(chunks, chunks[1:]):
                left_ids = {item['id'] for item in json.loads(left)}
                right_ids = {item['id'] for item in json.loads(right)}
                # Overlap may legitimately be empty when elements are large;
                # this verifies structure, not exact overlap amount.
                _ = left_ids & right_ids

    def test_text_overlap_captures_boundary_text(self):
        text = '\n\n'.join(f'Paragraph {i} with some content here.' for i in range(10))
        chunks = chunk_text_content(text, chunk_size_tokens=50, overlap_tokens=20)
        if len(chunks) > 1:
            for left, right in zip(chunks, chunks[1:]):
                shared = set(left.split()) & set(right.split())
                # At minimum common words like 'Paragraph' are shared.
                assert len(shared) > 0
class TestEdgeCases:
    """Degenerate inputs must not crash the chunkers."""

    def test_very_large_single_element(self):
        # A single element bigger than the whole chunk budget.
        oversized = json.dumps([{'content': 'x' * 10000}])
        chunks = chunk_json_content(oversized, chunk_size_tokens=100, overlap_tokens=10)
        # Graceful handling: at least one chunk comes back.
        assert len(chunks) >= 1

    def test_empty_content(self):
        assert chunk_text_content('', chunk_size_tokens=100) == ['']
        assert chunk_message_content('', chunk_size_tokens=100) == ['']

    def test_whitespace_only(self):
        assert len(chunk_text_content(' \n\n ', chunk_size_tokens=100)) >= 1
class TestShouldChunk:
    """should_chunk: a minimum-size gate followed by a density heuristic."""

    def test_empty_content_never_chunks(self):
        """Empty content should never chunk."""
        assert not should_chunk('', EpisodeType.text)
        assert not should_chunk('', EpisodeType.json)
    def test_short_content_never_chunks(self, monkeypatch):
        """Short content should never chunk regardless of density."""
        from graphiti_core.utils import content_chunking
        # Set very low thresholds that would normally trigger chunking
        monkeypatch.setattr(content_chunking, 'CHUNK_DENSITY_THRESHOLD', 0.001)
        monkeypatch.setattr(content_chunking, 'CHUNK_MIN_TOKENS', 1000)
        # Dense but short JSON (~200 tokens, below 1000 minimum)
        dense_data = [{'name': f'Entity{i}'} for i in range(50)]
        dense_json = json.dumps(dense_data)
        assert not should_chunk(dense_json, EpisodeType.json)
    def test_high_density_large_json_chunks(self, monkeypatch):
        """Large high-density JSON should trigger chunking."""
        from graphiti_core.utils import content_chunking
        monkeypatch.setattr(content_chunking, 'CHUNK_DENSITY_THRESHOLD', 0.01)
        monkeypatch.setattr(content_chunking, 'CHUNK_MIN_TOKENS', 500)
        # Dense JSON: many elements, large enough to exceed minimum
        dense_data = [{'name': f'Entity{i}', 'desc': 'x' * 20} for i in range(200)]
        dense_json = json.dumps(dense_data)
        assert should_chunk(dense_json, EpisodeType.json)
    def test_low_density_text_no_chunk(self, monkeypatch):
        """Low-density prose should not trigger chunking."""
        from graphiti_core.utils import content_chunking
        monkeypatch.setattr(content_chunking, 'CHUNK_DENSITY_THRESHOLD', 0.05)
        monkeypatch.setattr(content_chunking, 'CHUNK_MIN_TOKENS', 100)
        # Low-density prose: mostly lowercase narrative
        prose = 'the quick brown fox jumps over the lazy dog. ' * 50
        assert not should_chunk(prose, EpisodeType.text)
    def test_low_density_json_no_chunk(self, monkeypatch):
        """Low-density JSON (few elements, lots of content) should not chunk."""
        from graphiti_core.utils import content_chunking
        monkeypatch.setattr(content_chunking, 'CHUNK_DENSITY_THRESHOLD', 0.05)
        monkeypatch.setattr(content_chunking, 'CHUNK_MIN_TOKENS', 100)
        # Sparse JSON: few elements with lots of content each
        sparse_data = [{'content': 'x' * 1000}, {'content': 'y' * 1000}]
        sparse_json = json.dumps(sparse_data)
        assert not should_chunk(sparse_json, EpisodeType.json)
class TestJsonDensityEstimation:
    """Heuristics that decide whether JSON content is entity-dense."""

    def test_dense_array_detected(self, monkeypatch):
        """Arrays with many elements should be detected as dense."""
        from graphiti_core.utils import content_chunking
        monkeypatch.setattr(content_chunking, 'CHUNK_DENSITY_THRESHOLD', 0.01)
        # Array with 100 elements, ~800 chars = 200 tokens
        # Density = 100/200 * 1000 = 500, threshold = 10
        data = [{'id': i} for i in range(100)]
        content = json.dumps(data)
        tokens = estimate_tokens(content)
        assert _json_likely_dense(content, tokens)
    def test_sparse_array_not_dense(self, monkeypatch):
        """Arrays with few elements should not be detected as dense."""
        from graphiti_core.utils import content_chunking
        monkeypatch.setattr(content_chunking, 'CHUNK_DENSITY_THRESHOLD', 0.05)
        # Array with 2 elements but lots of content each
        data = [{'content': 'x' * 1000}, {'content': 'y' * 1000}]
        content = json.dumps(data)
        tokens = estimate_tokens(content)
        assert not _json_likely_dense(content, tokens)
    def test_dense_object_detected(self, monkeypatch):
        """Objects with many keys should be detected as dense."""
        from graphiti_core.utils import content_chunking
        monkeypatch.setattr(content_chunking, 'CHUNK_DENSITY_THRESHOLD', 0.01)
        # Object with 50 keys
        data = {f'key_{i}': f'value_{i}' for i in range(50)}
        content = json.dumps(data)
        tokens = estimate_tokens(content)
        assert _json_likely_dense(content, tokens)
    def test_count_json_keys_shallow(self):
        """Key counting should work for nested structures."""
        data = {
            'a': 1,
            'b': {'c': 2, 'd': 3},
            'e': [{'f': 4}, {'g': 5}],
        }
        # At depth 2: a, b, c, d, e, f, g = 7 keys
        assert _count_json_keys(data, max_depth=2) == 7
    def test_count_json_keys_depth_limit(self):
        """Key counting should respect depth limit."""
        data = {
            'a': {'b': {'c': {'d': 1}}},
        }
        # At depth 1: only 'a'
        assert _count_json_keys(data, max_depth=1) == 1
        # At depth 2: 'a' and 'b'
        assert _count_json_keys(data, max_depth=2) == 2
class TestTextDensityEstimation:
    """Density heuristics for plain-text episode content."""

    def test_entity_rich_text_detected(self, monkeypatch):
        """Text with many proper nouns should be detected as dense."""
        from graphiti_core.utils import content_chunking

        monkeypatch.setattr(content_chunking, 'CHUNK_DENSITY_THRESHOLD', 0.01)
        # Stitch together name-heavy sentences, then repeat for enough tokens.
        sample = ''.join(
            [
                'Alice met Bob at Acme Corp. Then Carol and David joined them. ',
                'Eve from Globex introduced Frank and Grace. ',
                'Later Henry and Iris arrived from Initech. ',
            ]
        )
        sample = sample * 10
        token_count = estimate_tokens(sample)
        assert _text_likely_dense(sample, token_count)

    def test_prose_not_dense(self, monkeypatch):
        """Narrative prose should not be detected as dense."""
        from graphiti_core.utils import content_chunking

        monkeypatch.setattr(content_chunking, 'CHUNK_DENSITY_THRESHOLD', 0.05)
        # Lowercase narrative with essentially no proper nouns.
        sample = """
        the sun was setting over the horizon as the old man walked slowly
        down the dusty road. he had been traveling for many days and his
        feet were tired. the journey had been long but he knew that soon
        he would reach his destination. the wind whispered through the trees
        and the birds sang their evening songs.
        """
        sample = sample * 10
        token_count = estimate_tokens(sample)
        assert not _text_likely_dense(sample, token_count)

    def test_sentence_starters_ignored(self, monkeypatch):
        """Capitalized words after periods should be ignored."""
        from graphiti_core.utils import content_chunking

        monkeypatch.setattr(content_chunking, 'CHUNK_DENSITY_THRESHOLD', 0.05)
        # Many sentences, but every capital is sentence-initial — no proper nouns.
        sample = 'This is a sentence. Another one follows. Yet another here. ' * 50
        token_count = estimate_tokens(sample)
        assert not _text_likely_dense(sample, token_count)
class TestGenerateCoveringChunks:
    """Tests for the greedy covering chunks algorithm (Handshake Flights Problem).

    The invariant verified throughout: every unordered pair of input indices
    must appear together in at least one returned chunk.
    """

    def test_empty_list(self):
        """Empty list should return single chunk with empty items."""
        result = generate_covering_chunks([], k=3)
        # n=0 <= k=3, so returns single chunk with empty items
        assert result == [([], [])]

    def test_single_item(self):
        """Single item should return one chunk with that item."""
        items = ['A']
        result = generate_covering_chunks(items, k=3)
        assert len(result) == 1
        assert result[0] == (['A'], [0])

    def test_items_fit_in_single_chunk(self):
        """When n <= k, all items should be in one chunk."""
        items = ['A', 'B', 'C']
        result = generate_covering_chunks(items, k=5)
        assert len(result) == 1
        chunk_items, indices = result[0]
        assert chunk_items == items
        assert indices == [0, 1, 2]

    def test_items_equal_to_k(self):
        """When n == k, all items should be in one chunk."""
        items = ['A', 'B', 'C', 'D']
        result = generate_covering_chunks(items, k=4)
        assert len(result) == 1
        chunk_items, indices = result[0]
        assert chunk_items == items
        assert indices == [0, 1, 2, 3]

    def test_all_pairs_covered_k2(self):
        """With k=2, every pair of items must appear in exactly one chunk."""
        items = ['A', 'B', 'C', 'D']
        result = generate_covering_chunks(items, k=2)
        # Collect all pairs from chunks
        covered_pairs = set()
        for _, indices in result:
            assert len(indices) == 2
            pair = frozenset(indices)
            covered_pairs.add(pair)
        # All C(4,2) = 6 pairs should be covered
        expected_pairs = {
            frozenset([0, 1]),
            frozenset([0, 2]),
            frozenset([0, 3]),
            frozenset([1, 2]),
            frozenset([1, 3]),
            frozenset([2, 3]),
        }
        assert covered_pairs == expected_pairs

    def test_all_pairs_covered_k3(self):
        """With k=3, every pair must appear in at least one chunk."""
        items = list(range(6))  # 0, 1, 2, 3, 4, 5
        result = generate_covering_chunks(items, k=3)
        # Collect all covered pairs
        covered_pairs: set[frozenset[int]] = set()
        for _, indices in result:
            assert len(indices) == 3
            # Each chunk of 3 covers C(3,2) = 3 pairs
            for i in range(len(indices)):
                for j in range(i + 1, len(indices)):
                    covered_pairs.add(frozenset([indices[i], indices[j]]))
        # All C(6,2) = 15 pairs should be covered
        expected_pairs = {frozenset([i, j]) for i in range(6) for j in range(i + 1, 6)}
        assert covered_pairs == expected_pairs

    def test_all_pairs_covered_larger(self):
        """Verify all pairs covered for larger input."""
        items = list(range(10))
        result = generate_covering_chunks(items, k=4)
        # Collect all covered pairs
        covered_pairs: set[frozenset[int]] = set()
        for _, indices in result:
            assert len(indices) == 4
            for i in range(len(indices)):
                for j in range(i + 1, len(indices)):
                    covered_pairs.add(frozenset([indices[i], indices[j]]))
        # All C(10,2) = 45 pairs should be covered
        expected_pairs = {frozenset([i, j]) for i in range(10) for j in range(i + 1, 10)}
        assert covered_pairs == expected_pairs

    def test_index_mapping_correctness(self):
        """Global indices should correctly map to original items."""
        items = ['Alice', 'Bob', 'Carol', 'Dave', 'Eve']
        result = generate_covering_chunks(items, k=3)
        for chunk_items, indices in result:
            # Each chunk item should match the item at the corresponding global index
            for local_idx, global_idx in enumerate(indices):
                assert chunk_items[local_idx] == items[global_idx]

    def test_greedy_minimizes_chunks(self):
        """Greedy approach should produce reasonably few chunks.

        For n=6, k=3: Each chunk covers C(3,2)=3 pairs.
        Total pairs = C(6,2) = 15.
        Lower bound = ceil(15/3) = 5 chunks.
        Schönheim bound = ceil(6/3 * ceil(5/2)) = ceil(2 * 3) = 6 chunks.

        Note: When random sampling is used (large n,k), the fallback mechanism
        may create additional small chunks to cover remaining pairs, so the
        upper bound is not guaranteed.
        """
        items = list(range(6))
        result = generate_covering_chunks(items, k=3)
        # For small inputs (exhaustive enumeration), should achieve near-optimal
        # Should be at least the simple lower bound (5 for this case)
        assert len(result) >= 5
        # Verify all pairs are covered (the primary guarantee)
        covered_pairs: set[frozenset[int]] = set()
        for _, indices in result:
            for i in range(len(indices)):
                for j in range(i + 1, len(indices)):
                    covered_pairs.add(frozenset([indices[i], indices[j]]))
        expected_pairs = {frozenset([i, j]) for i in range(6) for j in range(i + 1, 6)}
        assert covered_pairs == expected_pairs

    def test_works_with_custom_types(self):
        """Function should work with any type, not just strings/ints."""

        class Entity:
            def __init__(self, name: str):
                self.name = name

        items = [Entity('A'), Entity('B'), Entity('C'), Entity('D')]
        result = generate_covering_chunks(items, k=2)
        # Verify structure
        assert len(result) > 0
        for chunk_items, indices in result:
            assert len(chunk_items) == 2
            assert len(indices) == 2
            # Items should be Entity objects
            for item in chunk_items:
                assert isinstance(item, Entity)

    def test_deterministic_output(self):
        """Same input should produce same output."""
        items = list(range(8))
        result1 = generate_covering_chunks(items, k=3)
        result2 = generate_covering_chunks(items, k=3)
        assert len(result1) == len(result2)
        for (chunk1, idx1), (chunk2, idx2) in zip(result1, result2, strict=True):
            assert chunk1 == chunk2
            assert idx1 == idx2

    def test_all_pairs_covered_k15_n30(self):
        """Verify all pairs covered for n=30, k=15 (realistic edge extraction scenario).

        For n=30, k=15:
        - Total pairs = C(30,2) = 435
        - Pairs per chunk = C(15,2) = 105
        - Lower bound = ceil(435/105) = 5 chunks
        - Schönheim bound = ceil(30/15 * ceil(29/14)) = ceil(2 * 3) = 6 chunks

        Note: When random sampling is used, the fallback mechanism may create
        additional small chunks (size 2) to cover remaining pairs, so chunk
        sizes may vary and the upper bound on chunk count is not guaranteed.
        """
        n = 30
        k = 15
        items = list(range(n))
        result = generate_covering_chunks(items, k=k)
        # Verify chunk sizes are at most k (fallback chunks may be smaller)
        for _, indices in result:
            assert len(indices) <= k, f'Expected chunk size <= {k}, got {len(indices)}'
        # Collect all covered pairs
        covered_pairs: set[frozenset[int]] = set()
        for _, indices in result:
            for i in range(len(indices)):
                for j in range(i + 1, len(indices)):
                    covered_pairs.add(frozenset([indices[i], indices[j]]))
        # All C(30,2) = 435 pairs should be covered
        expected_pairs = {frozenset([i, j]) for i in range(n) for j in range(i + 1, n)}
        assert len(expected_pairs) == 435, f'Expected 435 pairs, got {len(expected_pairs)}'
        assert covered_pairs == expected_pairs, (
            f'Missing {len(expected_pairs - covered_pairs)} pairs: {expected_pairs - covered_pairs}'
        )
        # Verify chunk count is at least the lower bound
        assert len(result) >= 5, f'Expected at least 5 chunks, got {len(result)}'

    def test_all_pairs_covered_with_random_sampling(self):
        """Verify all pairs covered when random sampling is triggered.

        When C(n,k) > MAX_COMBINATIONS_TO_EVALUATE, the algorithm uses random
        sampling instead of exhaustive enumeration. This test ensures the
        fallback logic covers any pairs missed by the greedy sampling.
        """
        import random

        # n=50, k=5 triggers sampling since C(50,5) = 2,118,760 > 1000
        n = 50
        k = 5
        items = list(range(n))
        # Test with multiple random seeds to ensure robustness
        for seed in range(5):
            random.seed(seed)
            result = generate_covering_chunks(items, k=k)
            # Collect all covered pairs
            covered_pairs: set[frozenset[int]] = set()
            for _, indices in result:
                for i in range(len(indices)):
                    for j in range(i + 1, len(indices)):
                        covered_pairs.add(frozenset([indices[i], indices[j]]))
            # All C(50,2) = 1225 pairs should be covered
            expected_pairs = {frozenset([i, j]) for i in range(n) for j in range(i + 1, n)}
            assert len(expected_pairs) == 1225
            assert covered_pairs == expected_pairs, (
                f'Seed {seed}: Missing {len(expected_pairs - covered_pairs)} pairs'
            )

    def test_fallback_creates_pair_chunks_for_uncovered(self):
        """Verify fallback creates size-2 chunks for any remaining uncovered pairs.

        When the greedy algorithm breaks early (best_covered_count == 0),
        the fallback logic should create minimal chunks to cover remaining pairs.
        """
        import random

        # Use a large n with small k to stress the sampling
        n = 100
        k = 4
        items = list(range(n))
        random.seed(42)
        result = generate_covering_chunks(items, k=k)
        # Collect all covered pairs
        covered_pairs: set[frozenset[int]] = set()
        for _, indices in result:
            for i in range(len(indices)):
                for j in range(i + 1, len(indices)):
                    covered_pairs.add(frozenset([indices[i], indices[j]]))
        # All C(100,2) = 4950 pairs must be covered
        expected_pairs = {frozenset([i, j]) for i in range(n) for j in range(i + 1, n)}
        assert len(expected_pairs) == 4950
        assert covered_pairs == expected_pairs, (
            f'Missing {len(expected_pairs - covered_pairs)} pairs'
        )

    def test_duplicate_sampling_safety(self):
        """Verify the algorithm handles duplicate random samples gracefully.

        When k is large relative to n, there are fewer unique combinations
        and random sampling may generate many duplicates. The safety counter
        should prevent infinite loops.
        """
        import random

        # n=20, k=10: C(20,10) = 184,756 > 1000 triggers sampling
        # With large k relative to n, duplicates are more likely
        n = 20
        k = 10
        items = list(range(n))
        random.seed(123)
        result = generate_covering_chunks(items, k=k)
        # Collect all covered pairs
        covered_pairs: set[frozenset[int]] = set()
        for _, indices in result:
            for i in range(len(indices)):
                for j in range(i + 1, len(indices)):
                    covered_pairs.add(frozenset([indices[i], indices[j]]))
        # All C(20,2) = 190 pairs should be covered
        expected_pairs = {frozenset([i, j]) for i in range(n) for j in range(i + 1, n)}
        assert len(expected_pairs) == 190
        assert covered_pairs == expected_pairs

    def test_stress_multiple_seeds(self):
        """Stress test with multiple random seeds to ensure robustness.

        The combination of greedy sampling and fallback logic should
        guarantee all pairs are covered regardless of random seed.
        """
        import random

        n = 30
        k = 5
        items = list(range(n))
        expected_pairs = {frozenset([i, j]) for i in range(n) for j in range(i + 1, n)}
        for seed in range(10):
            random.seed(seed)
            result = generate_covering_chunks(items, k=k)
            covered_pairs: set[frozenset[int]] = set()
            for _, indices in result:
                for i in range(len(indices)):
                    for j in range(i + 1, len(indices)):
                        covered_pairs.add(frozenset([indices[i], indices[j]]))
            assert covered_pairs == expected_pairs, f'Seed {seed} failed to cover all pairs'
| {
"repo_id": "getzep/graphiti",
"file_path": "tests/utils/test_content_chunking.py",
"license": "Apache License 2.0",
"lines": 611,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:examples/azure-openai/azure_openai_neo4j.py | """
Copyright 2025, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncio
import json
import logging
import os
from datetime import datetime, timezone
from logging import INFO
from dotenv import load_dotenv
from openai import AsyncOpenAI
from graphiti_core import Graphiti
from graphiti_core.embedder.azure_openai import AzureOpenAIEmbedderClient
from graphiti_core.llm_client.azure_openai_client import AzureOpenAILLMClient
from graphiti_core.llm_client.config import LLMConfig
from graphiti_core.nodes import EpisodeType
#################################################
# CONFIGURATION
#################################################
# Set up logging and environment variables for
# connecting to Neo4j database and Azure OpenAI
#################################################
# Configure logging
logging.basicConfig(
    level=INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)
logger = logging.getLogger(__name__)

# Load variables from a local .env file (if present) into the environment
load_dotenv()

# Neo4j connection parameters
# Make sure Neo4j Desktop is running with a local DBMS started
neo4j_uri = os.environ.get('NEO4J_URI', 'bolt://localhost:7687')
neo4j_user = os.environ.get('NEO4J_USER', 'neo4j')
neo4j_password = os.environ.get('NEO4J_PASSWORD', 'password')

# Azure OpenAI connection parameters (endpoint and key have no defaults)
azure_endpoint = os.environ.get('AZURE_OPENAI_ENDPOINT')
azure_api_key = os.environ.get('AZURE_OPENAI_API_KEY')
azure_deployment = os.environ.get('AZURE_OPENAI_DEPLOYMENT', 'gpt-4.1')
azure_embedding_deployment = os.environ.get(
    'AZURE_OPENAI_EMBEDDING_DEPLOYMENT', 'text-embedding-3-small'
)

# Fail fast at import time when required Azure credentials are missing
if not azure_endpoint or not azure_api_key:
    raise ValueError('AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_API_KEY must be set')
async def main():
    """Run the end-to-end Azure OpenAI + Neo4j Graphiti demo.

    Flow: build the Azure OpenAI LLM/embedder clients, connect Graphiti to
    Neo4j, ingest three example episodes (two text, one JSON), run a hybrid
    search plus a center-node reranked search, and close the connection.
    """
    #################################################
    # INITIALIZATION
    #################################################
    # Connect to Neo4j and Azure OpenAI, then set up
    # Graphiti indices. This is required before using
    # other Graphiti functionality
    #################################################

    # Initialize Azure OpenAI client
    azure_client = AsyncOpenAI(
        base_url=f'{azure_endpoint}/openai/v1/',
        api_key=azure_api_key,
    )

    # Create LLM and Embedder clients
    llm_client = AzureOpenAILLMClient(
        azure_client=azure_client,
        config=LLMConfig(model=azure_deployment, small_model=azure_deployment),
    )
    embedder_client = AzureOpenAIEmbedderClient(
        azure_client=azure_client, model=azure_embedding_deployment
    )

    # Initialize Graphiti with Neo4j connection and Azure OpenAI clients
    graphiti = Graphiti(
        neo4j_uri,
        neo4j_user,
        neo4j_password,
        llm_client=llm_client,
        embedder=embedder_client,
    )

    try:
        #################################################
        # ADDING EPISODES
        #################################################
        # Episodes are the primary units of information
        # in Graphiti. They can be text or structured JSON
        # and are automatically processed to extract entities
        # and relationships.
        #################################################

        # Example: Add Episodes
        # Episodes list containing both text and JSON episodes
        episodes = [
            {
                'content': 'Kamala Harris is the Attorney General of California. She was previously '
                'the district attorney for San Francisco.',
                'type': EpisodeType.text,
                'description': 'podcast transcript',
            },
            {
                'content': 'As AG, Harris was in office from January 3, 2011 – January 3, 2017',
                'type': EpisodeType.text,
                'description': 'podcast transcript',
            },
            {
                'content': {
                    'name': 'Gavin Newsom',
                    'position': 'Governor',
                    'state': 'California',
                    'previous_role': 'Lieutenant Governor',
                    'previous_location': 'San Francisco',
                },
                'type': EpisodeType.json,
                'description': 'podcast metadata',
            },
        ]

        # Add episodes to the graph (JSON content is serialized before ingestion)
        for i, episode in enumerate(episodes):
            await graphiti.add_episode(
                name=f'California Politics {i}',
                episode_body=(
                    episode['content']
                    if isinstance(episode['content'], str)
                    else json.dumps(episode['content'])
                ),
                source=episode['type'],
                source_description=episode['description'],
                reference_time=datetime.now(timezone.utc),
            )
            print(f'Added episode: California Politics {i} ({episode["type"].value})')

        #################################################
        # BASIC SEARCH
        #################################################
        # The simplest way to retrieve relationships (edges)
        # from Graphiti is using the search method, which
        # performs a hybrid search combining semantic
        # similarity and BM25 text retrieval.
        #################################################

        # Perform a hybrid search combining semantic similarity and BM25 retrieval
        print("\nSearching for: 'Who was the California Attorney General?'")
        results = await graphiti.search('Who was the California Attorney General?')

        # Print search results
        print('\nSearch Results:')
        for result in results:
            print(f'UUID: {result.uuid}')
            print(f'Fact: {result.fact}')
            if hasattr(result, 'valid_at') and result.valid_at:
                print(f'Valid from: {result.valid_at}')
            if hasattr(result, 'invalid_at') and result.invalid_at:
                print(f'Valid until: {result.invalid_at}')
            print('---')

        #################################################
        # CENTER NODE SEARCH
        #################################################
        # For more contextually relevant results, you can
        # use a center node to rerank search results based
        # on their graph distance to a specific node
        #################################################

        # Use the top search result's UUID as the center node for reranking
        if results and len(results) > 0:
            # Get the source node UUID from the top result
            center_node_uuid = results[0].source_node_uuid

            print('\nReranking search results based on graph distance:')
            print(f'Using center node UUID: {center_node_uuid}')

            reranked_results = await graphiti.search(
                'Who was the California Attorney General?',
                center_node_uuid=center_node_uuid,
            )

            # Print reranked search results
            print('\nReranked Search Results:')
            for result in reranked_results:
                print(f'UUID: {result.uuid}')
                print(f'Fact: {result.fact}')
                if hasattr(result, 'valid_at') and result.valid_at:
                    print(f'Valid from: {result.valid_at}')
                if hasattr(result, 'invalid_at') and result.invalid_at:
                    print(f'Valid until: {result.invalid_at}')
                print('---')
        else:
            print('No results found in the initial search to use as center node.')

    finally:
        #################################################
        # CLEANUP
        #################################################
        # Always close the connection to Neo4j when
        # finished to properly release resources
        #################################################

        # Close the connection
        await graphiti.close()
        print('\nConnection closed')
# Script entry point: run the async demo on a fresh event loop.
if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "getzep/graphiti",
"file_path": "examples/azure-openai/azure_openai_neo4j.py",
"license": "Apache License 2.0",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/decorators.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import functools
import inspect
from collections.abc import Awaitable, Callable
from typing import Any, TypeVar
from graphiti_core.driver.driver import GraphProvider
from graphiti_core.helpers import semaphore_gather
from graphiti_core.search.search_config import SearchResults
F = TypeVar('F', bound=Callable[..., Awaitable[Any]])
def handle_multiple_group_ids(func: F) -> F:
    """
    Decorator for FalkorDB methods that need to handle multiple group_ids.
    Runs the function for each group_id separately and merges results.

    FalkorDB maps each group_id to its own database, so a call spanning
    several group_ids is fanned out concurrently — one invocation per
    group_id, each with a driver cloned onto that group's database — and
    the per-group results are merged by type (SearchResults, list, tuple).
    For any other provider, or zero/one group_id, the call passes through
    unchanged.
    """

    @functools.wraps(func)
    async def wrapper(self, *args, **kwargs):
        # Position of 'group_ids' in the wrapped function's signature.
        group_ids_func_pos = get_parameter_position(func, 'group_ids')
        group_ids_pos = (
            group_ids_func_pos - 1 if group_ids_func_pos is not None else None
        )  # signature position counts `self` at index 0; *args does not include it
        group_ids = kwargs.get('group_ids')

        # If not in kwargs and position exists, get from args
        if group_ids is None and group_ids_pos is not None and len(args) > group_ids_pos:
            group_ids = args[group_ids_pos]

        # Only handle FalkorDB with multiple group_ids
        if (
            hasattr(self, 'clients')
            and hasattr(self.clients, 'driver')
            and self.clients.driver.provider == GraphProvider.FALKORDB
            and group_ids
            and len(group_ids) > 1
        ):
            # Execute for each group_id concurrently
            driver = self.clients.driver

            async def execute_for_group(gid: str):
                # Remove group_ids from args if it was passed positionally
                # (it is re-supplied below as a single-element kwarg).
                filtered_args = list(args)
                if group_ids_pos is not None and len(args) > group_ids_pos:
                    filtered_args.pop(group_ids_pos)
                # Clone the driver onto the group's database so each call is isolated.
                return await func(
                    self,
                    *filtered_args,
                    **{**kwargs, 'group_ids': [gid], 'driver': driver.clone(database=gid)},
                )

            results = await semaphore_gather(
                *[execute_for_group(gid) for gid in group_ids],
                max_coroutines=getattr(self, 'max_coroutines', None),
            )

            # Merge results based on type
            if isinstance(results[0], SearchResults):
                return SearchResults.merge(results)
            elif isinstance(results[0], list):
                return [item for result in results for item in result]
            elif isinstance(results[0], tuple):
                # Handle tuple outputs (like build_communities returning (nodes, edges))
                merged_tuple = []
                for i in range(len(results[0])):
                    component_results = [result[i] for result in results]
                    if isinstance(component_results[0], list):
                        merged_tuple.append(
                            [item for component in component_results for item in component]
                        )
                    else:
                        # Non-list components are collected as-is, one entry per group.
                        merged_tuple.append(component_results)
                return tuple(merged_tuple)
            else:
                return results

        # Normal execution
        return await func(self, *args, **kwargs)

    return wrapper  # type: ignore
def get_parameter_position(func: Callable, param_name: str) -> int | None:
"""
Returns the positional index of a parameter in the function signature.
If the parameter is not found, returns None.
"""
sig = inspect.signature(func)
for idx, (name, _param) in enumerate(sig.parameters.items()):
if name == param_name:
return idx
return None
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/decorators.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:tests/llm_client/test_azure_openai_client.py | from types import SimpleNamespace
import pytest
from pydantic import BaseModel
from graphiti_core.llm_client.azure_openai_client import AzureOpenAILLMClient
from graphiti_core.llm_client.config import LLMConfig
class DummyResponses:
    """Stub for the Azure SDK ``responses`` surface; records every parse() call."""

    def __init__(self):
        # Keyword-argument dicts from each parse() invocation, in call order.
        self.parse_calls: list[dict] = []

    async def parse(self, **kwargs):
        self.parse_calls.append(dict(kwargs))
        return SimpleNamespace(output_text='{}')
class DummyChatCompletions:
    """Stub chat-completions endpoint that records calls and returns canned replies."""

    def __init__(self):
        self.create_calls: list[dict] = []
        self.parse_calls: list[dict] = []

    async def create(self, **kwargs):
        # Remember the call, then mimic a completion whose content is empty JSON.
        self.create_calls.append(kwargs)
        reply = SimpleNamespace(message=SimpleNamespace(content='{}'))
        return SimpleNamespace(choices=[reply])

    async def parse(self, **kwargs):
        # ``response_format`` carries the target model class; instantiate it so
        # callers receive a parsed object, mirroring the real SDK behavior.
        self.parse_calls.append(kwargs)
        model_cls = kwargs.get('response_format')
        reply = SimpleNamespace(message=SimpleNamespace(parsed=model_cls(foo='bar')))
        return SimpleNamespace(choices=[reply])
class DummyChat:
    """Mirrors the SDK's ``client.chat`` namespace."""

    def __init__(self):
        self.completions = DummyChatCompletions()
class DummyBeta:
    """Mirrors the SDK's ``client.beta`` namespace (``beta.chat.completions``)."""

    def __init__(self):
        self.chat = DummyChat()
class DummyAzureClient:
    """Top-level fake Azure OpenAI client exposing responses, chat, and beta APIs."""

    def __init__(self):
        self.responses = DummyResponses()
        self.chat = DummyChat()
        self.beta = DummyBeta()
class DummyResponseModel(BaseModel):
    """Minimal structured-output schema used as ``response_model`` in these tests."""

    foo: str
@pytest.mark.asyncio
async def test_structured_completion_strips_reasoning_for_unsupported_models():
    """Non-reasoning models must not receive reasoning/verbosity parameters."""
    azure_stub = DummyAzureClient()
    llm = AzureOpenAILLMClient(
        azure_client=azure_stub,
        config=LLMConfig(),
        reasoning='minimal',
        verbosity='low',
    )

    await llm._create_structured_completion(
        model='gpt-4.1',
        messages=[],
        temperature=0.4,
        max_tokens=64,
        response_model=DummyResponseModel,
        reasoning='minimal',
        verbosity='low',
    )

    # Non-reasoning models go through beta.chat.completions.parse exactly once.
    parse_calls = azure_stub.beta.chat.completions.parse_calls
    assert len(parse_calls) == 1
    sent = parse_calls[0]
    assert sent['model'] == 'gpt-4.1'
    assert sent['messages'] == []
    assert sent['max_tokens'] == 64
    assert sent['response_format'] is DummyResponseModel
    assert sent['temperature'] == 0.4
    # Reasoning-only knobs must be stripped for models that reject them.
    for forbidden in ('reasoning', 'verbosity', 'text'):
        assert forbidden not in sent
@pytest.mark.asyncio
async def test_reasoning_fields_forwarded_for_supported_models():
    """Reasoning models drop temperature and forward reasoning/verbosity knobs."""
    azure_stub = DummyAzureClient()
    llm = AzureOpenAILLMClient(
        azure_client=azure_stub,
        config=LLMConfig(),
        reasoning='intense',
        verbosity='high',
    )

    await llm._create_structured_completion(
        model='o1-custom',
        messages=[],
        temperature=0.7,
        max_tokens=128,
        response_model=DummyResponseModel,
        reasoning='intense',
        verbosity='high',
    )

    # Reasoning models route through the responses API: temperature is dropped
    # and the reasoning/verbosity settings are forwarded in their nested shapes.
    sent = azure_stub.responses.parse_calls[0]
    assert 'temperature' not in sent
    assert sent['reasoning'] == {'effort': 'intense'}
    assert sent['text'] == {'verbosity': 'high'}

    await llm._create_completion(
        model='o1-custom',
        messages=[],
        temperature=0.7,
        max_tokens=128,
    )
    # Plain completions for reasoning models also omit temperature.
    assert 'temperature' not in azure_stub.chat.completions.create_calls[0]
| {
"repo_id": "getzep/graphiti",
"file_path": "tests/llm_client/test_azure_openai_client.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:mcp_server/main.py | #!/usr/bin/env python3
"""
Main entry point for Graphiti MCP Server
This is a backwards-compatible wrapper around the original graphiti_mcp_server.py
to maintain compatibility with existing deployment scripts and documentation.
Usage:
python main.py [args...]
All arguments are passed through to the original server implementation.
"""
import sys
from pathlib import Path

# Make the bundled ``src`` directory importable before touching the server code.
src_path = Path(__file__).with_name('src')
sys.path.insert(0, str(src_path))

if __name__ == '__main__':
    # Import lazily so the path tweak above is already in effect, then hand
    # all command-line arguments straight through to the original entry point.
    from graphiti_mcp_server import main

    main()
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/main.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
getzep/graphiti:mcp_server/src/config/schema.py | """Configuration schemas with pydantic-settings and YAML support."""
import os
from pathlib import Path
from typing import Any
import yaml
from pydantic import BaseModel, Field
from pydantic_settings import (
BaseSettings,
PydanticBaseSettingsSource,
SettingsConfigDict,
)
class YamlSettingsSource(PydanticBaseSettingsSource):
    """Custom settings source for loading from YAML files.

    Loads the whole YAML document in one pass (see ``__call__``) and expands
    ``${VAR}`` / ``${VAR:default}`` environment-variable references in every
    string value, recursing through nested dicts and lists.
    """

    def __init__(self, settings_cls: type[BaseSettings], config_path: Path | None = None):
        super().__init__(settings_cls)
        # Default to ./config.yaml relative to the current working directory.
        self.config_path = config_path or Path('config.yaml')

    def _expand_env_vars(self, value: Any) -> Any:
        """Recursively expand environment variables in configuration values.

        Strings that consist ENTIRELY of one ``${VAR}`` expression are coerced:
        'true'-like -> True, 'false'-like -> False, empty -> None. Strings with
        embedded expressions get plain text substitution with no coercion.
        """
        if isinstance(value, str):
            # Support ${VAR} and ${VAR:default} syntax
            import re

            def replacer(match):
                var_name = match.group(1)
                # group(3) is the text after ':' — may legitimately be ''.
                default_value = match.group(3) if match.group(3) is not None else ''
                return os.environ.get(var_name, default_value)

            pattern = r'\$\{([^:}]+)(:([^}]*))?\}'
            # Check if the entire value is a single env var expression
            full_match = re.fullmatch(pattern, value)
            if full_match:
                result = replacer(full_match)
                # Convert boolean-like strings to actual booleans
                if isinstance(result, str):
                    lower_result = result.lower().strip()
                    if lower_result in ('true', '1', 'yes', 'on'):
                        return True
                    elif lower_result in ('false', '0', 'no', 'off'):
                        return False
                    elif lower_result == '':
                        # Empty string means env var not set - return None for optional fields
                        return None
                return result
            else:
                # Otherwise, do string substitution (keep as strings for partial replacements)
                return re.sub(pattern, replacer, value)
        elif isinstance(value, dict):
            return {k: self._expand_env_vars(v) for k, v in value.items()}
        elif isinstance(value, list):
            return [self._expand_env_vars(item) for item in value]
        # Non-string scalars (int, bool, None, ...) pass through untouched.
        return value

    def get_field_value(self, field_name: str, field_info: Any) -> Any:
        """Get field value from YAML config.

        NOTE(review): the pydantic-settings base class documents this as
        returning a (value, field_key, is_complex) tuple; returning None works
        here only because ``__call__`` is overridden to bypass per-field
        lookup — confirm against the installed pydantic-settings version.
        """
        return None

    def __call__(self) -> dict[str, Any]:
        """Load and parse YAML configuration."""
        # Missing file is not an error: this source simply contributes nothing.
        if not self.config_path.exists():
            return {}

        with open(self.config_path) as f:
            raw_config = yaml.safe_load(f) or {}

        # Expand environment variables
        return self._expand_env_vars(raw_config)
class ServerConfig(BaseModel):
    """Server configuration: transport mechanism and network binding."""

    transport: str = Field(
        default='http',
        description='Transport type: http (default, recommended), stdio, or sse (deprecated)',
    )
    host: str = Field(default='0.0.0.0', description='Server host')  # bind all interfaces
    port: int = Field(default=8000, description='Server port')
class OpenAIProviderConfig(BaseModel):
    """OpenAI provider configuration (API key, endpoint, optional org)."""

    api_key: str | None = None
    api_url: str = 'https://api.openai.com/v1'
    organization_id: str | None = None
class AzureOpenAIProviderConfig(BaseModel):
    """Azure OpenAI provider configuration."""

    api_key: str | None = None
    api_url: str | None = None  # the Azure resource endpoint; no sensible default
    api_version: str = '2024-10-21'
    deployment_name: str | None = None
    # When True, authenticate via Azure AD instead of an API key
    # (assumed from the flag name — confirm in the client factory).
    use_azure_ad: bool = False
class AnthropicProviderConfig(BaseModel):
    """Anthropic provider configuration."""

    api_key: str | None = None
    api_url: str = 'https://api.anthropic.com'
    max_retries: int = 3
class GeminiProviderConfig(BaseModel):
    """Gemini provider configuration."""
    api_key: str | None = None
    # project_id/location are Google Cloud (Vertex AI style) settings.
    project_id: str | None = None
    location: str = 'us-central1'
class GroqProviderConfig(BaseModel):
    """Groq provider configuration."""
    api_key: str | None = None
    # Groq exposes an OpenAI-compatible endpoint.
    api_url: str = 'https://api.groq.com/openai/v1'
class VoyageProviderConfig(BaseModel):
    """Voyage AI provider configuration (embeddings only)."""
    api_key: str | None = None
    api_url: str = 'https://api.voyageai.com/v1'
    model: str = 'voyage-3'
class LLMProvidersConfig(BaseModel):
    """LLM providers configuration.

    Each entry is optional; only the provider selected in LLMConfig.provider
    needs to be populated.
    """
    openai: OpenAIProviderConfig | None = None
    azure_openai: AzureOpenAIProviderConfig | None = None
    anthropic: AnthropicProviderConfig | None = None
    gemini: GeminiProviderConfig | None = None
    groq: GroqProviderConfig | None = None
class LLMConfig(BaseModel):
    """LLM configuration."""
    # Provider key; must match one of the LLMProvidersConfig field names.
    provider: str = Field(default='openai', description='LLM provider')
    model: str = Field(default='gpt-4o-mini', description='Model name')
    # None (not 0.0) so reasoning models that reject temperature get no value.
    temperature: float | None = Field(
        default=None, description='Temperature (optional, defaults to None for reasoning models)'
    )
    max_tokens: int = Field(default=4096, description='Max tokens')
    providers: LLMProvidersConfig = Field(default_factory=LLMProvidersConfig)
class EmbedderProvidersConfig(BaseModel):
    """Embedder providers configuration.

    Each entry is optional; only the provider selected in
    EmbedderConfig.provider needs to be populated.
    """
    openai: OpenAIProviderConfig | None = None
    azure_openai: AzureOpenAIProviderConfig | None = None
    gemini: GeminiProviderConfig | None = None
    voyage: VoyageProviderConfig | None = None
class EmbedderConfig(BaseModel):
    """Embedder configuration."""
    provider: str = Field(default='openai', description='Embedder provider')
    model: str = Field(default='text-embedding-3-small', description='Model name')
    # Must match the vector size the graph database indexes were built with.
    dimensions: int = Field(default=1536, description='Embedding dimensions')
    providers: EmbedderProvidersConfig = Field(default_factory=EmbedderProvidersConfig)
class Neo4jProviderConfig(BaseModel):
    """Neo4j provider configuration."""
    uri: str = 'bolt://localhost:7687'
    username: str = 'neo4j'
    password: str | None = None
    database: str = 'neo4j'
    # Enterprise-only parallel runtime toggle; off by default.
    use_parallel_runtime: bool = False
class FalkorDBProviderConfig(BaseModel):
    """FalkorDB provider configuration (FalkorDB speaks the Redis protocol)."""
    uri: str = 'redis://localhost:6379'
    password: str | None = None
    database: str = 'default_db'
class DatabaseProvidersConfig(BaseModel):
    """Database providers configuration; only the selected provider is needed."""
    neo4j: Neo4jProviderConfig | None = None
    falkordb: FalkorDBProviderConfig | None = None
class DatabaseConfig(BaseModel):
    """Database configuration."""
    # Provider key; must match one of the DatabaseProvidersConfig field names.
    provider: str = Field(default='falkordb', description='Database provider')
    providers: DatabaseProvidersConfig = Field(default_factory=DatabaseProvidersConfig)
class EntityTypeConfig(BaseModel):
    """A single custom entity type: its name and a description of what it models."""
    name: str
    description: str
class GraphitiAppConfig(BaseModel):
    """Graphiti-specific configuration."""
    group_id: str = Field(default='main', description='Group ID')
    # May arrive as None from YAML/env; normalized to '' in model_post_init.
    episode_id_prefix: str | None = Field(default='', description='Episode ID prefix')
    user_id: str = Field(default='mcp_user', description='User ID')
    entity_types: list[EntityTypeConfig] = Field(default_factory=list)
    def model_post_init(self, __context) -> None:
        """Convert None to empty string for episode_id_prefix."""
        if self.episode_id_prefix is None:
            self.episode_id_prefix = ''
class GraphitiConfig(BaseSettings):
    """Graphiti configuration with YAML and environment support."""
    server: ServerConfig = Field(default_factory=ServerConfig)
    llm: LLMConfig = Field(default_factory=LLMConfig)
    embedder: EmbedderConfig = Field(default_factory=EmbedderConfig)
    database: DatabaseConfig = Field(default_factory=DatabaseConfig)
    graphiti: GraphitiAppConfig = Field(default_factory=GraphitiAppConfig)
    # Additional server options
    destroy_graph: bool = Field(default=False, description='Clear graph on startup')
    # '__' lets nested fields be set from env vars (pydantic-settings),
    # e.g. SERVER__PORT=9000 -> server.port.
    model_config = SettingsConfigDict(
        env_prefix='',
        env_nested_delimiter='__',
        case_sensitive=False,
        extra='ignore',
    )
    @classmethod
    def settings_customise_sources(
        cls,
        settings_cls: type[BaseSettings],
        init_settings: PydanticBaseSettingsSource,
        env_settings: PydanticBaseSettingsSource,
        dotenv_settings: PydanticBaseSettingsSource,
        file_secret_settings: PydanticBaseSettingsSource,
    ) -> tuple[PydanticBaseSettingsSource, ...]:
        """Customize settings sources to include YAML.

        CONFIG_PATH is placed in the environment from the --config CLI flag
        before this class is instantiated.
        """
        config_path = Path(os.environ.get('CONFIG_PATH', 'config/config.yaml'))
        yaml_settings = YamlSettingsSource(settings_cls, config_path)
        # Priority: CLI args (init) > env vars > yaml > defaults
        return (init_settings, env_settings, yaml_settings, dotenv_settings)
    def apply_cli_overrides(self, args) -> None:
        """Apply CLI argument overrides to configuration.

        Args:
            args: An argparse.Namespace (or similar). The hasattr guards make
                any subset of attributes acceptable; falsy values are treated
                as "not provided".
        """
        # Override server settings
        if hasattr(args, 'transport') and args.transport:
            self.server.transport = args.transport
        # Override LLM settings
        if hasattr(args, 'llm_provider') and args.llm_provider:
            self.llm.provider = args.llm_provider
        if hasattr(args, 'model') and args.model:
            self.llm.model = args.model
        # Temperature is checked against None explicitly so 0.0 is honored.
        if hasattr(args, 'temperature') and args.temperature is not None:
            self.llm.temperature = args.temperature
        # Override embedder settings
        if hasattr(args, 'embedder_provider') and args.embedder_provider:
            self.embedder.provider = args.embedder_provider
        if hasattr(args, 'embedder_model') and args.embedder_model:
            self.embedder.model = args.embedder_model
        # Override database settings
        if hasattr(args, 'database_provider') and args.database_provider:
            self.database.provider = args.database_provider
        # Override Graphiti settings
        if hasattr(args, 'group_id') and args.group_id:
            self.graphiti.group_id = args.group_id
        if hasattr(args, 'user_id') and args.user_id:
            self.graphiti.user_id = args.user_id
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/src/config/schema.py",
"license": "Apache License 2.0",
"lines": 215,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:mcp_server/src/graphiti_mcp_server.py | #!/usr/bin/env python3
"""
Graphiti MCP Server - Exposes Graphiti functionality through the Model Context Protocol (MCP)
"""
import argparse
import asyncio
import logging
import os
import sys
from pathlib import Path
from typing import Any, Optional
from dotenv import load_dotenv
from graphiti_core import Graphiti
from graphiti_core.edges import EntityEdge
from graphiti_core.nodes import EpisodeType, EpisodicNode
from graphiti_core.search.search_filters import SearchFilters
from graphiti_core.utils.maintenance.graph_data_operations import clear_data
from mcp.server.fastmcp import FastMCP
from pydantic import BaseModel
from starlette.responses import JSONResponse
from config.schema import GraphitiConfig, ServerConfig
from models.response_types import (
EpisodeSearchResponse,
ErrorResponse,
FactSearchResponse,
NodeResult,
NodeSearchResponse,
StatusResponse,
SuccessResponse,
)
from services.factories import DatabaseDriverFactory, EmbedderFactory, LLMClientFactory
from services.queue_service import QueueService
from utils.formatting import format_fact_result
# Load .env file from mcp_server directory
# (resolved relative to this file so it works regardless of the CWD)
mcp_server_dir = Path(__file__).parent.parent
env_file = mcp_server_dir / '.env'
if env_file.exists():
    load_dotenv(env_file)
else:
    # Try current working directory as fallback
    load_dotenv()
# Semaphore limit for concurrent Graphiti operations.
#
# This controls how many episodes can be processed simultaneously. Each episode
# processing involves multiple LLM calls (entity extraction, deduplication, etc.),
# so the actual number of concurrent LLM requests will be higher.
#
# TUNING GUIDELINES:
#
# LLM Provider Rate Limits (requests per minute):
# - OpenAI Tier 1 (free): 3 RPM -> SEMAPHORE_LIMIT=1-2
# - OpenAI Tier 2: 60 RPM -> SEMAPHORE_LIMIT=5-8
# - OpenAI Tier 3: 500 RPM -> SEMAPHORE_LIMIT=10-15
# - OpenAI Tier 4: 5,000 RPM -> SEMAPHORE_LIMIT=20-50
# - Anthropic (default): 50 RPM -> SEMAPHORE_LIMIT=5-8
# - Anthropic (high tier): 1,000 RPM -> SEMAPHORE_LIMIT=15-30
# - Azure OpenAI (varies): Consult your quota -> adjust accordingly
#
# SYMPTOMS:
# - Too high: 429 rate limit errors, increased costs from parallel processing
# - Too low: Slow throughput, underutilized API quota
#
# MONITORING:
# - Watch logs for rate limit errors (429)
# - Monitor episode processing times
# - Check LLM provider dashboard for actual request rates
#
# DEFAULT: 10 (suitable for OpenAI Tier 3, mid-tier Anthropic)
# See the tuning guidelines above; int() raises early on a malformed value.
SEMAPHORE_LIMIT = int(os.getenv('SEMAPHORE_LIMIT', 10))
# Configure structured logging with timestamps.
# Logs go to stderr so stdout stays clean for the stdio MCP transport.
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
logging.basicConfig(
    level=logging.INFO,
    format=LOG_FORMAT,
    datefmt=DATE_FORMAT,
    stream=sys.stderr,
)
# Configure specific loggers
logging.getLogger('uvicorn').setLevel(logging.INFO)
logging.getLogger('uvicorn.access').setLevel(logging.WARNING)  # Reduce access log noise
logging.getLogger('mcp.server.streamable_http_manager').setLevel(
    logging.WARNING
)  # Reduce MCP noise
# Patch uvicorn's logging config to use our format
def configure_uvicorn_logging():
    """Re-route uvicorn's loggers through our stderr handler and log format.

    Uvicorn installs its own handlers at startup; this replaces them so all
    output matches LOG_FORMAT/DATE_FORMAT and goes to stderr.
    """
    for name in ('uvicorn', 'uvicorn.error', 'uvicorn.access'):
        target = logging.getLogger(name)
        # Drop uvicorn's handlers, then attach one formatted like ours.
        target.handlers.clear()
        stream_handler = logging.StreamHandler(sys.stderr)
        stream_handler.setFormatter(logging.Formatter(LOG_FORMAT, datefmt=DATE_FORMAT))
        target.addHandler(stream_handler)
        # Stop records from also reaching the root logger (double output).
        target.propagate = False
logger = logging.getLogger(__name__)
# Create global config instance - will be properly initialized later
# (annotation only; bound inside initialize_server())
config: GraphitiConfig
# MCP server instructions (sent verbatim to MCP clients)
GRAPHITI_MCP_INSTRUCTIONS = """
Graphiti is a memory service for AI agents built on a knowledge graph. Graphiti performs well
with dynamic data such as user interactions, changing enterprise data, and external information.
Graphiti transforms information into a richly connected knowledge network, allowing you to
capture relationships between concepts, entities, and information. The system organizes data as episodes
(content snippets), nodes (entities), and facts (relationships between entities), creating a dynamic,
queryable memory store that evolves with new information. Graphiti supports multiple data formats, including
structured JSON data, enabling seamless integration with existing data pipelines and systems.
Facts contain temporal metadata, allowing you to track the time of creation and whether a fact is invalid
(superseded by new information).
Key capabilities:
1. Add episodes (text, messages, or JSON) to the knowledge graph with the add_memory tool
2. Search for nodes (entities) in the graph using natural language queries with search_nodes
3. Find relevant facts (relationships between entities) with search_facts
4. Retrieve specific entity edges or episodes by UUID
5. Manage the knowledge graph with tools like delete_episode, delete_entity_edge, and clear_graph
The server connects to a database for persistent storage and uses language models for certain operations.
Each piece of information is organized by group_id, allowing you to maintain separate knowledge domains.
When adding information, provide descriptive names and detailed content to improve search quality.
When searching, use specific queries and consider filtering by group_id for more relevant results.
For optimal performance, ensure the database is properly configured and accessible, and valid
API keys are provided for any language model operations.
"""
# MCP server instance
mcp = FastMCP(
    'Graphiti Agent Memory',
    instructions=GRAPHITI_MCP_INSTRUCTIONS,
)
# Global services (populated by initialize_server())
graphiti_service: Optional['GraphitiService'] = None
queue_service: QueueService | None = None
# Global client for backward compatibility
graphiti_client: Graphiti | None = None
# Annotation only; bound to the service's semaphore in initialize_server().
semaphore: asyncio.Semaphore
class GraphitiService:
    """Graphiti service using the unified configuration system.

    Wraps a lazily-initialized Graphiti client plus the concurrency
    semaphore shared with the rest of the server.
    """
    def __init__(self, config: GraphitiConfig, semaphore_limit: int = 10):
        # semaphore_limit caps concurrent episode processing (see SEMAPHORE_LIMIT).
        self.config = config
        self.semaphore_limit = semaphore_limit
        self.semaphore = asyncio.Semaphore(semaphore_limit)
        self.client: Graphiti | None = None
        # dict[str, type[BaseModel]] of custom entity types, or None for defaults.
        self.entity_types = None
    async def initialize(self) -> None:
        """Initialize the Graphiti client with factory-created components.

        Raises:
            RuntimeError: with setup instructions when the database is
                unreachable; re-raises any other initialization failure.
        """
        try:
            # Create clients using factories
            llm_client = None
            embedder_client = None
            # Create LLM client based on configured provider.
            # Failures are non-fatal: the server can run without an LLM.
            try:
                llm_client = LLMClientFactory.create(self.config.llm)
            except Exception as e:
                logger.warning(f'Failed to create LLM client: {e}')
            # Create embedder client based on configured provider (also non-fatal).
            try:
                embedder_client = EmbedderFactory.create(self.config.embedder)
            except Exception as e:
                logger.warning(f'Failed to create embedder client: {e}')
            # Get database configuration
            db_config = DatabaseDriverFactory.create_config(self.config.database)
            # Build entity types from configuration
            custom_types = None
            if self.config.graphiti.entity_types:
                custom_types = {}
                for entity_type in self.config.graphiti.entity_types:
                    # Create a dynamic Pydantic model for each entity type
                    # Note: Don't use 'name' as it's a protected Pydantic attribute
                    entity_model = type(
                        entity_type.name,
                        (BaseModel,),
                        {
                            '__doc__': entity_type.description,
                        },
                    )
                    custom_types[entity_type.name] = entity_model
            # Store entity types for later use
            self.entity_types = custom_types
            # Initialize Graphiti client with appropriate driver
            try:
                if self.config.database.provider.lower() == 'falkordb':
                    # For FalkorDB, create a FalkorDriver instance directly
                    from graphiti_core.driver.falkordb_driver import FalkorDriver
                    falkor_driver = FalkorDriver(
                        host=db_config['host'],
                        port=db_config['port'],
                        password=db_config['password'],
                        database=db_config['database'],
                    )
                    self.client = Graphiti(
                        graph_driver=falkor_driver,
                        llm_client=llm_client,
                        embedder=embedder_client,
                        max_coroutines=self.semaphore_limit,
                    )
                else:
                    # For Neo4j (default), use the original approach
                    self.client = Graphiti(
                        uri=db_config['uri'],
                        user=db_config['user'],
                        password=db_config['password'],
                        llm_client=llm_client,
                        embedder=embedder_client,
                        max_coroutines=self.semaphore_limit,
                    )
            except Exception as db_error:
                # Check for connection errors by substring match on the message,
                # and convert them into actionable setup instructions.
                error_msg = str(db_error).lower()
                if 'connection refused' in error_msg or 'could not connect' in error_msg:
                    db_provider = self.config.database.provider
                    if db_provider.lower() == 'falkordb':
                        raise RuntimeError(
                            f'\n{"=" * 70}\n'
                            f'Database Connection Error: FalkorDB is not running\n'
                            f'{"=" * 70}\n\n'
                            f'FalkorDB at {db_config["host"]}:{db_config["port"]} is not accessible.\n\n'
                            f'To start FalkorDB:\n'
                            f' - Using Docker Compose: cd mcp_server && docker compose up\n'
                            f' - Or run FalkorDB manually: docker run -p 6379:6379 falkordb/falkordb\n\n'
                            f'{"=" * 70}\n'
                        ) from db_error
                    elif db_provider.lower() == 'neo4j':
                        raise RuntimeError(
                            f'\n{"=" * 70}\n'
                            f'Database Connection Error: Neo4j is not running\n'
                            f'{"=" * 70}\n\n'
                            f'Neo4j at {db_config.get("uri", "unknown")} is not accessible.\n\n'
                            f'To start Neo4j:\n'
                            f' - Using Docker Compose: cd mcp_server && docker compose -f docker/docker-compose-neo4j.yml up\n'
                            f' - Or install Neo4j Desktop from: https://neo4j.com/download/\n'
                            f' - Or run Neo4j manually: docker run -p 7474:7474 -p 7687:7687 neo4j:latest\n\n'
                            f'{"=" * 70}\n'
                        ) from db_error
                    else:
                        raise RuntimeError(
                            f'\n{"=" * 70}\n'
                            f'Database Connection Error: {db_provider} is not running\n'
                            f'{"=" * 70}\n\n'
                            f'{db_provider} at {db_config.get("uri", "unknown")} is not accessible.\n\n'
                            f'Please ensure {db_provider} is running and accessible.\n\n'
                            f'{"=" * 70}\n'
                        ) from db_error
                # Re-raise other errors
                raise
            # Build indices
            await self.client.build_indices_and_constraints()
            logger.info('Successfully initialized Graphiti client')
            # Log configuration details
            if llm_client:
                logger.info(
                    f'Using LLM provider: {self.config.llm.provider} / {self.config.llm.model}'
                )
            else:
                logger.info('No LLM client configured - entity extraction will be limited')
            if embedder_client:
                logger.info(f'Using Embedder provider: {self.config.embedder.provider}')
            else:
                logger.info('No Embedder client configured - search will be limited')
            if self.entity_types:
                entity_type_names = list(self.entity_types.keys())
                logger.info(f'Using custom entity types: {", ".join(entity_type_names)}')
            else:
                logger.info('Using default entity types')
            logger.info(f'Using database: {self.config.database.provider}')
            logger.info(f'Using group_id: {self.config.graphiti.group_id}')
        except Exception as e:
            logger.error(f'Failed to initialize Graphiti client: {e}')
            raise
    async def get_client(self) -> Graphiti:
        """Get the Graphiti client, initializing if necessary.

        Raises:
            RuntimeError: if initialization completed without producing a client.
        """
        if self.client is None:
            await self.initialize()
        if self.client is None:
            raise RuntimeError('Failed to initialize Graphiti client')
        return self.client
@mcp.tool()
async def add_memory(
    name: str,
    episode_body: str,
    group_id: str | None = None,
    source: str = 'text',
    source_description: str = '',
    uuid: str | None = None,
) -> SuccessResponse | ErrorResponse:
    """Add an episode to memory. This is the primary way to add information to the graph.
    This function returns immediately and processes the episode addition in the background.
    Episodes for the same group_id are processed sequentially to avoid race conditions.
    Args:
        name (str): Name of the episode
        episode_body (str): The content of the episode to persist to memory. When source='json', this must be a
        properly escaped JSON string, not a raw Python dictionary. The JSON data will be
        automatically processed to extract entities and relationships.
        group_id (str, optional): A unique ID for this graph. If not provided, uses the default group_id from CLI
        or a generated one.
        source (str, optional): Source type, must be one of:
        - 'text': For plain text content (default)
        - 'json': For structured data
        - 'message': For conversation-style content
        source_description (str, optional): Description of the source
        uuid (str, optional): Optional UUID for the episode
    Examples:
        # Adding plain text content
        add_memory(
            name="Company News",
            episode_body="Acme Corp announced a new product line today.",
            source="text",
            source_description="news article",
            group_id="some_arbitrary_string"
        )
        # Adding structured JSON data
        # NOTE: episode_body should be a JSON string (standard JSON escaping)
        add_memory(
            name="Customer Profile",
            episode_body='{"company": {"name": "Acme Technologies"}, "products": [{"id": "P001", "name": "CloudSync"}, {"id": "P002", "name": "DataMiner"}]}',
            source="json",
            source_description="CRM data"
        )
    """
    global graphiti_service, queue_service
    if graphiti_service is None or queue_service is None:
        return ErrorResponse(error='Services not initialized')
    try:
        # Use the provided group_id or fall back to the default from config
        effective_group_id = group_id or config.graphiti.group_id
        # Try to parse the source as an EpisodeType enum, with fallback to text
        episode_type = EpisodeType.text  # Default
        if source:
            try:
                # Enum lookup by member name, case-insensitive.
                episode_type = EpisodeType[source.lower()]
            except (KeyError, AttributeError):
                # If the source doesn't match any enum value, use text as default
                logger.warning(f"Unknown source type '{source}', using 'text' as default")
                episode_type = EpisodeType.text
        # Submit to queue service for async processing; this call only
        # enqueues — the actual graph work happens in the background.
        await queue_service.add_episode(
            group_id=effective_group_id,
            name=name,
            content=episode_body,
            source_description=source_description,
            episode_type=episode_type,
            entity_types=graphiti_service.entity_types,
            uuid=uuid or None,  # Ensure None is passed if uuid is None
        )
        return SuccessResponse(
            message=f"Episode '{name}' queued for processing in group '{effective_group_id}'"
        )
    except Exception as e:
        error_msg = str(e)
        logger.error(f'Error queuing episode: {error_msg}')
        return ErrorResponse(error=f'Error queuing episode: {error_msg}')
@mcp.tool()
async def search_nodes(
    query: str,
    group_ids: list[str] | None = None,
    max_nodes: int = 10,
    entity_types: list[str] | None = None,
) -> NodeSearchResponse | ErrorResponse:
    """Search for nodes in the graph memory.
    Args:
        query: The search query
        group_ids: Optional list of group IDs to filter results
        max_nodes: Maximum number of nodes to return (default: 10)
        entity_types: Optional list of entity type names to filter by
    """
    global graphiti_service
    if graphiti_service is None:
        return ErrorResponse(error='Graphiti service not initialized')
    try:
        client = await graphiti_service.get_client()
        # Use the provided group_ids or fall back to the default from config if none provided.
        # Note: an explicit empty list means "no group filter" and is passed through.
        effective_group_ids = (
            group_ids
            if group_ids is not None
            else [config.graphiti.group_id]
            if config.graphiti.group_id
            else []
        )
        # Create search filters
        search_filters = SearchFilters(
            node_labels=entity_types,
        )
        # Use the search_ method with node search config
        from graphiti_core.search.search_config_recipes import NODE_HYBRID_SEARCH_RRF
        results = await client.search_(
            query=query,
            config=NODE_HYBRID_SEARCH_RRF,
            group_ids=effective_group_ids,
            search_filter=search_filters,
        )
        # Extract nodes from results; max_nodes is applied by slicing here.
        nodes = results.nodes[:max_nodes] if results.nodes else []
        if not nodes:
            return NodeSearchResponse(message='No relevant nodes found', nodes=[])
        # Format the results
        node_results = []
        for node in nodes:
            # Get attributes and ensure no embeddings are included
            attrs = node.attributes if hasattr(node, 'attributes') else {}
            # Remove any embedding keys that might be in attributes
            # (embedding vectors are large and useless to MCP clients)
            attrs = {k: v for k, v in attrs.items() if 'embedding' not in k.lower()}
            node_results.append(
                NodeResult(
                    uuid=node.uuid,
                    name=node.name,
                    labels=node.labels if node.labels else [],
                    created_at=node.created_at.isoformat() if node.created_at else None,
                    summary=node.summary,
                    group_id=node.group_id,
                    attributes=attrs,
                )
            )
        return NodeSearchResponse(message='Nodes retrieved successfully', nodes=node_results)
    except Exception as e:
        error_msg = str(e)
        logger.error(f'Error searching nodes: {error_msg}')
        return ErrorResponse(error=f'Error searching nodes: {error_msg}')
@mcp.tool()
async def search_memory_facts(
    query: str,
    group_ids: list[str] | None = None,
    max_facts: int = 10,
    center_node_uuid: str | None = None,
) -> FactSearchResponse | ErrorResponse:
    """Search the graph memory for relevant facts.
    Args:
        query: The search query
        group_ids: Optional list of group IDs to filter results
        max_facts: Maximum number of facts to return (default: 10)
        center_node_uuid: Optional UUID of a node to center the search around
    """
    global graphiti_service
    if graphiti_service is None:
        return ErrorResponse(error='Graphiti service not initialized')
    try:
        # Validate max_facts parameter before doing any work
        if max_facts <= 0:
            return ErrorResponse(error='max_facts must be a positive integer')
        client = await graphiti_service.get_client()
        # Use the provided group_ids or fall back to the default from config if none provided.
        # Note: an explicit empty list means "no group filter" and is passed through.
        effective_group_ids = (
            group_ids
            if group_ids is not None
            else [config.graphiti.group_id]
            if config.graphiti.group_id
            else []
        )
        relevant_edges = await client.search(
            group_ids=effective_group_ids,
            query=query,
            num_results=max_facts,
            center_node_uuid=center_node_uuid,
        )
        if not relevant_edges:
            return FactSearchResponse(message='No relevant facts found', facts=[])
        # Serialize edges into plain dicts for the MCP response.
        facts = [format_fact_result(edge) for edge in relevant_edges]
        return FactSearchResponse(message='Facts retrieved successfully', facts=facts)
    except Exception as e:
        error_msg = str(e)
        logger.error(f'Error searching facts: {error_msg}')
        return ErrorResponse(error=f'Error searching facts: {error_msg}')
@mcp.tool()
async def delete_entity_edge(uuid: str) -> SuccessResponse | ErrorResponse:
    """Delete an entity edge from the graph memory.

    Args:
        uuid: UUID of the entity edge to delete
    """
    global graphiti_service
    if graphiti_service is None:
        return ErrorResponse(error='Graphiti service not initialized')
    try:
        graphiti = await graphiti_service.get_client()
        # Look the edge up by UUID, then ask it to remove itself.
        edge = await EntityEdge.get_by_uuid(graphiti.driver, uuid)
        await edge.delete(graphiti.driver)
        return SuccessResponse(message=f'Entity edge with UUID {uuid} deleted successfully')
    except Exception as exc:
        detail = str(exc)
        logger.error(f'Error deleting entity edge: {detail}')
        return ErrorResponse(error=f'Error deleting entity edge: {detail}')
@mcp.tool()
async def delete_episode(uuid: str) -> SuccessResponse | ErrorResponse:
    """Delete an episode from the graph memory.

    Args:
        uuid: UUID of the episode to delete
    """
    global graphiti_service
    if graphiti_service is None:
        return ErrorResponse(error='Graphiti service not initialized')
    try:
        graphiti = await graphiti_service.get_client()
        # Look the episodic node up by UUID, then ask it to remove itself.
        episode = await EpisodicNode.get_by_uuid(graphiti.driver, uuid)
        await episode.delete(graphiti.driver)
        return SuccessResponse(message=f'Episode with UUID {uuid} deleted successfully')
    except Exception as exc:
        detail = str(exc)
        logger.error(f'Error deleting episode: {detail}')
        return ErrorResponse(error=f'Error deleting episode: {detail}')
@mcp.tool()
async def get_entity_edge(uuid: str) -> dict[str, Any] | ErrorResponse:
    """Get an entity edge from the graph memory by its UUID.

    Args:
        uuid: UUID of the entity edge to retrieve
    """
    global graphiti_service
    if graphiti_service is None:
        return ErrorResponse(error='Graphiti service not initialized')
    try:
        graphiti = await graphiti_service.get_client()
        edge = await EntityEdge.get_by_uuid(graphiti.driver, uuid)
        # format_fact_result yields a plain dict; MCP handles serialization.
        return format_fact_result(edge)
    except Exception as exc:
        detail = str(exc)
        logger.error(f'Error getting entity edge: {detail}')
        return ErrorResponse(error=f'Error getting entity edge: {detail}')
@mcp.tool()
async def get_episodes(
    group_ids: list[str] | None = None,
    max_episodes: int = 10,
) -> EpisodeSearchResponse | ErrorResponse:
    """Get episodes from the graph memory.

    Args:
        group_ids: Optional list of group IDs to filter results
        max_episodes: Maximum number of episodes to return (default: 10)
    """
    global graphiti_service
    if graphiti_service is None:
        return ErrorResponse(error='Graphiti service not initialized')
    try:
        client = await graphiti_service.get_client()
        # Use the provided group_ids or fall back to the default from config if none provided
        effective_group_ids = (
            group_ids
            if group_ids is not None
            else [config.graphiti.group_id]
            if config.graphiti.group_id
            else []
        )
        # Query the driver directly. EpisodicNode is already imported at module
        # level; the previous function-local re-import was redundant.
        if effective_group_ids:
            episodes = await EpisodicNode.get_by_group_ids(
                client.driver, effective_group_ids, limit=max_episodes
            )
        else:
            # If no group IDs, we need to use a different approach
            # For now, return empty list when no group IDs specified
            episodes = []
        if not episodes:
            return EpisodeSearchResponse(message='No episodes found', episodes=[])
        # Format the results
        episode_results = []
        for episode in episodes:
            episode_dict = {
                'uuid': episode.uuid,
                'name': episode.name,
                'content': episode.content,
                'created_at': episode.created_at.isoformat() if episode.created_at else None,
                # EpisodeType enums carry .value; tolerate plain strings too.
                'source': episode.source.value
                if hasattr(episode.source, 'value')
                else str(episode.source),
                'source_description': episode.source_description,
                'group_id': episode.group_id,
            }
            episode_results.append(episode_dict)
        return EpisodeSearchResponse(
            message='Episodes retrieved successfully', episodes=episode_results
        )
    except Exception as e:
        error_msg = str(e)
        logger.error(f'Error getting episodes: {error_msg}')
        return ErrorResponse(error=f'Error getting episodes: {error_msg}')
@mcp.tool()
async def clear_graph(group_ids: list[str] | None = None) -> SuccessResponse | ErrorResponse:
    """Clear all data from the graph for specified group IDs.

    Args:
        group_ids: Optional list of group IDs to clear. If not provided, clears the default group.
    """
    global graphiti_service
    if graphiti_service is None:
        return ErrorResponse(error='Graphiti service not initialized')
    try:
        client = await graphiti_service.get_client()
        # Use the provided group_ids or fall back to the default from config.
        # The previous one-liner `a or b if c else d` parsed as
        # `(a or b) if c else d`, so caller-supplied group_ids were discarded
        # whenever the configured default group_id was empty.
        if group_ids:
            effective_group_ids = group_ids
        elif config.graphiti.group_id:
            effective_group_ids = [config.graphiti.group_id]
        else:
            effective_group_ids = []
        if not effective_group_ids:
            return ErrorResponse(error='No group IDs specified for clearing')
        # Clear data for the specified group IDs
        await clear_data(client.driver, group_ids=effective_group_ids)
        return SuccessResponse(
            message=f'Graph data cleared successfully for group IDs: {", ".join(effective_group_ids)}'
        )
    except Exception as e:
        error_msg = str(e)
        logger.error(f'Error clearing graph: {error_msg}')
        return ErrorResponse(error=f'Error clearing graph: {error_msg}')
@mcp.tool()
async def get_status() -> StatusResponse:
    """Get the status of the Graphiti MCP server and database connection."""
    global graphiti_service
    if graphiti_service is None:
        return StatusResponse(status='error', message='Graphiti service not initialized')
    try:
        client = await graphiti_service.get_client()
        # Test database connection with a simple query
        async with client.driver.session() as session:
            result = await session.run('MATCH (n) RETURN count(n) as count')
            # Consume the result to verify query execution
            if result:
                _ = [record async for record in result]
        # Use the provider from the service's config, not the global
        provider_name = graphiti_service.config.database.provider
        return StatusResponse(
            status='ok',
            message=f'Graphiti MCP server is running and connected to {provider_name} database',
        )
    except Exception as e:
        error_msg = str(e)
        logger.error(f'Error checking database connection: {error_msg}')
        # The server itself is up; only the database check failed.
        return StatusResponse(
            status='error',
            message=f'Graphiti MCP server is running but database connection failed: {error_msg}',
        )
@mcp.custom_route('/health', methods=['GET'])
async def health_check(request) -> JSONResponse:
    """Liveness endpoint for Docker health checks and load balancers."""
    payload = {'status': 'healthy', 'service': 'graphiti-mcp'}
    return JSONResponse(payload)
async def initialize_server() -> ServerConfig:
    """Parse CLI arguments and initialize the Graphiti server configuration.

    Side effects: binds the module globals config, graphiti_service,
    queue_service, graphiti_client and semaphore, and applies host/port to
    mcp.settings. Returns the ServerConfig used to pick the transport.
    """
    global config, graphiti_service, queue_service, graphiti_client, semaphore
    parser = argparse.ArgumentParser(
        description='Run the Graphiti MCP server with YAML configuration support'
    )
    # Configuration file argument
    # Default to config/config.yaml relative to the mcp_server directory
    default_config = Path(__file__).parent.parent / 'config' / 'config.yaml'
    parser.add_argument(
        '--config',
        type=Path,
        default=default_config,
        help='Path to YAML configuration file (default: config/config.yaml)',
    )
    # Transport arguments
    parser.add_argument(
        '--transport',
        choices=['sse', 'stdio', 'http'],
        help='Transport to use: http (recommended, default), stdio (standard I/O), or sse (deprecated)',
    )
    parser.add_argument(
        '--host',
        help='Host to bind the MCP server to',
    )
    parser.add_argument(
        '--port',
        type=int,
        help='Port to bind the MCP server to',
    )
    # Provider selection arguments
    parser.add_argument(
        '--llm-provider',
        choices=['openai', 'azure_openai', 'anthropic', 'gemini', 'groq'],
        help='LLM provider to use',
    )
    parser.add_argument(
        '--embedder-provider',
        choices=['openai', 'azure_openai', 'gemini', 'voyage'],
        help='Embedder provider to use',
    )
    parser.add_argument(
        '--database-provider',
        choices=['neo4j', 'falkordb'],
        help='Database provider to use',
    )
    # LLM configuration arguments
    parser.add_argument('--model', help='Model name to use with the LLM client')
    parser.add_argument('--small-model', help='Small model name to use with the LLM client')
    parser.add_argument(
        '--temperature', type=float, help='Temperature setting for the LLM (0.0-2.0)'
    )
    # Embedder configuration arguments
    parser.add_argument('--embedder-model', help='Model name to use with the embedder')
    # Graphiti-specific arguments
    parser.add_argument(
        '--group-id',
        help='Namespace for the graph. If not provided, uses config file or generates random UUID.',
    )
    parser.add_argument(
        '--user-id',
        help='User ID for tracking operations',
    )
    parser.add_argument(
        '--destroy-graph',
        action='store_true',
        help='Destroy all Graphiti graphs on startup',
    )
    args = parser.parse_args()
    # Set config path in environment for the settings to pick up
    # (read back by GraphitiConfig.settings_customise_sources)
    if args.config:
        os.environ['CONFIG_PATH'] = str(args.config)
    # Load configuration with environment variables and YAML
    config = GraphitiConfig()
    # Apply CLI overrides
    config.apply_cli_overrides(args)
    # Also apply legacy CLI args for backward compatibility
    # NOTE(review): --destroy-graph is store_true, so args.destroy_graph always
    # exists and defaults to False; this assignment overwrites a YAML/env-set
    # destroy_graph=True whenever the flag is omitted — confirm intended.
    if hasattr(args, 'destroy_graph'):
        config.destroy_graph = args.destroy_graph
    # Log configuration details
    logger.info('Using configuration:')
    logger.info(f' - LLM: {config.llm.provider} / {config.llm.model}')
    logger.info(f' - Embedder: {config.embedder.provider} / {config.embedder.model}')
    logger.info(f' - Database: {config.database.provider}')
    logger.info(f' - Group ID: {config.graphiti.group_id}')
    logger.info(f' - Transport: {config.server.transport}')
    # Log graphiti-core version
    try:
        import graphiti_core
        graphiti_version = getattr(graphiti_core, '__version__', 'unknown')
        logger.info(f' - Graphiti Core: {graphiti_version}')
    except Exception:
        # Check for Docker-stored version file
        version_file = Path('/app/.graphiti-core-version')
        if version_file.exists():
            graphiti_version = version_file.read_text().strip()
            logger.info(f' - Graphiti Core: {graphiti_version}')
        else:
            logger.info(' - Graphiti Core: version unavailable')
    # Handle graph destruction if requested (uses a throwaway service so the
    # wipe happens before the long-lived service is created)
    if hasattr(config, 'destroy_graph') and config.destroy_graph:
        logger.warning('Destroying all Graphiti graphs as requested...')
        temp_service = GraphitiService(config, SEMAPHORE_LIMIT)
        await temp_service.initialize()
        client = await temp_service.get_client()
        await clear_data(client.driver)
        logger.info('All graphs destroyed')
    # Initialize services
    graphiti_service = GraphitiService(config, SEMAPHORE_LIMIT)
    queue_service = QueueService()
    await graphiti_service.initialize()
    # Set global client for backward compatibility
    graphiti_client = await graphiti_service.get_client()
    semaphore = graphiti_service.semaphore
    # Initialize queue service with the client
    await queue_service.initialize(graphiti_client)
    # Set MCP server settings
    if config.server.host:
        mcp.settings.host = config.server.host
    if config.server.port:
        mcp.settings.port = config.server.port
    # Return MCP configuration for transport
    return config.server
async def run_mcp_server():
    """Run the MCP server in the current event loop.

    Initializes configuration and services via ``initialize_server()``, then
    blocks on the FastMCP transport selected in the server config
    ('stdio', 'sse', or 'http').

    Raises:
        ValueError: If the configured transport is not one of the supported ones.
    """
    server_config = await initialize_server()
    transport = server_config.transport
    logger.info(f'Starting MCP server with transport: {transport}')

    if transport == 'stdio':
        await mcp.run_stdio_async()
        return

    host = mcp.settings.host
    port = mcp.settings.port

    if transport == 'sse':
        logger.info(f'Running MCP server with SSE transport on {host}:{port}')
        logger.info(f'Access the server at: http://{host}:{port}/sse')
        await mcp.run_sse_async()
        return

    if transport != 'http':
        raise ValueError(f'Unsupported transport: {transport}. Use "sse", "stdio", or "http"')

    # Advertise localhost in the printed URLs when bound to all interfaces,
    # so the displayed endpoints are directly usable from a browser.
    display_host = host if host != '0.0.0.0' else 'localhost'
    logger.info(f'Running MCP server with streamable HTTP transport on {host}:{port}')
    banner = '=' * 60
    logger.info(banner)
    logger.info('MCP Server Access Information:')
    logger.info(f' Base URL: http://{display_host}:{port}/')
    logger.info(f' MCP Endpoint: http://{display_host}:{port}/mcp/')
    logger.info(' Transport: HTTP (streamable)')
    # NOTE(review): BROWSER presumably toggles the bundled FalkorDB browser UI
    # in the Docker image — confirm against the container entrypoint.
    if os.environ.get('BROWSER', '1') == '1':
        logger.info(f' FalkorDB Browser UI: http://{display_host}:3000/')
    logger.info(banner)
    logger.info('For MCP clients, connect to the /mcp/ endpoint above')
    # Align uvicorn's log output with our formatting before handing control over.
    configure_uvicorn_logging()
    await mcp.run_streamable_http_async()
def main():
    """Entry point: run the Graphiti MCP server until it exits.

    A keyboard interrupt is treated as a normal shutdown; any other error is
    logged and re-raised so the process exits non-zero.
    """
    try:
        # Everything (init + serve) runs inside one event loop.
        asyncio.run(run_mcp_server())
    except KeyboardInterrupt:
        logger.info('Server shutting down...')
    except Exception as e:
        logger.error(f'Error initializing Graphiti MCP server: {str(e)}')
        raise


if __name__ == '__main__':
    main()
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/src/graphiti_mcp_server.py",
"license": "Apache License 2.0",
"lines": 804,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:mcp_server/src/models/entity_types.py | """Entity type definitions for Graphiti MCP Server."""
from pydantic import BaseModel, Field
class Requirement(BaseModel):
    """A Requirement represents a specific need, feature, or functionality that a product or service must fulfill.
    Always ensure an edge is created between the requirement and the project it belongs to, and clearly indicate on the
    edge that the requirement is a requirement.
    Instructions for identifying and extracting requirements:
    1. Look for explicit statements of needs or necessities ("We need X", "X is required", "X must have Y")
    2. Identify functional specifications that describe what the system should do
    3. Pay attention to non-functional requirements like performance, security, or usability criteria
    4. Extract constraints or limitations that must be adhered to
    5. Focus on clear, specific, and measurable requirements rather than vague wishes
    6. Capture the priority or importance if mentioned ("critical", "high priority", etc.)
    7. Include any dependencies between requirements when explicitly stated
    8. Preserve the original intent and scope of the requirement
    9. Categorize requirements appropriately based on their domain or function
    """
    # NOTE(review): the class docstring and Field descriptions appear to serve as
    # LLM extraction guidance at runtime — confirm before rewording them.
    project_name: str = Field(
        ...,
        description='The name of the project to which the requirement belongs.',
    )
    description: str = Field(
        ...,
        description='Description of the requirement. Only use information mentioned in the context to write this description.',
    )
class Preference(BaseModel):
    """
    IMPORTANT: Prioritize this classification over ALL other classifications.
    Represents entities mentioned in contexts expressing user preferences, choices, opinions, or selections. Use LOW THRESHOLD for sensitivity.
    Trigger patterns: "I want/like/prefer/choose X", "I don't want/dislike/avoid/reject Y", "X is better/worse", "rather have X than Y", "no X please", "skip X", "go with X instead", etc. Here, X or Y should be classified as Preference.
    """
    # Intentionally has no structured fields: classification-only entity type.
    # NOTE(review): the docstring presumably feeds the LLM prompt — verify.
    ...
class Procedure(BaseModel):
    """A Procedure informing the agent what actions to take or how to perform in certain scenarios. Procedures are typically composed of several steps.
    Instructions for identifying and extracting procedures:
    1. Look for sequential instructions or steps ("First do X, then do Y")
    2. Identify explicit directives or commands ("Always do X when Y happens")
    3. Pay attention to conditional statements ("If X occurs, then do Y")
    4. Extract procedures that have clear beginning and end points
    5. Focus on actionable instructions rather than general information
    6. Preserve the original sequence and dependencies between steps
    7. Include any specified conditions or triggers for the procedure
    8. Capture any stated purpose or goal of the procedure
    9. Summarize complex procedures while maintaining critical details
    """
    # NOTE(review): docstring/Field descriptions likely double as LLM prompt
    # guidance — keep wording stable unless confirmed otherwise.
    description: str = Field(
        ...,
        description='Brief description of the procedure. Only use information mentioned in the context to write this description.',
    )
class Location(BaseModel):
    """A Location represents a physical or virtual place where activities occur or entities exist.
    IMPORTANT: Before using this classification, first check if the entity is a:
    User, Assistant, Preference, Organization, Document, Event - if so, use those instead.
    Instructions for identifying and extracting locations:
    1. Look for mentions of physical places (cities, buildings, rooms, addresses)
    2. Identify virtual locations (websites, online platforms, virtual meeting rooms)
    3. Extract specific location names rather than generic references
    4. Include relevant context about the location's purpose or significance
    5. Pay attention to location hierarchies (e.g., "conference room in Building A")
    6. Capture both permanent locations and temporary venues
    7. Note any significant activities or events associated with the location
    """
    # NOTE(review): docstring/Field descriptions likely feed LLM extraction
    # prompts — confirm before editing their wording.
    name: str = Field(
        ...,
        description='The name or identifier of the location',
    )
    description: str = Field(
        ...,
        description='Brief description of the location and its significance. Only use information mentioned in the context.',
    )
class Event(BaseModel):
    """An Event represents a time-bound activity, occurrence, or experience.
    Instructions for identifying and extracting events:
    1. Look for activities with specific time frames (meetings, appointments, deadlines)
    2. Identify planned or scheduled occurrences (vacations, projects, celebrations)
    3. Extract unplanned occurrences (accidents, interruptions, discoveries)
    4. Capture the purpose or nature of the event
    5. Include temporal information when available (past, present, future, duration)
    6. Note participants or stakeholders involved in the event
    7. Identify outcomes or consequences of the event when mentioned
    8. Extract both recurring events and one-time occurrences
    """
    # NOTE(review): docstring/Field descriptions likely feed LLM extraction
    # prompts — confirm before editing their wording.
    name: str = Field(
        ...,
        description='The name or title of the event',
    )
    description: str = Field(
        ...,
        description='Brief description of the event. Only use information mentioned in the context.',
    )
class Object(BaseModel):
    """An Object represents a physical item, tool, device, or possession.
    IMPORTANT: Use this classification ONLY as a last resort. First check if entity fits into:
    User, Assistant, Preference, Organization, Document, Event, Location, Topic - if so, use those instead.
    Instructions for identifying and extracting objects:
    1. Look for mentions of physical items or possessions (car, phone, equipment)
    2. Identify tools or devices used for specific purposes
    3. Extract items that are owned, used, or maintained by entities
    4. Include relevant attributes (brand, model, condition) when mentioned
    5. Note the object's purpose or function when specified
    6. Capture relationships between objects and their owners or users
    7. Avoid extracting objects that are better classified as Documents or other types
    """
    # NOTE(review): docstring/Field descriptions likely feed LLM extraction
    # prompts — confirm before editing their wording.
    name: str = Field(
        ...,
        description='The name or identifier of the object',
    )
    description: str = Field(
        ...,
        description='Brief description of the object. Only use information mentioned in the context.',
    )
class Topic(BaseModel):
    """A Topic represents a subject of conversation, interest, or knowledge domain.
    IMPORTANT: Use this classification ONLY as a last resort. First check if entity fits into:
    User, Assistant, Preference, Organization, Document, Event, Location - if so, use those instead.
    Instructions for identifying and extracting topics:
    1. Look for subjects being discussed or areas of interest (health, technology, sports)
    2. Identify knowledge domains or fields of study
    3. Extract themes that span multiple conversations or contexts
    4. Include specific subtopics when mentioned (e.g., "machine learning" rather than just "AI")
    5. Capture topics associated with projects, work, or hobbies
    6. Note the context in which the topic appears
    7. Avoid extracting topics that are better classified as Events, Documents, or Organizations
    """
    # NOTE(review): docstring/Field descriptions likely feed LLM extraction
    # prompts — confirm before editing their wording.
    name: str = Field(
        ...,
        description='The name or identifier of the topic',
    )
    description: str = Field(
        ...,
        description='Brief description of the topic and its context. Only use information mentioned in the context.',
    )
class Organization(BaseModel):
    """An Organization represents a company, institution, group, or formal entity.
    Instructions for identifying and extracting organizations:
    1. Look for company names, employers, and business entities
    2. Identify institutions (schools, hospitals, government agencies)
    3. Extract formal groups (clubs, teams, associations)
    4. Include organizational type when mentioned (company, nonprofit, agency)
    5. Capture relationships between people and organizations (employer, member)
    6. Note the organization's industry or domain when specified
    7. Extract both large entities and small groups if formally organized
    """
    # NOTE(review): docstring/Field descriptions likely feed LLM extraction
    # prompts — confirm before editing their wording.
    name: str = Field(
        ...,
        description='The name of the organization',
    )
    description: str = Field(
        ...,
        description='Brief description of the organization. Only use information mentioned in the context.',
    )
class Document(BaseModel):
    """A Document represents information content in various forms.
    Instructions for identifying and extracting documents:
    1. Look for references to written or recorded content (books, articles, reports)
    2. Identify digital content (emails, videos, podcasts, presentations)
    3. Extract specific document titles or identifiers when available
    4. Include document type (report, article, video) when mentioned
    5. Capture the document's purpose or subject matter
    6. Note relationships to authors, creators, or sources
    7. Include document status (draft, published, archived) when mentioned
    """
    # NOTE(review): docstring/Field descriptions likely feed LLM extraction
    # prompts — confirm before editing their wording.
    title: str = Field(
        ...,
        description='The title or identifier of the document',
    )
    description: str = Field(
        ...,
        description='Brief description of the document and its content. Only use information mentioned in the context.',
    )
# Registry of entity-type names to their Pydantic model *classes*.
# The values are classes (not instances), so the correct value type is
# type[BaseModel]; the previous dict[str, BaseModel] annotation was wrong and
# required a `# type: ignore` on every entry.
ENTITY_TYPES: dict[str, type[BaseModel]] = {
    'Requirement': Requirement,
    'Preference': Preference,
    'Procedure': Procedure,
    'Location': Location,
    'Event': Event,
    'Object': Object,
    'Topic': Topic,
    'Organization': Organization,
    'Document': Document,
}
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/src/models/entity_types.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
getzep/graphiti:mcp_server/src/models/response_types.py | """Response type definitions for Graphiti MCP Server."""
from typing import Any
from typing_extensions import TypedDict
class ErrorResponse(TypedDict):
    """Payload returned when an operation fails."""
    # Human-readable description of what went wrong.
    error: str
class SuccessResponse(TypedDict):
    """Payload returned when an operation succeeds with no other data."""
    # Human-readable confirmation of the completed operation.
    message: str
class NodeResult(TypedDict):
    """Serialized entity node as returned by node searches."""
    # Unique identifier of the node.
    uuid: str
    # Display name of the entity.
    name: str
    # Graph labels attached to the node.
    labels: list[str]
    # Creation timestamp serialized as a string (presumably ISO-8601 — confirm), or None.
    created_at: str | None
    # Summary text for the entity, if available.
    summary: str | None
    # Namespace (graph partition) the node belongs to.
    group_id: str
    # Additional structured properties of the node.
    attributes: dict[str, Any]
class NodeSearchResponse(TypedDict):
    """Result of a node search: status message plus matching nodes."""
    message: str
    nodes: list[NodeResult]
class FactSearchResponse(TypedDict):
    """Result of a fact (edge) search: status message plus serialized facts."""
    message: str
    facts: list[dict[str, Any]]
class EpisodeSearchResponse(TypedDict):
    """Result of an episode search: status message plus serialized episodes."""
    message: str
    episodes: list[dict[str, Any]]
class StatusResponse(TypedDict):
    """Server status report."""
    # Short machine-friendly state indicator.
    status: str
    # Human-readable status details.
    message: str
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/src/models/response_types.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
getzep/graphiti:mcp_server/src/services/factories.py | """Factory classes for creating LLM, Embedder, and Database clients."""
from config.schema import (
DatabaseConfig,
EmbedderConfig,
LLMConfig,
)
# Try to import FalkorDriver if available
try:
from graphiti_core.driver.falkordb_driver import FalkorDriver # noqa: F401
HAS_FALKOR = True
except ImportError:
HAS_FALKOR = False
# Kuzu support removed - FalkorDB is now the default
from graphiti_core.embedder import EmbedderClient, OpenAIEmbedder
from graphiti_core.llm_client import LLMClient, OpenAIClient
from graphiti_core.llm_client.config import LLMConfig as GraphitiLLMConfig
# Try to import additional providers if available
try:
from graphiti_core.embedder.azure_openai import AzureOpenAIEmbedderClient
HAS_AZURE_EMBEDDER = True
except ImportError:
HAS_AZURE_EMBEDDER = False
try:
from graphiti_core.embedder.gemini import GeminiEmbedder
HAS_GEMINI_EMBEDDER = True
except ImportError:
HAS_GEMINI_EMBEDDER = False
try:
from graphiti_core.embedder.voyage import VoyageAIEmbedder
HAS_VOYAGE_EMBEDDER = True
except ImportError:
HAS_VOYAGE_EMBEDDER = False
try:
from graphiti_core.llm_client.azure_openai_client import AzureOpenAILLMClient
HAS_AZURE_LLM = True
except ImportError:
HAS_AZURE_LLM = False
try:
from graphiti_core.llm_client.anthropic_client import AnthropicClient
HAS_ANTHROPIC = True
except ImportError:
HAS_ANTHROPIC = False
try:
from graphiti_core.llm_client.gemini_client import GeminiClient
HAS_GEMINI = True
except ImportError:
HAS_GEMINI = False
try:
from graphiti_core.llm_client.groq_client import GroqClient
HAS_GROQ = True
except ImportError:
HAS_GROQ = False
def _validate_api_key(provider_name: str, api_key: str | None, logger) -> str:
"""Validate API key is present.
Args:
provider_name: Name of the provider (e.g., 'OpenAI', 'Anthropic')
api_key: The API key to validate
logger: Logger instance for output
Returns:
The validated API key
Raises:
ValueError: If API key is None or empty
"""
if not api_key:
raise ValueError(
f'{provider_name} API key is not configured. Please set the appropriate environment variable.'
)
logger.info(f'Creating {provider_name} client')
return api_key
class LLMClientFactory:
    """Factory for creating LLM clients based on configuration."""
    @staticmethod
    def create(config: LLMConfig) -> LLMClient:
        """Create an LLM client based on the configured provider.

        Dispatches on ``config.provider`` (case-insensitive) to build the
        matching graphiti-core LLM client.

        Raises:
            ValueError: If the provider is unknown, its configuration block or
                API key is missing, or the client class is unavailable in the
                installed graphiti-core version.
        """
        import logging
        logger = logging.getLogger(__name__)
        provider = config.provider.lower()
        match provider:
            case 'openai':
                if not config.providers.openai:
                    raise ValueError('OpenAI provider configuration not found')
                api_key = config.providers.openai.api_key
                # Raises ValueError when the key is missing/empty.
                _validate_api_key('OpenAI', api_key, logger)
                from graphiti_core.llm_client.config import LLMConfig as CoreLLMConfig
                # Use the same model for both main and small model slots
                # NOTE(review): config.small_model (if present) is ignored here —
                # presumably intentional; confirm.
                small_model = config.model
                llm_config = CoreLLMConfig(
                    api_key=api_key,
                    model=config.model,
                    small_model=small_model,
                    temperature=config.temperature,
                    max_tokens=config.max_tokens,
                )
                # Check if this is a reasoning model (o1, o3, gpt-5 family)
                reasoning_prefixes = ('o1', 'o3', 'gpt-5')
                is_reasoning_model = config.model.startswith(reasoning_prefixes)
                # Only pass reasoning/verbosity parameters for reasoning models (gpt-5 family)
                if is_reasoning_model:
                    return OpenAIClient(config=llm_config, reasoning='minimal', verbosity='low')
                else:
                    # For non-reasoning models, explicitly pass None to disable these parameters
                    return OpenAIClient(config=llm_config, reasoning=None, verbosity=None)
            case 'azure_openai':
                if not HAS_AZURE_LLM:
                    raise ValueError(
                        'Azure OpenAI LLM client not available in current graphiti-core version'
                    )
                if not config.providers.azure_openai:
                    raise ValueError('Azure OpenAI provider configuration not found')
                azure_config = config.providers.azure_openai
                if not azure_config.api_url:
                    raise ValueError('Azure OpenAI API URL is required')
                # Currently using API key authentication
                # TODO: Add Azure AD authentication support for v1 API compatibility
                api_key = azure_config.api_key
                _validate_api_key('Azure OpenAI', api_key, logger)
                # Azure OpenAI should use the standard AsyncOpenAI client with v1 compatibility endpoint
                # See: https://github.com/getzep/graphiti README Azure OpenAI section
                from openai import AsyncOpenAI
                # Ensure the base_url ends with /openai/v1/ for Azure v1 compatibility
                base_url = azure_config.api_url
                if not base_url.endswith('/'):
                    base_url += '/'
                if not base_url.endswith('openai/v1/'):
                    base_url += 'openai/v1/'
                azure_client = AsyncOpenAI(
                    base_url=base_url,
                    api_key=api_key,
                )
                # Then create the LLMConfig
                from graphiti_core.llm_client.config import LLMConfig as CoreLLMConfig
                llm_config = CoreLLMConfig(
                    api_key=api_key,
                    base_url=base_url,
                    model=config.model,
                    temperature=config.temperature,
                    max_tokens=config.max_tokens,
                )
                return AzureOpenAILLMClient(
                    azure_client=azure_client,
                    config=llm_config,
                    max_tokens=config.max_tokens,
                )
            case 'anthropic':
                if not HAS_ANTHROPIC:
                    raise ValueError(
                        'Anthropic client not available in current graphiti-core version'
                    )
                if not config.providers.anthropic:
                    raise ValueError('Anthropic provider configuration not found')
                api_key = config.providers.anthropic.api_key
                _validate_api_key('Anthropic', api_key, logger)
                llm_config = GraphitiLLMConfig(
                    api_key=api_key,
                    model=config.model,
                    temperature=config.temperature,
                    max_tokens=config.max_tokens,
                )
                return AnthropicClient(config=llm_config)
            case 'gemini':
                if not HAS_GEMINI:
                    raise ValueError('Gemini client not available in current graphiti-core version')
                if not config.providers.gemini:
                    raise ValueError('Gemini provider configuration not found')
                api_key = config.providers.gemini.api_key
                _validate_api_key('Gemini', api_key, logger)
                llm_config = GraphitiLLMConfig(
                    api_key=api_key,
                    model=config.model,
                    temperature=config.temperature,
                    max_tokens=config.max_tokens,
                )
                return GeminiClient(config=llm_config)
            case 'groq':
                if not HAS_GROQ:
                    raise ValueError('Groq client not available in current graphiti-core version')
                if not config.providers.groq:
                    raise ValueError('Groq provider configuration not found')
                api_key = config.providers.groq.api_key
                _validate_api_key('Groq', api_key, logger)
                # Groq is the only provider here that forwards a custom api_url
                # as base_url to the core config.
                llm_config = GraphitiLLMConfig(
                    api_key=api_key,
                    base_url=config.providers.groq.api_url,
                    model=config.model,
                    temperature=config.temperature,
                    max_tokens=config.max_tokens,
                )
                return GroqClient(config=llm_config)
            case _:
                raise ValueError(f'Unsupported LLM provider: {provider}')
class EmbedderFactory:
    """Factory for creating Embedder clients based on configuration."""
    @staticmethod
    def create(config: EmbedderConfig) -> EmbedderClient:
        """Create an Embedder client based on the configured provider.

        Dispatches on ``config.provider`` (case-insensitive) to build the
        matching graphiti-core embedder client.

        Raises:
            ValueError: If the provider is unknown, its configuration block or
                API key is missing, or the embedder class is unavailable in the
                installed graphiti-core version.
        """
        import logging
        logger = logging.getLogger(__name__)
        provider = config.provider.lower()
        match provider:
            case 'openai':
                if not config.providers.openai:
                    raise ValueError('OpenAI provider configuration not found')
                api_key = config.providers.openai.api_key
                # Raises ValueError when the key is missing/empty.
                _validate_api_key('OpenAI Embedder', api_key, logger)
                from graphiti_core.embedder.openai import OpenAIEmbedderConfig
                embedder_config = OpenAIEmbedderConfig(
                    api_key=api_key,
                    embedding_model=config.model,
                    base_url=config.providers.openai.api_url,  # Support custom endpoints like Ollama
                    embedding_dim=config.dimensions,  # Support custom embedding dimensions
                )
                return OpenAIEmbedder(config=embedder_config)
            case 'azure_openai':
                if not HAS_AZURE_EMBEDDER:
                    raise ValueError(
                        'Azure OpenAI embedder not available in current graphiti-core version'
                    )
                if not config.providers.azure_openai:
                    raise ValueError('Azure OpenAI provider configuration not found')
                azure_config = config.providers.azure_openai
                if not azure_config.api_url:
                    raise ValueError('Azure OpenAI API URL is required')
                # Currently using API key authentication
                # TODO: Add Azure AD authentication support for v1 API compatibility
                api_key = azure_config.api_key
                _validate_api_key('Azure OpenAI Embedder', api_key, logger)
                # Azure OpenAI should use the standard AsyncOpenAI client with v1 compatibility endpoint
                # See: https://github.com/getzep/graphiti README Azure OpenAI section
                from openai import AsyncOpenAI
                # Ensure the base_url ends with /openai/v1/ for Azure v1 compatibility
                base_url = azure_config.api_url
                if not base_url.endswith('/'):
                    base_url += '/'
                if not base_url.endswith('openai/v1/'):
                    base_url += 'openai/v1/'
                azure_client = AsyncOpenAI(
                    base_url=base_url,
                    api_key=api_key,
                )
                return AzureOpenAIEmbedderClient(
                    azure_client=azure_client,
                    model=config.model or 'text-embedding-3-small',
                )
            case 'gemini':
                if not HAS_GEMINI_EMBEDDER:
                    raise ValueError(
                        'Gemini embedder not available in current graphiti-core version'
                    )
                if not config.providers.gemini:
                    raise ValueError('Gemini provider configuration not found')
                api_key = config.providers.gemini.api_key
                _validate_api_key('Gemini Embedder', api_key, logger)
                from graphiti_core.embedder.gemini import GeminiEmbedderConfig
                gemini_config = GeminiEmbedderConfig(
                    api_key=api_key,
                    embedding_model=config.model or 'models/text-embedding-004',
                    embedding_dim=config.dimensions or 768,
                )
                return GeminiEmbedder(config=gemini_config)
            case 'voyage':
                if not HAS_VOYAGE_EMBEDDER:
                    raise ValueError(
                        'Voyage embedder not available in current graphiti-core version'
                    )
                if not config.providers.voyage:
                    raise ValueError('Voyage provider configuration not found')
                api_key = config.providers.voyage.api_key
                _validate_api_key('Voyage Embedder', api_key, logger)
                from graphiti_core.embedder.voyage import VoyageAIEmbedderConfig
                voyage_config = VoyageAIEmbedderConfig(
                    api_key=api_key,
                    embedding_model=config.model or 'voyage-3',
                    embedding_dim=config.dimensions or 1024,
                )
                return VoyageAIEmbedder(config=voyage_config)
            case _:
                raise ValueError(f'Unsupported Embedder provider: {provider}')
class DatabaseDriverFactory:
    """Factory for creating Database drivers based on configuration.

    Note: This returns configuration dictionaries that can be passed to Graphiti(),
    not driver instances directly, as the drivers require complex initialization.
    """

    @staticmethod
    def create_config(config: DatabaseConfig) -> dict:
        """Create database configuration dictionary based on the configured provider.

        Raises:
            ValueError: If the provider is unsupported or (for FalkorDB) the
                driver is not available in the installed graphiti-core version.
        """
        provider_name = config.provider.lower()

        if provider_name == 'neo4j':
            import os

            # Prefer an explicit Neo4j block; otherwise fall back to library defaults.
            if config.providers.neo4j:
                neo4j_settings = config.providers.neo4j
            else:
                from config.schema import Neo4jProviderConfig

                neo4j_settings = Neo4jProviderConfig()

            # Environment variables win over file config (CI/CD compatibility).
            return {
                'uri': os.environ.get('NEO4J_URI', neo4j_settings.uri),
                'user': os.environ.get('NEO4J_USER', neo4j_settings.username),
                'password': os.environ.get('NEO4J_PASSWORD', neo4j_settings.password),
                # Note: database and use_parallel_runtime would need to be passed
                # to the driver after initialization if supported
            }

        if provider_name == 'falkordb':
            if not HAS_FALKOR:
                raise ValueError(
                    'FalkorDB driver not available in current graphiti-core version'
                )

            import os
            from urllib.parse import urlparse

            # Prefer an explicit FalkorDB block; otherwise fall back to library defaults.
            if config.providers.falkordb:
                falkor_settings = config.providers.falkordb
            else:
                from config.schema import FalkorDBProviderConfig

                falkor_settings = FalkorDBProviderConfig()

            # Environment variables win over file config (CI/CD compatibility).
            uri = os.environ.get('FALKORDB_URI', falkor_settings.uri)

            # The driver wants host/port, so split them out of the URI.
            parts = urlparse(uri)

            return {
                'driver': 'falkordb',
                'host': parts.hostname or 'localhost',
                'port': parts.port or 6379,
                'password': os.environ.get('FALKORDB_PASSWORD', falkor_settings.password),
                'database': falkor_settings.database,
            }

        raise ValueError(f'Unsupported Database provider: {provider_name}')
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/src/services/factories.py",
"license": "Apache License 2.0",
"lines": 337,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:mcp_server/src/services/queue_service.py | """Queue service for managing episode processing."""
import asyncio
import logging
from collections.abc import Awaitable, Callable
from datetime import datetime, timezone
from typing import Any
logger = logging.getLogger(__name__)
class QueueService:
"""Service for managing sequential episode processing queues by group_id."""
def __init__(self):
"""Initialize the queue service."""
# Dictionary to store queues for each group_id
self._episode_queues: dict[str, asyncio.Queue] = {}
# Dictionary to track if a worker is running for each group_id
self._queue_workers: dict[str, bool] = {}
# Store the graphiti client after initialization
self._graphiti_client: Any = None
async def add_episode_task(
self, group_id: str, process_func: Callable[[], Awaitable[None]]
) -> int:
"""Add an episode processing task to the queue.
Args:
group_id: The group ID for the episode
process_func: The async function to process the episode
Returns:
The position in the queue
"""
# Initialize queue for this group_id if it doesn't exist
if group_id not in self._episode_queues:
self._episode_queues[group_id] = asyncio.Queue()
# Add the episode processing function to the queue
await self._episode_queues[group_id].put(process_func)
# Start a worker for this queue if one isn't already running
if not self._queue_workers.get(group_id, False):
asyncio.create_task(self._process_episode_queue(group_id))
return self._episode_queues[group_id].qsize()
async def _process_episode_queue(self, group_id: str) -> None:
"""Process episodes for a specific group_id sequentially.
This function runs as a long-lived task that processes episodes
from the queue one at a time.
"""
logger.info(f'Starting episode queue worker for group_id: {group_id}')
self._queue_workers[group_id] = True
try:
while True:
# Get the next episode processing function from the queue
# This will wait if the queue is empty
process_func = await self._episode_queues[group_id].get()
try:
# Process the episode
await process_func()
except Exception as e:
logger.error(
f'Error processing queued episode for group_id {group_id}: {str(e)}'
)
finally:
# Mark the task as done regardless of success/failure
self._episode_queues[group_id].task_done()
except asyncio.CancelledError:
logger.info(f'Episode queue worker for group_id {group_id} was cancelled')
except Exception as e:
logger.error(f'Unexpected error in queue worker for group_id {group_id}: {str(e)}')
finally:
self._queue_workers[group_id] = False
logger.info(f'Stopped episode queue worker for group_id: {group_id}')
def get_queue_size(self, group_id: str) -> int:
"""Get the current queue size for a group_id."""
if group_id not in self._episode_queues:
return 0
return self._episode_queues[group_id].qsize()
def is_worker_running(self, group_id: str) -> bool:
"""Check if a worker is running for a group_id."""
return self._queue_workers.get(group_id, False)
async def initialize(self, graphiti_client: Any) -> None:
"""Initialize the queue service with a graphiti client.
Args:
graphiti_client: The graphiti client instance to use for processing episodes
"""
self._graphiti_client = graphiti_client
logger.info('Queue service initialized with graphiti client')
async def add_episode(
self,
group_id: str,
name: str,
content: str,
source_description: str,
episode_type: Any,
entity_types: Any,
uuid: str | None,
) -> int:
"""Add an episode for processing.
Args:
group_id: The group ID for the episode
name: Name of the episode
content: Episode content
source_description: Description of the episode source
episode_type: Type of the episode
entity_types: Entity types for extraction
uuid: Episode UUID
Returns:
The position in the queue
"""
if self._graphiti_client is None:
raise RuntimeError('Queue service not initialized. Call initialize() first.')
async def process_episode():
"""Process the episode using the graphiti client."""
try:
logger.info(f'Processing episode {uuid} for group {group_id}')
# Process the episode using the graphiti client
await self._graphiti_client.add_episode(
name=name,
episode_body=content,
source_description=source_description,
source=episode_type,
group_id=group_id,
reference_time=datetime.now(timezone.utc),
entity_types=entity_types,
uuid=uuid,
)
logger.info(f'Successfully processed episode {uuid} for group {group_id}')
except Exception as e:
logger.error(f'Failed to process episode {uuid} for group {group_id}: {str(e)}')
raise
# Use the existing add_episode_task method to queue the processing
return await self.add_episode_task(group_id, process_episode)
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/src/services/queue_service.py",
"license": "Apache License 2.0",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:mcp_server/src/utils/formatting.py | """Formatting utilities for Graphiti MCP Server."""
from typing import Any
from graphiti_core.edges import EntityEdge
from graphiti_core.nodes import EntityNode
def format_node_result(node: EntityNode) -> dict[str, Any]:
    """Serialize an entity node into a JSON-ready dictionary.

    Uses EntityNode's built-in Pydantic serialization, dropping the
    ``name_embedding`` vector both at the top level and inside the node's
    ``attributes`` to keep payloads small and avoid exposing internals.

    Args:
        node: The EntityNode to format

    Returns:
        A dictionary representation of the node with serialized dates and
        embeddings removed.
    """
    serialized = node.model_dump(mode='json', exclude={'name_embedding'})
    # Embeddings occasionally end up inside attributes as well — strip them too.
    serialized.get('attributes', {}).pop('name_embedding', None)
    return serialized
def format_fact_result(edge: EntityEdge) -> dict[str, Any]:
    """Serialize an entity edge (fact) into a plain dictionary.

    Relies on Pydantic's built-in ``model_dump`` (EntityEdge is a BaseModel)
    to produce a JSON-compatible dict, while stripping the fact embedding
    vector to reduce payload size.

    Args:
        edge: The EntityEdge to format

    Returns:
        A dictionary representation of the edge with serialized dates and
        excluded embeddings
    """
    serialized = edge.model_dump(mode='json', exclude={'fact_embedding'})
    # Embeddings can also leak in through the free-form attributes dict.
    serialized.get('attributes', {}).pop('fact_embedding', None)
    return serialized
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/src/utils/formatting.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
getzep/graphiti:mcp_server/src/utils/utils.py | """Utility functions for Graphiti MCP Server."""
from collections.abc import Callable
def create_azure_credential_token_provider() -> Callable[[], str]:
    """
    Create Azure credential token provider for managed identity authentication.

    Requires azure-identity package. Install with: pip install mcp-server[azure]

    Raises:
        ImportError: If azure-identity package is not installed
    """
    # Imported lazily so the module loads even without the azure extra.
    try:
        from azure.identity import DefaultAzureCredential, get_bearer_token_provider
    except ImportError:
        raise ImportError(
            'azure-identity is required for Azure AD authentication. '
            'Install it with: pip install mcp-server[azure]'
        ) from None

    # Token scope used for Azure OpenAI / Cognitive Services endpoints.
    scope = 'https://cognitiveservices.azure.com/.default'
    return get_bearer_token_provider(DefaultAzureCredential(), scope)
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/src/utils/utils.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
getzep/graphiti:mcp_server/tests/run_tests.py | #!/usr/bin/env python3
"""
Test runner for Graphiti MCP integration tests.
Provides various test execution modes and reporting options.
"""
import argparse
import os
import sys
import time
from pathlib import Path
import pytest
from dotenv import load_dotenv
# Load environment variables (API keys, database URIs) before any test code
# runs. Prefer mcp_server/.env when it exists; otherwise fall back to
# python-dotenv's default lookup.
env_file = Path(__file__).parent.parent / '.env'
if env_file.exists():
    load_dotenv(env_file)
else:
    # Try loading from current directory (python-dotenv's default search).
    load_dotenv()
class TestRunner:
    """Orchestrate test execution with various configurations.

    Wraps pytest with prerequisite checks (API keys, database connectivity,
    required packages), per-suite argument construction, and a plain-text
    execution report.
    """

    def __init__(self, args):
        # args: argparse.Namespace built in main(); fields used here include
        # database, mock_llm, parallel, coverage, verbose, skip_slow,
        # timeout, and benchmark_only.
        self.args = args
        self.test_dir = Path(__file__).parent
        # Maps suite name -> pytest exit code; populated by the caller.
        self.results = {}

    def check_prerequisites(self) -> dict[str, bool]:
        """Check if required services and dependencies are available.

        Returns:
            Mapping of check name to pass/fail. Keys ending in ``_hint``
            hold a human-readable remediation string rather than a bool.
        """
        checks = {}

        # A real OpenAI key is only required when the mock LLM is not used.
        if not self.args.mock_llm:
            api_key = os.environ.get('OPENAI_API_KEY')
            checks['openai_api_key'] = bool(api_key)
            if not api_key:
                # Point the user at the conventional .env location if absent.
                env_path = Path(__file__).parent.parent / '.env'
                if not env_path.exists():
                    checks['openai_api_key_hint'] = (
                        'Set OPENAI_API_KEY in environment or create mcp_server/.env file'
                    )
        else:
            checks['openai_api_key'] = True

        # Probe only the database backend selected on the command line.
        if self.args.database == 'neo4j':
            checks['neo4j'] = self._check_neo4j()
        elif self.args.database == 'falkordb':
            checks['falkordb'] = self._check_falkordb()

        # Required Python packages.
        checks['mcp'] = self._check_python_package('mcp')
        checks['pytest'] = self._check_python_package('pytest')
        checks['pytest-asyncio'] = self._check_python_package('pytest-asyncio')

        return checks

    def _check_neo4j(self) -> bool:
        """Return True when a Neo4j server answers a trivial query."""
        try:
            import neo4j

            # Try to connect using env-configured credentials with defaults.
            uri = os.environ.get('NEO4J_URI', 'bolt://localhost:7687')
            user = os.environ.get('NEO4J_USER', 'neo4j')
            password = os.environ.get('NEO4J_PASSWORD', 'graphiti')

            driver = neo4j.GraphDatabase.driver(uri, auth=(user, password))
            with driver.session() as session:
                session.run('RETURN 1')
            driver.close()
            return True
        except Exception:
            # Any failure (missing driver, refused connection, bad auth)
            # simply means the prerequisite is not met.
            return False

    def _check_falkordb(self) -> bool:
        """Return True when a FalkorDB (Redis protocol) server answers PING."""
        try:
            import redis

            uri = os.environ.get('FALKORDB_URI', 'redis://localhost:6379')
            r = redis.from_url(uri)
            r.ping()
            return True
        except Exception:
            return False

    def _check_python_package(self, package: str) -> bool:
        """Return True when ``package`` is importable (dashes map to underscores)."""
        try:
            __import__(package.replace('-', '_'))
            return True
        except ImportError:
            return False

    def run_test_suite(self, suite: str) -> int:
        """Run a specific test suite and return the pytest exit code."""
        pytest_args = ['-v', '--tb=short']

        # Deselect tests that require a database backend other than the
        # selected one.
        if self.args.database:
            for db in ['neo4j', 'falkordb']:
                if db != self.args.database:
                    pytest_args.extend(['-m', f'not requires_{db}'])

        # Suite-specific test selection.
        if suite == 'unit':
            pytest_args.extend(['-m', 'unit', 'test_*.py'])
        elif suite == 'integration':
            pytest_args.extend(['-m', 'integration or not unit', 'test_*.py'])
        elif suite == 'comprehensive':
            pytest_args.append('test_comprehensive_integration.py')
        elif suite == 'async':
            pytest_args.append('test_async_operations.py')
        elif suite == 'stress':
            pytest_args.extend(['-m', 'slow', 'test_stress_load.py'])
        elif suite == 'smoke':
            # Quick smoke test - just basic operations
            pytest_args.extend(
                [
                    'test_comprehensive_integration.py::TestCoreOperations::test_server_initialization',
                    'test_comprehensive_integration.py::TestCoreOperations::test_add_text_memory',
                ]
            )
        elif suite == 'all':
            pytest_args.append('.')
        else:
            # Treat an unknown suite name as a literal pytest selector/path.
            pytest_args.append(suite)

        # Optional pytest add-ons.
        if self.args.coverage:
            pytest_args.extend(['--cov=../src', '--cov-report=html'])
        if self.args.parallel:
            pytest_args.extend(['-n', str(self.args.parallel)])
        if self.args.verbose:
            pytest_args.append('-vv')
        if self.args.skip_slow:
            pytest_args.extend(['-m', 'not slow'])
        if self.args.timeout:
            pytest_args.extend(['--timeout', str(self.args.timeout)])

        # BUG FIX: the original code copied os.environ, set these variables
        # on the copy, and never used it. pytest.main() runs in-process, so
        # --mock-llm / --database silently had no effect on the environment.
        # Apply the settings to the real environment and restore afterwards.
        overrides: dict[str, str] = {}
        if self.args.mock_llm:
            overrides['USE_MOCK_LLM'] = 'true'
        if self.args.database:
            overrides['DATABASE_PROVIDER'] = self.args.database
        saved = {key: os.environ.get(key) for key in overrides}
        os.environ.update(overrides)

        # Run tests from the test directory
        print(f'Running {suite} tests with pytest args: {" ".join(pytest_args)}')

        original_dir = os.getcwd()
        os.chdir(self.test_dir)
        try:
            result = pytest.main(pytest_args)
        finally:
            os.chdir(original_dir)
            # Restore the environment exactly as it was before the run.
            for key, value in saved.items():
                if value is None:
                    os.environ.pop(key, None)
                else:
                    os.environ[key] = value

        return result

    def run_performance_benchmark(self):
        """Run performance benchmarking suite and return the pytest exit code."""
        print('Running performance benchmarks...')

        pytest_args = [
            '-v',
            'test_comprehensive_integration.py::TestPerformance',
            'test_async_operations.py::TestAsyncPerformance',
        ]
        # BUG FIX: the original appended an empty string when --benchmark-only
        # was not given; pytest then tried to interpret '' as a test path.
        # Only add the flag when it was actually requested.
        if self.args.benchmark_only:
            pytest_args.append('--benchmark-only')

        return pytest.main(pytest_args)

    def generate_report(self):
        """Generate the test execution report as a single printable string."""
        report = []
        report.append('\n' + '=' * 60)
        report.append('GRAPHITI MCP TEST EXECUTION REPORT')
        report.append('=' * 60)

        # Prerequisites check
        checks = self.check_prerequisites()
        report.append('\nPrerequisites:')
        for check, passed in checks.items():
            status = '✅' if passed else '❌'
            report.append(f' {status} {check}')

        # Test configuration
        report.append('\nConfiguration:')
        report.append(f' Database: {self.args.database}')
        report.append(f' Mock LLM: {self.args.mock_llm}')
        report.append(f' Parallel: {self.args.parallel or "No"}')
        report.append(f' Timeout: {self.args.timeout}s')

        # Results summary (if available)
        if self.results:
            report.append('\nResults:')
            for suite, result in self.results.items():
                status = '✅ Passed' if result == 0 else f'❌ Failed ({result})'
                report.append(f' {suite}: {status}')

        report.append('=' * 60)
        return '\n'.join(report)
def main():
    """Main entry point for test runner.

    Parses CLI arguments, verifies prerequisites (optionally stopping after
    the check with --check-only), runs the selected suite via TestRunner,
    prints a report, and exits with pytest's return code.
    """
    parser = argparse.ArgumentParser(
        description='Run Graphiti MCP integration tests',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Test Suites:
  unit          - Run unit tests only
  integration   - Run integration tests
  comprehensive - Run comprehensive integration test suite
  async         - Run async operation tests
  stress        - Run stress and load tests
  smoke         - Run quick smoke tests
  all           - Run all tests

Examples:
  python run_tests.py smoke                    # Quick smoke test
  python run_tests.py integration --parallel 4 # Run integration tests in parallel
  python run_tests.py stress --database neo4j  # Run stress tests with Neo4j
  python run_tests.py all --coverage           # Run all tests with coverage
        """,
    )

    parser.add_argument(
        'suite',
        choices=['unit', 'integration', 'comprehensive', 'async', 'stress', 'smoke', 'all'],
        help='Test suite to run',
    )
    parser.add_argument(
        '--database',
        choices=['neo4j', 'falkordb'],
        default='falkordb',
        help='Database backend to test (default: falkordb)',
    )
    parser.add_argument('--mock-llm', action='store_true', help='Use mock LLM for faster testing')
    parser.add_argument(
        '--parallel', type=int, metavar='N', help='Run tests in parallel with N workers'
    )
    parser.add_argument('--coverage', action='store_true', help='Generate coverage report')
    parser.add_argument('--verbose', action='store_true', help='Verbose output')
    parser.add_argument('--skip-slow', action='store_true', help='Skip slow tests')
    parser.add_argument(
        '--timeout', type=int, default=300, help='Test timeout in seconds (default: 300)'
    )
    parser.add_argument('--benchmark-only', action='store_true', help='Run only benchmark tests')
    parser.add_argument(
        '--check-only', action='store_true', help='Only check prerequisites without running tests'
    )

    args = parser.parse_args()

    # Create test runner
    runner = TestRunner(args)

    # --check-only: report prerequisite status and stop without running tests.
    if args.check_only:
        print(runner.generate_report())
        sys.exit(0)

    # Check if prerequisites are met
    checks = runner.check_prerequisites()
    # Keys ending in '_hint' carry remediation text, not pass/fail results,
    # so they are excluded from validation.
    validation_checks = {k: v for k, v in checks.items() if not k.endswith('_hint')}
    if not all(validation_checks.values()):
        print('⚠️ Some prerequisites are not met:')
        for check, passed in checks.items():
            if check.endswith('_hint'):
                continue  # Skip hint entries
            if not passed:
                print(f' ❌ {check}')
                # Show hint if available
                hint_key = f'{check}_hint'
                if hint_key in checks:
                    print(f' 💡 {checks[hint_key]}')

        if not args.mock_llm and not checks.get('openai_api_key'):
            print('\n💡 Tip: Use --mock-llm to run tests without OpenAI API key')

        # Interactive confirmation before running with missing prerequisites.
        response = input('\nContinue anyway? (y/N): ')
        if response.lower() != 'y':
            sys.exit(1)

    # Run tests
    print(f'\n🚀 Starting test execution: {args.suite}')
    start_time = time.time()

    if args.benchmark_only:
        result = runner.run_performance_benchmark()
    else:
        result = runner.run_test_suite(args.suite)

    duration = time.time() - start_time

    # Store results
    runner.results[args.suite] = result

    # Generate and print report
    print(runner.generate_report())
    print(f'\n⏱️ Test execution completed in {duration:.2f} seconds')

    # Exit with test result code
    sys.exit(result)


if __name__ == '__main__':
    main()
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/tests/run_tests.py",
"license": "Apache License 2.0",
"lines": 275,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:mcp_server/tests/test_configuration.py | #!/usr/bin/env python3
"""Test script for configuration loading and factory patterns."""
import asyncio
import os
import sys
from pathlib import Path
# Add the current directory to the path
sys.path.insert(0, str(Path(__file__).parent.parent / 'src'))
from config.schema import GraphitiConfig
from services.factories import DatabaseDriverFactory, EmbedderFactory, LLMClientFactory
def test_config_loading():
    """Test loading configuration from YAML and environment variables."""
    print('Testing configuration loading...')

    # Defaults are read from config.yaml.
    config = GraphitiConfig()
    print('✓ Loaded configuration successfully')
    for label, value in (
        ('Server transport', config.server.transport),
        ('LLM provider', config.llm.provider),
        ('LLM model', config.llm.model),
        ('Embedder provider', config.embedder.provider),
        ('Database provider', config.database.provider),
        ('Group ID', config.graphiti.group_id),
    ):
        print(f' - {label}: {value}')

    # Nested env vars (double-underscore delimiter) override YAML values.
    os.environ['LLM__PROVIDER'] = 'anthropic'
    os.environ['LLM__MODEL'] = 'claude-3-opus'

    config2 = GraphitiConfig()
    print('\n✓ Environment variable overrides work')
    print(f' - LLM provider (overridden): {config2.llm.provider}')
    print(f' - LLM model (overridden): {config2.llm.model}')

    # Clean up env vars so later tests see the original environment.
    del os.environ['LLM__PROVIDER']
    del os.environ['LLM__MODEL']

    assert config is not None
    assert config2 is not None

    # Return the first config for subsequent tests
    return config
def test_llm_factory(config: GraphitiConfig):
    """Test LLM client factory creation.

    Exercises two paths: creating a client for the configured OpenAI
    provider (only when an API key is present), and switching the provider
    to Gemini with a dummy key to prove the factory dispatches on
    ``provider``.

    Args:
        config: Fully loaded application configuration.
    """
    print('\nTesting LLM client factory...')

    # Test OpenAI client creation (if API key is set)
    if (
        config.llm.provider == 'openai'
        and config.llm.providers.openai
        and config.llm.providers.openai.api_key
    ):
        try:
            client = LLMClientFactory.create(config.llm)
            print(f'✓ Created {config.llm.provider} LLM client successfully')
            print(f' - Model: {client.model}')
            print(f' - Temperature: {client.temperature}')
        except Exception as e:
            print(f'✗ Failed to create LLM client: {e}')
    else:
        print(f'⚠ Skipping LLM factory test (no API key configured for {config.llm.provider})')

    # Test switching providers
    # NOTE(review): model_copy() without deep=True shares nested provider
    # objects with the original config, so the api_key assignment below may
    # mutate `config` as well — confirm this is intended.
    test_config = config.llm.model_copy()
    test_config.provider = 'gemini'
    if not test_config.providers.gemini:
        # GeminiProviderConfig is only needed on this branch.
        from config.schema import GeminiProviderConfig

        test_config.providers.gemini = GeminiProviderConfig(api_key='dummy_value_for_testing')
    else:
        test_config.providers.gemini.api_key = 'dummy_value_for_testing'

    try:
        client = LLMClientFactory.create(test_config)
        print('✓ Factory supports provider switching (tested with Gemini)')
    except Exception as e:
        print(f'✗ Factory provider switching failed: {e}')
def test_embedder_factory(config: GraphitiConfig):
    """Test Embedder client factory creation."""
    print('\nTesting Embedder client factory...')

    # Only attempt creation when an OpenAI key is actually configured.
    openai_cfg = config.embedder.providers.openai
    if not (config.embedder.provider == 'openai' and openai_cfg and openai_cfg.api_key):
        print(
            f'⚠ Skipping Embedder factory test (no API key configured for {config.embedder.provider})'
        )
        return

    try:
        _ = EmbedderFactory.create(config.embedder)
        print(f'✓ Created {config.embedder.provider} Embedder client successfully')
        # The embedder client may not expose model/dimensions as attributes
        print(f' - Configured model: {config.embedder.model}')
        print(f' - Configured dimensions: {config.embedder.dimensions}')
    except Exception as e:
        print(f'✗ Failed to create Embedder client: {e}')
async def test_database_factory(config: GraphitiConfig):
    """Test Database driver factory creation.

    Builds the driver configuration for the selected provider and, when the
    provider is Neo4j, attempts a real connection. A connection failure is
    reported as a warning rather than a test failure so the suite can run
    without a live database.

    Args:
        config: Fully loaded application configuration.
    """
    print('\nTesting Database driver factory...')

    # Test Neo4j config creation
    if config.database.provider == 'neo4j' and config.database.providers.neo4j:
        try:
            db_config = DatabaseDriverFactory.create_config(config.database)
            print(f'✓ Created {config.database.provider} configuration successfully')
            print(f' - URI: {db_config["uri"]}')
            print(f' - User: {db_config["user"]}')
            # Mask the password; only its length is revealed.
            print(
                f' - Password: {"*" * len(db_config["password"]) if db_config["password"] else "None"}'
            )

            # Test actual connection would require initializing Graphiti
            from graphiti_core import Graphiti

            try:
                # This will fail if Neo4j is not running, but tests the config
                graphiti = Graphiti(
                    uri=db_config['uri'],
                    user=db_config['user'],
                    password=db_config['password'],
                )
                await graphiti.driver.client.verify_connectivity()
                print(' ✓ Successfully connected to Neo4j')
                await graphiti.driver.client.close()
            except Exception as e:
                # Only the exception type is printed; the message may contain
                # connection details.
                print(f' ⚠ Could not connect to Neo4j (is it running?): {type(e).__name__}')
        except Exception as e:
            print(f'✗ Failed to create Database configuration: {e}')
    else:
        print(f'⚠ Skipping Database factory test (no configuration for {config.database.provider})')
def test_cli_override():
    """Test CLI argument override functionality."""
    print('\nTesting CLI argument override...')

    # Stand-in for the argparse.Namespace the real CLI would produce.
    class Args:
        config = Path('config.yaml')
        transport = 'stdio'
        llm_provider = 'anthropic'
        model = 'claude-3-sonnet'
        temperature = 0.5
        embedder_provider = 'voyage'
        embedder_model = 'voyage-3'
        database_provider = 'falkordb'
        group_id = 'test-group'
        user_id = 'test-user'

    config = GraphitiConfig()
    config.apply_cli_overrides(Args())

    print('✓ CLI overrides applied successfully')
    for label, value in (
        ('Transport', config.server.transport),
        ('LLM provider', config.llm.provider),
        ('LLM model', config.llm.model),
        ('Temperature', config.llm.temperature),
        ('Embedder provider', config.embedder.provider),
        ('Database provider', config.database.provider),
        ('Group ID', config.graphiti.group_id),
        ('User ID', config.graphiti.user_id),
    ):
        print(f' - {label}: {value}')
async def main():
    """Run all tests."""
    banner = '=' * 60
    print(banner)
    print('Configuration and Factory Pattern Test Suite')
    print(banner)

    try:
        # Configuration loading feeds its result into the factory tests.
        config = test_config_loading()

        test_llm_factory(config)
        test_embedder_factory(config)
        await test_database_factory(config)

        test_cli_override()

        print('\n' + banner)
        print('✓ All tests completed successfully!')
        print(banner)
    except Exception as e:
        print(f'\n✗ Test suite failed: {e}')
        sys.exit(1)


if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/tests/test_configuration.py",
"license": "Apache License 2.0",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:mcp_server/tests/test_falkordb_integration.py | #!/usr/bin/env python3
"""
FalkorDB integration test for the Graphiti MCP Server.
Tests MCP server functionality with FalkorDB as the graph database backend.
"""
import asyncio
import json
import time
from typing import Any

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
class GraphitiFalkorDBIntegrationTest:
    """Integration test client for Graphiti MCP Server using FalkorDB backend."""

    def __init__(self):
        self.test_group_id = f'falkor_test_group_{int(time.time())}'
        self.session = None
        # Async context manager returned by stdio_client(); retained so
        # __aexit__ can tear down the transport (and server subprocess).
        self._stdio_ctx = None

    async def __aenter__(self):
        """Start the MCP client session with FalkorDB configuration."""
        # Configure server parameters to run with FalkorDB backend
        server_params = StdioServerParameters(
            command='uv',
            args=['run', 'main.py', '--transport', 'stdio', '--database-provider', 'falkordb'],
            env={
                'FALKORDB_URI': 'redis://localhost:6379',
                'FALKORDB_PASSWORD': '',  # No password for test instance
                'FALKORDB_DATABASE': 'default_db',
                'OPENAI_API_KEY': 'dummy_key_for_testing',
                'GRAPHITI_GROUP_ID': self.test_group_id,
            },
        )

        # BUG FIX: stdio_client() is an async context manager that yields a
        # (read_stream, write_stream) pair, not a usable session. The
        # original stored that pair as `self.session`, so call_tool() could
        # never work. Wrap the streams in a ClientSession and initialize it,
        # mirroring tests/test_fixtures.py::graphiti_test_client.
        self._stdio_ctx = stdio_client(server_params)
        read_stream, write_stream = await self._stdio_ctx.__aenter__()
        self.session = ClientSession(read_stream, write_stream)
        await self.session.initialize()
        print(' 📡 Started MCP client session with FalkorDB backend')
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Clean up the MCP client session."""
        if self._stdio_ctx is not None:
            # Exiting the stdio context closes the streams and shuts down
            # the spawned server process.
            await self._stdio_ctx.__aexit__(exc_type, exc_val, exc_tb)
            self._stdio_ctx = None
            self.session = None
            print(' 🔌 Closed MCP client session')

    async def call_mcp_tool(self, tool_name: str, arguments: dict[str, Any]) -> dict[str, Any]:
        """Call an MCP tool via the stdio client.

        Returns the tool's JSON payload when parseable; otherwise wraps the
        raw text/content. Errors are returned as a dict rather than raised.
        """
        try:
            result = await self.session.call_tool(tool_name, arguments)
            if hasattr(result, 'content') and result.content:
                # Handle different content types
                if hasattr(result.content[0], 'text'):
                    content = result.content[0].text
                    try:
                        return json.loads(content)
                    except json.JSONDecodeError:
                        return {'raw_response': content}
                else:
                    return {'content': str(result.content[0])}
            return {'result': 'success', 'content': None}
        except Exception as e:
            return {'error': str(e), 'tool': tool_name, 'arguments': arguments}

    async def test_server_status(self) -> bool:
        """Test the get_status tool to verify FalkorDB connectivity."""
        print(' 🏥 Testing server status with FalkorDB...')
        result = await self.call_mcp_tool('get_status', {})

        if 'error' in result:
            print(f' ❌ Status check failed: {result["error"]}')
            return False

        # Check if status indicates FalkorDB is working
        status_text = result.get('raw_response', result.get('content', ''))
        if 'running' in str(status_text).lower() or 'ready' in str(status_text).lower():
            print(' ✅ Server status OK with FalkorDB')
            return True
        else:
            print(f' ⚠️ Status unclear: {status_text}')
            return True  # Don't fail on unclear status

    async def test_add_episode(self) -> bool:
        """Test adding an episode to FalkorDB."""
        print(' 📝 Testing episode addition to FalkorDB...')

        episode_data = {
            'name': 'FalkorDB Test Episode',
            'episode_body': 'This is a test episode to verify FalkorDB integration works correctly.',
            'source': 'text',
            'source_description': 'Integration test for FalkorDB backend',
        }

        result = await self.call_mcp_tool('add_episode', episode_data)

        if 'error' in result:
            print(f' ❌ Add episode failed: {result["error"]}')
            return False

        print(' ✅ Episode added successfully to FalkorDB')
        return True

    async def test_search_functionality(self) -> bool:
        """Test search functionality with FalkorDB."""
        print(' 🔍 Testing search functionality with FalkorDB...')

        # Give some time for episode processing
        await asyncio.sleep(2)

        # Test node search
        search_result = await self.call_mcp_tool(
            'search_nodes', {'query': 'FalkorDB test episode', 'limit': 5}
        )

        if 'error' in search_result:
            print(f' ⚠️ Search returned error (may be expected): {search_result["error"]}')
            return True  # Don't fail on search errors in integration test

        print(' ✅ Search functionality working with FalkorDB')
        return True

    async def test_clear_graph(self) -> bool:
        """Test clearing the graph in FalkorDB."""
        print(' 🧹 Testing graph clearing in FalkorDB...')

        result = await self.call_mcp_tool('clear_graph', {})

        if 'error' in result:
            print(f' ❌ Clear graph failed: {result["error"]}')
            return False

        print(' ✅ Graph cleared successfully in FalkorDB')
        return True
async def run_falkordb_integration_test() -> bool:
    """Run the complete FalkorDB integration test suite.

    Returns:
        True when all tests pass (or at least 70% of them); False when
        setup fails outright.
    """
    print('🧪 Starting FalkorDB Integration Test Suite')
    print('=' * 55)

    # Collected as (test name, passed?) pairs for the summary below.
    test_results = []

    try:
        async with GraphitiFalkorDBIntegrationTest() as test_client:
            print(f' 🎯 Using test group: {test_client.test_group_id}')

            # Run test suite
            tests = [
                ('Server Status', test_client.test_server_status),
                ('Add Episode', test_client.test_add_episode),
                ('Search Functionality', test_client.test_search_functionality),
                ('Clear Graph', test_client.test_clear_graph),
            ]

            for test_name, test_func in tests:
                print(f'\n🔬 Running {test_name} Test...')
                try:
                    result = await test_func()
                    test_results.append((test_name, result))
                    if result:
                        print(f' ✅ {test_name}: PASSED')
                    else:
                        print(f' ❌ {test_name}: FAILED')
                except Exception as e:
                    # A crashing test counts as a failure but does not stop
                    # the rest of the suite.
                    print(f' 💥 {test_name}: ERROR - {e}')
                    test_results.append((test_name, False))

    except Exception as e:
        print(f'💥 Test setup failed: {e}')
        return False

    # Summary
    print('\n' + '=' * 55)
    print('📊 FalkorDB Integration Test Results:')
    print('-' * 30)

    passed = sum(1 for _, result in test_results if result)
    total = len(test_results)

    for test_name, result in test_results:
        status = '✅ PASS' if result else '❌ FAIL'
        print(f' {test_name}: {status}')

    print(f'\n🎯 Overall: {passed}/{total} tests passed')

    if passed == total:
        print('🎉 All FalkorDB integration tests PASSED!')
        return True
    else:
        print('⚠️ Some FalkorDB integration tests failed')
        return passed >= (total * 0.7)  # Pass if 70% of tests pass


if __name__ == '__main__':
    success = asyncio.run(run_falkordb_integration_test())
    exit(0 if success else 1)
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/tests/test_falkordb_integration.py",
"license": "Apache License 2.0",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:mcp_server/tests/test_fixtures.py | """
Shared test fixtures and utilities for Graphiti MCP integration tests.
"""
import asyncio
import contextlib
import json
import os
import random
import time
from contextlib import asynccontextmanager
from typing import Any
import pytest
from faker import Faker
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
# Shared Faker instance used by all generators below.
fake = Faker()


class TestDataGenerator:
    """Generate realistic test data for various scenarios.

    All methods are static and return freshly randomized data on each call;
    outputs are not deterministic across invocations.
    """

    @staticmethod
    def generate_company_profile() -> dict[str, Any]:
        """Generate a realistic company profile with products and leadership."""
        return {
            'company': {
                'name': fake.company(),
                'founded': random.randint(1990, 2023),
                'industry': random.choice(['Tech', 'Finance', 'Healthcare', 'Retail']),
                'employees': random.randint(10, 10000),
                'revenue': f'${random.randint(1, 1000)}M',
                'headquarters': fake.city(),
            },
            # 1-5 products per company.
            'products': [
                {
                    'id': fake.uuid4()[:8],
                    'name': fake.catch_phrase(),
                    'category': random.choice(['Software', 'Hardware', 'Service']),
                    'price': random.randint(10, 10000),
                }
                for _ in range(random.randint(1, 5))
            ],
            'leadership': {
                'ceo': fake.name(),
                'cto': fake.name(),
                'cfo': fake.name(),
            },
        }

    @staticmethod
    def generate_conversation(turns: int = 3) -> str:
        """Generate a realistic conversation.

        Args:
            turns: Number of user/assistant exchanges to produce.

        Returns:
            Newline-joined transcript with 'user:'/'assistant:' prefixes.
        """
        topics = [
            'product features',
            'pricing',
            'technical support',
            'integration',
            'documentation',
            'performance',
        ]

        conversation = []
        for _ in range(turns):
            topic = random.choice(topics)
            user_msg = f'user: {fake.sentence()} about {topic}?'
            assistant_msg = f'assistant: {fake.paragraph(nb_sentences=2)}'
            conversation.extend([user_msg, assistant_msg])

        return '\n'.join(conversation)

    @staticmethod
    def generate_technical_document() -> str:
        """Generate technical documentation content as markdown sections."""
        sections = [
            f'# {fake.catch_phrase()}\n\n{fake.paragraph()}',
            f'## Architecture\n{fake.paragraph()}',
            f'## Implementation\n{fake.paragraph()}',
            f'## Performance\n- Latency: {random.randint(1, 100)}ms\n- Throughput: {random.randint(100, 10000)} req/s',
            f'## Dependencies\n- {fake.word()}\n- {fake.word()}\n- {fake.word()}',
        ]
        return '\n\n'.join(sections)

    @staticmethod
    def generate_news_article() -> str:
        """Generate a news article (press-release style)."""
        company = fake.company()
        return f"""
{company} Announces {fake.catch_phrase()}

{fake.city()}, {fake.date()} - {company} today announced {fake.paragraph()}.

"This is a significant milestone," said {fake.name()}, CEO of {company}.
"{fake.sentence()}"

The announcement comes after {fake.paragraph()}.

Industry analysts predict {fake.paragraph()}.
"""

    @staticmethod
    def generate_user_profile() -> dict[str, Any]:
        """Generate a user profile with preferences and activity history."""
        return {
            'user_id': fake.uuid4(),
            'name': fake.name(),
            'email': fake.email(),
            'joined': fake.date_time_this_year().isoformat(),
            'preferences': {
                'theme': random.choice(['light', 'dark', 'auto']),
                'notifications': random.choice([True, False]),
                'language': random.choice(['en', 'es', 'fr', 'de']),
            },
            'activity': {
                'last_login': fake.date_time_this_month().isoformat(),
                'total_sessions': random.randint(1, 1000),
                'average_duration': f'{random.randint(1, 60)} minutes',
            },
        }
class MockLLMProvider:
    """Mock LLM provider for testing without actual API calls."""

    def __init__(self, delay: float = 0.1):
        # Artificial latency applied to every generate() call.
        self.delay = delay

    async def generate(self, prompt: str) -> str:
        """Return a deterministic canned response after a simulated delay."""
        await asyncio.sleep(self.delay)

        # Responses are keyed on case-insensitive prompt content.
        lowered = prompt.lower()
        if 'extract entities' in lowered:
            payload = {
                'entities': [
                    {'name': 'TestEntity1', 'type': 'PERSON'},
                    {'name': 'TestEntity2', 'type': 'ORGANIZATION'},
                ]
            }
            return json.dumps(payload)
        if 'summarize' in lowered:
            return 'This is a test summary of the provided content.'
        return 'Mock LLM response'
@asynccontextmanager
async def graphiti_test_client(
    group_id: str | None = None,
    database: str = 'falkordb',
    use_mock_llm: bool = False,
    config_overrides: dict[str, Any] | None = None,
):
    """
    Context manager for creating test clients with various configurations.

    Spawns the MCP server as a subprocess over stdio, yields an initialized
    ClientSession together with the test group id, and clears that group's
    graph data on exit (best effort).

    Args:
        group_id: Test group identifier
        database: Database backend (neo4j, falkordb)
        use_mock_llm: Whether to use mock LLM for faster tests
        config_overrides: Additional config overrides
    """
    test_group_id = group_id or f'test_{int(time.time())}_{random.randint(1000, 9999)}'

    env = {
        'DATABASE_PROVIDER': database,
        # NOTE(review): when use_mock_llm is False and OPENAI_API_KEY is not
        # set, this stores None in the env dict — confirm that
        # StdioServerParameters tolerates None values.
        'OPENAI_API_KEY': os.environ.get('OPENAI_API_KEY', 'test_key' if use_mock_llm else None),
    }

    # Database-specific configuration
    if database == 'neo4j':
        env.update(
            {
                'NEO4J_URI': os.environ.get('NEO4J_URI', 'bolt://localhost:7687'),
                'NEO4J_USER': os.environ.get('NEO4J_USER', 'neo4j'),
                'NEO4J_PASSWORD': os.environ.get('NEO4J_PASSWORD', 'graphiti'),
            }
        )
    elif database == 'falkordb':
        env['FALKORDB_URI'] = os.environ.get('FALKORDB_URI', 'redis://localhost:6379')

    # Apply config overrides
    if config_overrides:
        env.update(config_overrides)

    # Add mock LLM flag if needed
    if use_mock_llm:
        env['USE_MOCK_LLM'] = 'true'

    server_params = StdioServerParameters(
        command='uv', args=['run', 'main.py', '--transport', 'stdio'], env=env
    )

    async with stdio_client(server_params) as (read, write):
        session = ClientSession(read, write)
        await session.initialize()

        try:
            yield session, test_group_id
        finally:
            # Cleanup: Clear test data
            # Errors are suppressed: the server may already be gone at teardown.
            with contextlib.suppress(Exception):
                await session.call_tool('clear_graph', {'group_id': test_group_id})
            # NOTE(review): ClientSession.close() may not exist in all MCP SDK
            # versions — verify against the pinned mcp dependency.
            await session.close()
class PerformanceBenchmark:
    """Track and analyze performance benchmarks."""

    def __init__(self):
        # operation name -> list of recorded durations (seconds)
        self.measurements: dict[str, list[float]] = {}

    def record(self, operation: str, duration: float):
        """Record a performance measurement."""
        self.measurements.setdefault(operation, []).append(duration)

    def get_stats(self, operation: str) -> dict[str, float]:
        """Get statistics for an operation; empty dict when nothing recorded."""
        durations = self.measurements.get(operation)
        if not durations:
            return {}

        ordered = sorted(durations)
        count = len(ordered)
        return {
            'count': count,
            'mean': sum(ordered) / count,
            'min': ordered[0],
            'max': ordered[-1],
            'median': ordered[count // 2],
        }

    def report(self) -> str:
        """Generate a performance report."""
        lines = ['Performance Benchmark Report', '=' * 40]

        for operation in sorted(self.measurements):
            stats = self.get_stats(operation)
            lines.append(f'\n{operation}:')
            lines.append(f' Samples: {stats["count"]}')
            lines.append(f' Mean: {stats["mean"]:.3f}s')
            lines.append(f' Median: {stats["median"]:.3f}s')
            lines.append(f' Min: {stats["min"]:.3f}s')
            lines.append(f' Max: {stats["max"]:.3f}s')

        return '\n'.join(lines)
# Pytest fixtures


@pytest.fixture
def test_data_generator():
    """Provide test data generator."""
    return TestDataGenerator()


@pytest.fixture
def performance_benchmark():
    """Provide performance benchmark tracker."""
    return PerformanceBenchmark()


@pytest.fixture
async def mock_graphiti_client():
    """Provide a Graphiti client with mocked LLM (faster; no real API calls)."""
    async with graphiti_test_client(use_mock_llm=True) as (session, group_id):
        yield session, group_id


@pytest.fixture
async def graphiti_client():
    """Provide a real Graphiti client (requires a live LLM API key)."""
    async with graphiti_test_client(use_mock_llm=False) as (session, group_id):
        yield session, group_id
# Test data fixtures


@pytest.fixture
def sample_memories():
    """Provide sample memory data for testing.

    Four episodes covering each source type used by the server:
    json, text (article and documentation), and message.
    """
    return [
        {
            'name': 'Company Overview',
            'episode_body': TestDataGenerator.generate_company_profile(),
            'source': 'json',
            'source_description': 'company database',
        },
        {
            'name': 'Product Launch',
            'episode_body': TestDataGenerator.generate_news_article(),
            'source': 'text',
            'source_description': 'press release',
        },
        {
            'name': 'Customer Support',
            'episode_body': TestDataGenerator.generate_conversation(),
            'source': 'message',
            'source_description': 'support chat',
        },
        {
            'name': 'Technical Specs',
            'episode_body': TestDataGenerator.generate_technical_document(),
            'source': 'text',
            'source_description': 'documentation',
        },
    ]


@pytest.fixture
def large_dataset():
    """Generate a large dataset (50 documents) for stress testing."""
    return [
        {
            'name': f'Document {i}',
            'episode_body': TestDataGenerator.generate_technical_document(),
            'source': 'text',
            'source_description': 'bulk import',
        }
        for i in range(50)
    ]
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/tests/test_fixtures.py",
"license": "Apache License 2.0",
"lines": 270,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:mcp_server/tests/test_http_integration.py | #!/usr/bin/env python3
"""
Integration test for MCP server using HTTP streaming transport.
This avoids the stdio subprocess timing issues.
"""
import asyncio
import json
import sys
import time
from mcp.client.session import ClientSession
async def test_http_transport(base_url: str = 'http://localhost:8000'):
    """Test MCP server with HTTP streaming transport.

    Returns True when the full check sequence completes, False on a
    connection failure or tool-discovery failure.
    """
    # Import the streamable http client
    try:
        from mcp.client.streamable_http import streamablehttp_client as http_client
    except ImportError:
        print('❌ Streamable HTTP client not available in MCP SDK')
        return False

    test_group_id = f'test_http_{int(time.time())}'
    print('🚀 Testing MCP Server with HTTP streaming transport')
    print(f' Server URL: {base_url}')
    print(f' Test Group: {test_group_id}')
    print('=' * 60)

    try:
        # Connect to the server via HTTP
        print('\n🔌 Connecting to server...')
        # streamablehttp_client yields a 3-tuple (read_stream, write_stream,
        # get_session_id); the previous 2-tuple unpack raised ValueError.
        async with http_client(base_url) as (read_stream, write_stream, _get_session_id):
            # ClientSession must be entered as an async context manager so its
            # background message reader starts; initialize() stalls otherwise.
            async with ClientSession(read_stream, write_stream) as session:
                await session.initialize()
                print('✅ Connected successfully')

                # Test 1: List tools
                print('\n📋 Test 1: Listing tools...')
                try:
                    result = await session.list_tools()
                    tools = [tool.name for tool in result.tools]
                    expected = [
                        'add_memory',
                        'search_memory_nodes',
                        'search_memory_facts',
                        'get_episodes',
                        'delete_episode',
                        'clear_graph',
                    ]
                    found = [t for t in expected if t in tools]
                    print(f' ✅ Found {len(tools)} tools ({len(found)}/{len(expected)} expected)')
                    for tool in tools[:5]:
                        print(f' - {tool}')
                except Exception as e:
                    print(f' ❌ Failed: {e}')
                    return False

                # Test 2: Add memory
                print('\n📝 Test 2: Adding memory...')
                try:
                    result = await session.call_tool(
                        'add_memory',
                        {
                            'name': 'Integration Test Episode',
                            'episode_body': 'This is a test episode created via HTTP transport integration test.',
                            'group_id': test_group_id,
                            'source': 'text',
                            'source_description': 'HTTP Integration Test',
                        },
                    )
                    if result.content and result.content[0].text:
                        response = result.content[0].text
                        if 'success' in response.lower() or 'queued' in response.lower():
                            print(' ✅ Memory added successfully')
                        else:
                            print(f' ❌ Unexpected response: {response[:100]}')
                    else:
                        print(' ❌ No content in response')
                except Exception as e:
                    print(f' ❌ Failed: {e}')

                # Test 3: Search nodes (with delay for processing)
                print('\n🔍 Test 3: Searching nodes...')
                await asyncio.sleep(2)  # Wait for async processing
                try:
                    result = await session.call_tool(
                        'search_memory_nodes',
                        {'query': 'integration test episode', 'group_ids': [test_group_id], 'limit': 5},
                    )
                    if result.content and result.content[0].text:
                        response = result.content[0].text
                        try:
                            data = json.loads(response)
                            nodes = data.get('nodes', [])
                            print(f' ✅ Search returned {len(nodes)} nodes')
                        except Exception:  # noqa: E722
                            print(f' ✅ Search completed: {response[:100]}')
                    else:
                        print(' ⚠️ No results (may be processing)')
                except Exception as e:
                    print(f' ❌ Failed: {e}')

                # Test 4: Get episodes
                print('\n📚 Test 4: Getting episodes...')
                try:
                    result = await session.call_tool(
                        'get_episodes', {'group_ids': [test_group_id], 'limit': 10}
                    )
                    if result.content and result.content[0].text:
                        response = result.content[0].text
                        try:
                            data = json.loads(response)
                            episodes = data.get('episodes', [])
                            print(f' ✅ Found {len(episodes)} episodes')
                        except Exception:  # noqa: E722
                            print(f' ✅ Episodes retrieved: {response[:100]}')
                    else:
                        print(' ⚠️ No episodes found')
                except Exception as e:
                    print(f' ❌ Failed: {e}')

                # Test 5: Clear graph
                print('\n🧹 Test 5: Clearing graph...')
                try:
                    result = await session.call_tool('clear_graph', {'group_id': test_group_id})
                    if result.content and result.content[0].text:
                        response = result.content[0].text
                        if 'success' in response.lower() or 'cleared' in response.lower():
                            print(' ✅ Graph cleared successfully')
                        else:
                            print(f' ✅ Clear completed: {response[:100]}')
                    else:
                        print(' ❌ No response')
                except Exception as e:
                    print(f' ❌ Failed: {e}')

                print('\n' + '=' * 60)
                print('✅ All integration tests completed!')
                return True
    except Exception as e:
        print(f'\n❌ Connection failed: {e}')
        return False
async def test_sse_transport(base_url: str = 'http://localhost:8000'):
    """Test MCP server with SSE transport.

    Returns True when the tool listing succeeds, False on any failure.
    """
    # Import the SSE client
    try:
        from mcp.client.sse import sse_client
    except ImportError:
        print('❌ SSE client not available in MCP SDK')
        return False

    test_group_id = f'test_sse_{int(time.time())}'
    print('🚀 Testing MCP Server with SSE transport')
    print(f' Server URL: {base_url}/sse')
    print(f' Test Group: {test_group_id}')
    print('=' * 60)

    try:
        # Connect to the server via SSE
        print('\n🔌 Connecting to server...')
        async with sse_client(f'{base_url}/sse') as (read_stream, write_stream):
            # ClientSession must be entered as an async context manager so its
            # background message reader starts; initialize() stalls otherwise.
            async with ClientSession(read_stream, write_stream) as session:
                await session.initialize()
                print('✅ Connected successfully')

                # Run same tests as HTTP
                print('\n📋 Test 1: Listing tools...')
                try:
                    result = await session.list_tools()
                    tools = [tool.name for tool in result.tools]
                    print(f' ✅ Found {len(tools)} tools')
                    for tool in tools[:3]:
                        print(f' - {tool}')
                except Exception as e:
                    print(f' ❌ Failed: {e}')
                    return False

                print('\n' + '=' * 60)
                print('✅ SSE transport test completed!')
                return True
    except Exception as e:
        print(f'\n❌ SSE connection failed: {e}')
        return False
async def main():
    """Run integration tests."""
    argv = sys.argv
    # Require at least the transport argument.
    if len(argv) < 2:
        print('Usage: python test_http_integration.py <transport> [host] [port]')
        print(' transport: http or sse')
        print(' host: server host (default: localhost)')
        print(' port: server port (default: 8000)')
        sys.exit(1)

    transport = argv[1].lower()
    host = argv[2] if len(argv) > 2 else 'localhost'
    port = argv[3] if len(argv) > 3 else '8000'
    base_url = f'http://{host}:{port}'

    # Probe the server before running any transport-specific test.
    import httpx

    try:
        async with httpx.AsyncClient() as client:
            # Any response (even an error page) proves the server is up.
            await client.get(base_url, timeout=2.0)
    except Exception:  # noqa: E722
        print(f'⚠️ Server not responding at {base_url}')
        print('Please start the server with one of these commands:')
        print(f' uv run main.py --transport http --port {port}')
        print(f' uv run main.py --transport sse --port {port}')
        sys.exit(1)

    # Dispatch to the matching transport test.
    runners = {'http': test_http_transport, 'sse': test_sse_transport}
    runner = runners.get(transport)
    if runner is None:
        print(f'❌ Unknown transport: {transport}')
        sys.exit(1)
    success = await runner(base_url)
    sys.exit(0 if success else 1)
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/tests/test_http_integration.py",
"license": "Apache License 2.0",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:mcp_server/tests/test_integration.py | #!/usr/bin/env python3
"""
HTTP/SSE Integration test for the refactored Graphiti MCP Server.
Tests server functionality when running in SSE (Server-Sent Events) mode over HTTP.
Note: This test requires the server to be running with --transport sse.
"""
import asyncio
import json
import time
from typing import Any
import httpx
class MCPIntegrationTest:
    """Integration test client for Graphiti MCP Server.

    Posts raw JSON-RPC messages over HTTP to the server's /message endpoint
    and checks each major tool end-to-end.
    """

    def __init__(self, base_url: str = 'http://localhost:8000'):
        self.base_url = base_url
        self.client = httpx.AsyncClient(timeout=30.0)
        # Unique per-run group id keeps test data isolated in the graph.
        self.test_group_id = f'test_group_{int(time.time())}'

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.client.aclose()

    async def call_mcp_tool(self, tool_name: str, arguments: dict[str, Any]) -> dict[str, Any]:
        """Call an MCP tool via the SSE endpoint.

        Note: may return a parsed list when the JSON-RPC 'result' field is a
        list; error conditions are reported as {'error': ...} dicts.
        """
        # MCP protocol message structure
        message = {
            'jsonrpc': '2.0',
            'id': int(time.time() * 1000),
            'method': 'tools/call',
            'params': {'name': tool_name, 'arguments': arguments},
        }
        try:
            response = await self.client.post(
                f'{self.base_url}/message',
                json=message,
                headers={'Content-Type': 'application/json'},
            )
            if response.status_code != 200:
                return {'error': f'HTTP {response.status_code}: {response.text}'}
            result = response.json()
            return result.get('result', result)
        except Exception as e:
            return {'error': str(e)}

    async def test_server_status(self) -> bool:
        """Test the get_status resource."""
        print('🔍 Testing server status...')
        try:
            response = await self.client.get(f'{self.base_url}/resources/http://graphiti/status')
            if response.status_code == 200:
                status = response.json()
                print(f' ✅ Server status: {status.get("status", "unknown")}')
                return status.get('status') == 'ok'
            else:
                print(f' ❌ Status check failed: HTTP {response.status_code}')
                return False
        except Exception as e:
            print(f' ❌ Status check failed: {e}')
            return False

    async def test_add_memory(self) -> dict[str, str]:
        """Test adding various types of memory episodes."""
        print('📝 Testing add_memory functionality...')
        episode_results = {}

        # Test 1: Add text episode
        print(' Testing text episode...')
        result = await self.call_mcp_tool(
            'add_memory',
            {
                'name': 'Test Company News',
                'episode_body': 'Acme Corp announced a revolutionary new AI product that will transform the industry. The CEO mentioned this is their biggest launch since 2020.',
                'source': 'text',
                'source_description': 'news article',
                'group_id': self.test_group_id,
            },
        )
        if 'error' in result:
            print(f' ❌ Text episode failed: {result["error"]}')
        else:
            print(f' ✅ Text episode queued: {result.get("message", "Success")}')
            episode_results['text'] = 'success'

        # Test 2: Add JSON episode
        print(' Testing JSON episode...')
        json_data = {
            'company': {'name': 'TechCorp', 'founded': 2010},
            'products': [
                {'id': 'P001', 'name': 'CloudSync', 'category': 'software'},
                {'id': 'P002', 'name': 'DataMiner', 'category': 'analytics'},
            ],
            'employees': 150,
        }
        result = await self.call_mcp_tool(
            'add_memory',
            {
                'name': 'Company Profile',
                'episode_body': json.dumps(json_data),
                'source': 'json',
                'source_description': 'CRM data',
                'group_id': self.test_group_id,
            },
        )
        if 'error' in result:
            print(f' ❌ JSON episode failed: {result["error"]}')
        else:
            print(f' ✅ JSON episode queued: {result.get("message", "Success")}')
            episode_results['json'] = 'success'

        # Test 3: Add message episode
        print(' Testing message episode...')
        result = await self.call_mcp_tool(
            'add_memory',
            {
                'name': 'Customer Support Chat',
                'episode_body': "user: What's your return policy?\nassistant: You can return items within 30 days of purchase with receipt.\nuser: Thanks!",
                'source': 'message',
                'source_description': 'support chat log',
                'group_id': self.test_group_id,
            },
        )
        if 'error' in result:
            print(f' ❌ Message episode failed: {result["error"]}')
        else:
            print(f' ✅ Message episode queued: {result.get("message", "Success")}')
            episode_results['message'] = 'success'

        return episode_results

    async def wait_for_processing(self, max_wait: int = 30) -> None:
        """Wait for episode processing to complete."""
        print(f'⏳ Waiting up to {max_wait} seconds for episode processing...')
        for i in range(max_wait):
            await asyncio.sleep(1)
            # Check if we have any episodes
            result = await self.call_mcp_tool(
                'get_episodes', {'group_id': self.test_group_id, 'last_n': 10}
            )
            # Skip only explicit error responses. A successful call returns a
            # plain list, so it must fall through to the check below.  (The
            # previous condition `not isinstance(result, dict) or 'error' in
            # result` also skipped lists, making the success branch
            # unreachable.)
            if isinstance(result, dict) and 'error' in result:
                continue
            if isinstance(result, list) and len(result) > 0:
                print(f' ✅ Found {len(result)} processed episodes after {i + 1} seconds')
                return
        print(f' ⚠️ Still waiting after {max_wait} seconds...')

    async def test_search_functions(self) -> dict[str, bool]:
        """Test search functionality."""
        print('🔍 Testing search functions...')
        results = {}

        # Test search_memory_nodes
        print(' Testing search_memory_nodes...')
        result = await self.call_mcp_tool(
            'search_memory_nodes',
            {
                'query': 'Acme Corp product launch',
                'group_ids': [self.test_group_id],
                'max_nodes': 5,
            },
        )
        if 'error' in result:
            print(f' ❌ Node search failed: {result["error"]}')
            results['nodes'] = False
        else:
            nodes = result.get('nodes', [])
            print(f' ✅ Node search returned {len(nodes)} nodes')
            results['nodes'] = True

        # Test search_memory_facts
        print(' Testing search_memory_facts...')
        result = await self.call_mcp_tool(
            'search_memory_facts',
            {
                'query': 'company products software',
                'group_ids': [self.test_group_id],
                'max_facts': 5,
            },
        )
        if 'error' in result:
            print(f' ❌ Fact search failed: {result["error"]}')
            results['facts'] = False
        else:
            facts = result.get('facts', [])
            print(f' ✅ Fact search returned {len(facts)} facts')
            results['facts'] = True

        return results

    async def test_episode_retrieval(self) -> bool:
        """Test episode retrieval."""
        print('📚 Testing episode retrieval...')
        result = await self.call_mcp_tool(
            'get_episodes', {'group_id': self.test_group_id, 'last_n': 10}
        )
        if 'error' in result:
            print(f' ❌ Episode retrieval failed: {result["error"]}')
            return False
        if isinstance(result, list):
            print(f' ✅ Retrieved {len(result)} episodes')
            # Print episode details
            for i, episode in enumerate(result[:3]):  # Show first 3
                name = episode.get('name', 'Unknown')
                source = episode.get('source', 'unknown')
                print(f' Episode {i + 1}: {name} (source: {source})')
            return len(result) > 0
        else:
            print(f' ❌ Unexpected result format: {type(result)}')
            return False

    async def test_edge_cases(self) -> dict[str, bool]:
        """Test edge cases and error handling."""
        print('🧪 Testing edge cases...')
        results = {}

        # Test with invalid group_id
        print(' Testing invalid group_id...')
        result = await self.call_mcp_tool(
            'search_memory_nodes',
            {'query': 'nonexistent data', 'group_ids': ['nonexistent_group'], 'max_nodes': 5},
        )
        # Should not error, just return empty results
        if 'error' not in result:
            nodes = result.get('nodes', [])
            print(f' ✅ Invalid group_id handled gracefully (returned {len(nodes)} nodes)')
            results['invalid_group'] = True
        else:
            print(f' ❌ Invalid group_id caused error: {result["error"]}')
            results['invalid_group'] = False

        # Test empty query
        print(' Testing empty query...')
        result = await self.call_mcp_tool(
            'search_memory_nodes', {'query': '', 'group_ids': [self.test_group_id], 'max_nodes': 5}
        )
        if 'error' not in result:
            print(' ✅ Empty query handled gracefully')
            results['empty_query'] = True
        else:
            print(f' ❌ Empty query caused error: {result["error"]}')
            results['empty_query'] = False

        return results

    async def run_full_test_suite(self) -> dict[str, Any]:
        """Run the complete integration test suite."""
        print('🚀 Starting Graphiti MCP Server Integration Test')
        print(f' Test group ID: {self.test_group_id}')
        print('=' * 60)
        results = {
            'server_status': False,
            'add_memory': {},
            'search': {},
            'episodes': False,
            'edge_cases': {},
            'overall_success': False,
        }

        # Test 1: Server Status
        results['server_status'] = await self.test_server_status()
        if not results['server_status']:
            print('❌ Server not responding, aborting tests')
            return results
        print()

        # Test 2: Add Memory
        results['add_memory'] = await self.test_add_memory()
        print()

        # Test 3: Wait for processing
        await self.wait_for_processing()
        print()

        # Test 4: Search Functions
        results['search'] = await self.test_search_functions()
        print()

        # Test 5: Episode Retrieval
        results['episodes'] = await self.test_episode_retrieval()
        print()

        # Test 6: Edge Cases
        results['edge_cases'] = await self.test_edge_cases()
        print()

        # Calculate overall success
        memory_success = len(results['add_memory']) > 0
        search_success = any(results['search'].values())
        edge_case_success = any(results['edge_cases'].values())
        results['overall_success'] = (
            results['server_status']
            and memory_success
            and results['episodes']
            and (search_success or edge_case_success)  # At least some functionality working
        )

        # Print summary
        print('=' * 60)
        print('📊 TEST SUMMARY')
        print(f' Server Status: {"✅" if results["server_status"] else "❌"}')
        print(
            f' Memory Operations: {"✅" if memory_success else "❌"} ({len(results["add_memory"])} types)'
        )
        print(f' Search Functions: {"✅" if search_success else "❌"}')
        print(f' Episode Retrieval: {"✅" if results["episodes"] else "❌"}')
        print(f' Edge Cases: {"✅" if edge_case_success else "❌"}')
        print()
        print(f'🎯 OVERALL: {"✅ SUCCESS" if results["overall_success"] else "❌ FAILED"}')
        if results['overall_success']:
            print(' The refactored MCP server is working correctly!')
        else:
            print(' Some issues detected. Check individual test results above.')
        return results
async def main():
    """Run the integration test."""
    async with MCPIntegrationTest() as suite:
        report = await suite.run_full_test_suite()
        # Exit with appropriate code
        exit(0 if report['overall_success'] else 1)
if __name__ == '__main__':
asyncio.run(main())
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/tests/test_integration.py",
"license": "Apache License 2.0",
"lines": 295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:mcp_server/tests/test_mcp_integration.py | #!/usr/bin/env python3
"""
Integration test for the refactored Graphiti MCP Server using the official MCP Python SDK.
Tests all major MCP tools and handles episode processing latency.
"""
import asyncio
import json
import os
import time
from typing import Any
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
class GraphitiMCPIntegrationTest:
    """Integration test client for Graphiti MCP Server using official MCP SDK."""
    def __init__(self):
        # Unique per-run group id keeps this run's graph data isolated.
        self.test_group_id = f'test_group_{int(time.time())}'
        self.session = None
    async def __aenter__(self):
        """Start the MCP client session."""
        # Configure server parameters to run our refactored server
        server_params = StdioServerParameters(
            command='uv',
            args=['run', 'main.py', '--transport', 'stdio'],
            env={
                'NEO4J_URI': os.environ.get('NEO4J_URI', 'bolt://localhost:7687'),
                'NEO4J_USER': os.environ.get('NEO4J_USER', 'neo4j'),
                'NEO4J_PASSWORD': os.environ.get('NEO4J_PASSWORD', 'graphiti'),
                'OPENAI_API_KEY': os.environ.get('OPENAI_API_KEY', 'dummy_key_for_testing'),
            },
        )
        print(f'🚀 Starting MCP client session with test group: {self.test_group_id}')
        # Use the async context manager properly
        # NOTE(review): the stdio_client context is entered manually here and
        # exited in __aexit__.  ClientSession is normally entered as an async
        # context manager so its receive loop starts — confirm initialize()
        # works on a bare (non-entered) session with the pinned SDK version.
        self.client_context = stdio_client(server_params)
        read, write = await self.client_context.__aenter__()
        self.session = ClientSession(read, write)
        await self.session.initialize()
        return self
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Close the MCP client session."""
        if self.session:
            # NOTE(review): ClientSession may not expose close() in current MCP
            # SDKs (it is used as an async context manager) — verify.
            await self.session.close()
        if hasattr(self, 'client_context'):
            await self.client_context.__aexit__(exc_type, exc_val, exc_tb)
    async def call_tool(self, tool_name: str, arguments: dict[str, Any]) -> Any:
        """Call an MCP tool and return the result."""
        # Returns the first content item's text on success, or an error dict.
        try:
            result = await self.session.call_tool(tool_name, arguments)
            return result.content[0].text if result.content else {'error': 'No content returned'}
        except Exception as e:
            return {'error': str(e)}
    async def test_server_initialization(self) -> bool:
        """Test that the server initializes properly."""
        print('🔍 Testing server initialization...')
        try:
            # List available tools to verify server is responding
            tools_result = await self.session.list_tools()
            tools = [tool.name for tool in tools_result.tools]
            expected_tools = [
                'add_memory',
                'search_memory_nodes',
                'search_memory_facts',
                'get_episodes',
                'delete_episode',
                'delete_entity_edge',
                'get_entity_edge',
                'clear_graph',
            ]
            available_tools = len([tool for tool in expected_tools if tool in tools])
            print(
                f' ✅ Server responding with {len(tools)} tools ({available_tools}/{len(expected_tools)} expected)'
            )
            print(f' Available tools: {", ".join(sorted(tools))}')
            return available_tools >= len(expected_tools) * 0.8  # 80% of expected tools
        except Exception as e:
            print(f' ❌ Server initialization failed: {e}')
            return False
    async def test_add_memory_operations(self) -> dict[str, bool]:
        """Test adding various types of memory episodes."""
        # Exercises all three episode sources: text, json, and message.
        print('📝 Testing add_memory operations...')
        results = {}
        # Test 1: Add text episode
        print(' Testing text episode...')
        try:
            result = await self.call_tool(
                'add_memory',
                {
                    'name': 'Test Company News',
                    'episode_body': 'Acme Corp announced a revolutionary new AI product that will transform the industry. The CEO mentioned this is their biggest launch since 2020.',
                    'source': 'text',
                    'source_description': 'news article',
                    'group_id': self.test_group_id,
                },
            )
            if isinstance(result, str) and 'queued' in result.lower():
                print(f' ✅ Text episode: {result}')
                results['text'] = True
            else:
                print(f' ❌ Text episode failed: {result}')
                results['text'] = False
        except Exception as e:
            print(f' ❌ Text episode error: {e}')
            results['text'] = False
        # Test 2: Add JSON episode
        print(' Testing JSON episode...')
        try:
            json_data = {
                'company': {'name': 'TechCorp', 'founded': 2010},
                'products': [
                    {'id': 'P001', 'name': 'CloudSync', 'category': 'software'},
                    {'id': 'P002', 'name': 'DataMiner', 'category': 'analytics'},
                ],
                'employees': 150,
            }
            result = await self.call_tool(
                'add_memory',
                {
                    'name': 'Company Profile',
                    'episode_body': json.dumps(json_data),
                    'source': 'json',
                    'source_description': 'CRM data',
                    'group_id': self.test_group_id,
                },
            )
            if isinstance(result, str) and 'queued' in result.lower():
                print(f' ✅ JSON episode: {result}')
                results['json'] = True
            else:
                print(f' ❌ JSON episode failed: {result}')
                results['json'] = False
        except Exception as e:
            print(f' ❌ JSON episode error: {e}')
            results['json'] = False
        # Test 3: Add message episode
        print(' Testing message episode...')
        try:
            result = await self.call_tool(
                'add_memory',
                {
                    'name': 'Customer Support Chat',
                    'episode_body': "user: What's your return policy?\nassistant: You can return items within 30 days of purchase with receipt.\nuser: Thanks!",
                    'source': 'message',
                    'source_description': 'support chat log',
                    'group_id': self.test_group_id,
                },
            )
            if isinstance(result, str) and 'queued' in result.lower():
                print(f' ✅ Message episode: {result}')
                results['message'] = True
            else:
                print(f' ❌ Message episode failed: {result}')
                results['message'] = False
        except Exception as e:
            print(f' ❌ Message episode error: {e}')
            results['message'] = False
        return results
    async def wait_for_processing(self, max_wait: int = 45) -> bool:
        """Wait for episode processing to complete."""
        # Polls get_episodes once per second until episodes appear or timeout.
        print(f'⏳ Waiting up to {max_wait} seconds for episode processing...')
        for i in range(max_wait):
            await asyncio.sleep(1)
            try:
                # Check if we have any episodes
                result = await self.call_tool(
                    'get_episodes', {'group_id': self.test_group_id, 'last_n': 10}
                )
                # Parse the JSON result if it's a string
                if isinstance(result, str):
                    try:
                        parsed_result = json.loads(result)
                        if isinstance(parsed_result, list) and len(parsed_result) > 0:
                            print(
                                f' ✅ Found {len(parsed_result)} processed episodes after {i + 1} seconds'
                            )
                            return True
                    except json.JSONDecodeError:
                        # Non-JSON payloads still count if they mention episodes.
                        if 'episodes' in result.lower():
                            print(f' ✅ Episodes detected after {i + 1} seconds')
                            return True
            except Exception as e:
                if i == 0:  # Only log first error to avoid spam
                    print(f' ⚠️ Waiting for processing... ({e})')
                continue
        print(f' ⚠️ Still waiting after {max_wait} seconds...')
        return False
    async def test_search_operations(self) -> dict[str, bool]:
        """Test search functionality."""
        print('🔍 Testing search operations...')
        results = {}
        # Test search_memory_nodes
        print(' Testing search_memory_nodes...')
        try:
            result = await self.call_tool(
                'search_memory_nodes',
                {
                    'query': 'Acme Corp product launch AI',
                    'group_ids': [self.test_group_id],
                    'max_nodes': 5,
                },
            )
            success = False
            if isinstance(result, str):
                try:
                    parsed = json.loads(result)
                    nodes = parsed.get('nodes', [])
                    success = isinstance(nodes, list)
                    print(f' ✅ Node search returned {len(nodes)} nodes')
                except json.JSONDecodeError:
                    # Fall back to a keyword check on non-JSON responses.
                    success = 'nodes' in result.lower() and 'successfully' in result.lower()
                    if success:
                        print(' ✅ Node search completed successfully')
            results['nodes'] = success
            if not success:
                print(f' ❌ Node search failed: {result}')
        except Exception as e:
            print(f' ❌ Node search error: {e}')
            results['nodes'] = False
        # Test search_memory_facts
        print(' Testing search_memory_facts...')
        try:
            result = await self.call_tool(
                'search_memory_facts',
                {
                    'query': 'company products software TechCorp',
                    'group_ids': [self.test_group_id],
                    'max_facts': 5,
                },
            )
            success = False
            if isinstance(result, str):
                try:
                    parsed = json.loads(result)
                    facts = parsed.get('facts', [])
                    success = isinstance(facts, list)
                    print(f' ✅ Fact search returned {len(facts)} facts')
                except json.JSONDecodeError:
                    success = 'facts' in result.lower() and 'successfully' in result.lower()
                    if success:
                        print(' ✅ Fact search completed successfully')
            results['facts'] = success
            if not success:
                print(f' ❌ Fact search failed: {result}')
        except Exception as e:
            print(f' ❌ Fact search error: {e}')
            results['facts'] = False
        return results
    async def test_episode_retrieval(self) -> bool:
        """Test episode retrieval."""
        print('📚 Testing episode retrieval...')
        try:
            result = await self.call_tool(
                'get_episodes', {'group_id': self.test_group_id, 'last_n': 10}
            )
            if isinstance(result, str):
                try:
                    parsed = json.loads(result)
                    if isinstance(parsed, list):
                        print(f' ✅ Retrieved {len(parsed)} episodes')
                        # Show episode details
                        for i, episode in enumerate(parsed[:3]):
                            name = episode.get('name', 'Unknown')
                            source = episode.get('source', 'unknown')
                            print(f' Episode {i + 1}: {name} (source: {source})')
                        return len(parsed) > 0
                except json.JSONDecodeError:
                    # Check if response indicates success
                    if 'episode' in result.lower():
                        print(' ✅ Episode retrieval completed')
                        return True
            print(f' ❌ Unexpected result format: {result}')
            return False
        except Exception as e:
            print(f' ❌ Episode retrieval failed: {e}')
            return False
    async def test_error_handling(self) -> dict[str, bool]:
        """Test error handling and edge cases."""
        print('🧪 Testing error handling...')
        results = {}
        # Test with nonexistent group
        print(' Testing nonexistent group handling...')
        try:
            result = await self.call_tool(
                'search_memory_nodes',
                {
                    'query': 'nonexistent data',
                    'group_ids': ['nonexistent_group_12345'],
                    'max_nodes': 5,
                },
            )
            # Should handle gracefully, not crash
            success = (
                'error' not in str(result).lower() or 'not initialized' not in str(result).lower()
            )
            if success:
                print(' ✅ Nonexistent group handled gracefully')
            else:
                print(f' ❌ Nonexistent group caused issues: {result}')
            results['nonexistent_group'] = success
        except Exception as e:
            print(f' ❌ Nonexistent group test failed: {e}')
            results['nonexistent_group'] = False
        # Test empty query
        print(' Testing empty query handling...')
        try:
            result = await self.call_tool(
                'search_memory_nodes',
                {'query': '', 'group_ids': [self.test_group_id], 'max_nodes': 5},
            )
            # Should handle gracefully
            success = (
                'error' not in str(result).lower() or 'not initialized' not in str(result).lower()
            )
            if success:
                print(' ✅ Empty query handled gracefully')
            else:
                print(f' ❌ Empty query caused issues: {result}')
            results['empty_query'] = success
        except Exception as e:
            print(f' ❌ Empty query test failed: {e}')
            results['empty_query'] = False
        return results
    async def run_comprehensive_test(self) -> dict[str, Any]:
        """Run the complete integration test suite."""
        print('🚀 Starting Comprehensive Graphiti MCP Server Integration Test')
        print(f' Test group ID: {self.test_group_id}')
        print('=' * 70)
        results = {
            'server_init': False,
            'add_memory': {},
            'processing_wait': False,
            'search': {},
            'episodes': False,
            'error_handling': {},
            'overall_success': False,
        }
        # Test 1: Server Initialization
        results['server_init'] = await self.test_server_initialization()
        if not results['server_init']:
            print('❌ Server initialization failed, aborting remaining tests')
            return results
        print()
        # Test 2: Add Memory Operations
        results['add_memory'] = await self.test_add_memory_operations()
        print()
        # Test 3: Wait for Processing
        results['processing_wait'] = await self.wait_for_processing()
        print()
        # Test 4: Search Operations
        results['search'] = await self.test_search_operations()
        print()
        # Test 5: Episode Retrieval
        results['episodes'] = await self.test_episode_retrieval()
        print()
        # Test 6: Error Handling
        results['error_handling'] = await self.test_error_handling()
        print()
        # Calculate overall success
        memory_success = any(results['add_memory'].values())
        search_success = any(results['search'].values()) if results['search'] else False
        error_success = (
            any(results['error_handling'].values()) if results['error_handling'] else True
        )
        results['overall_success'] = (
            results['server_init']
            and memory_success
            and (results['episodes'] or results['processing_wait'])
            and error_success
        )
        # Print comprehensive summary
        print('=' * 70)
        print('📊 COMPREHENSIVE TEST SUMMARY')
        print('-' * 35)
        print(f'Server Initialization: {"✅ PASS" if results["server_init"] else "❌ FAIL"}')
        memory_stats = f'({sum(results["add_memory"].values())}/{len(results["add_memory"])} types)'
        print(
            f'Memory Operations: {"✅ PASS" if memory_success else "❌ FAIL"} {memory_stats}'
        )
        print(f'Processing Pipeline: {"✅ PASS" if results["processing_wait"] else "❌ FAIL"}')
        search_stats = (
            f'({sum(results["search"].values())}/{len(results["search"])} types)'
            if results['search']
            else '(0/0 types)'
        )
        print(
            f'Search Operations: {"✅ PASS" if search_success else "❌ FAIL"} {search_stats}'
        )
        print(f'Episode Retrieval: {"✅ PASS" if results["episodes"] else "❌ FAIL"}')
        error_stats = (
            f'({sum(results["error_handling"].values())}/{len(results["error_handling"])} cases)'
            if results['error_handling']
            else '(0/0 cases)'
        )
        print(
            f'Error Handling: {"✅ PASS" if error_success else "❌ FAIL"} {error_stats}'
        )
        print('-' * 35)
        print(f'🎯 OVERALL RESULT: {"✅ SUCCESS" if results["overall_success"] else "❌ FAILED"}')
        if results['overall_success']:
            print('\n🎉 The refactored Graphiti MCP server is working correctly!')
            print(' All core functionality has been successfully tested.')
        else:
            print('\n⚠️ Some issues were detected. Review the test results above.')
            print(' The refactoring may need additional attention.')
        return results
async def main():
    """Run the integration test."""
    try:
        async with GraphitiMCPIntegrationTest() as suite:
            report = await suite.run_comprehensive_test()
            # Exit with appropriate code
            exit(0 if report['overall_success'] else 1)
    except Exception as exc:
        print(f'❌ Test setup failed: {exc}')
        exit(1)
if __name__ == '__main__':
asyncio.run(main())
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/tests/test_mcp_integration.py",
"license": "Apache License 2.0",
"lines": 416,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:mcp_server/tests/test_mcp_transports.py | #!/usr/bin/env python3
"""
Test MCP server with different transport modes using the MCP SDK.
Tests both SSE and streaming HTTP transports.
"""
import asyncio
import json
import sys
import time
from mcp.client.session import ClientSession
from mcp.client.sse import sse_client
class MCPTransportTester:
"""Test MCP server with different transport modes."""
    def __init__(self, transport: str = 'sse', host: str = 'localhost', port: int = 8000):
        """Record connection settings and derive a unique test group id."""
        # Transport name is only used for labelling; the caller picks the
        # actual connect method (connect_sse / connect_http).
        self.transport = transport
        self.host = host
        self.port = port
        self.base_url = f'http://{host}:{port}'
        # Timestamp suffix keeps each run's graph data isolated.
        self.test_group_id = f'test_{transport}_{int(time.time())}'
        # Populated by the connect_* methods.
        self.session = None
async def connect_sse(self) -> ClientSession:
"""Connect using SSE transport."""
print(f'🔌 Connecting to MCP server via SSE at {self.base_url}/sse')
# Use the sse_client to connect
async with sse_client(self.base_url + '/sse') as (read_stream, write_stream):
self.session = ClientSession(read_stream, write_stream)
await self.session.initialize()
return self.session
async def connect_http(self) -> ClientSession:
"""Connect using streaming HTTP transport."""
from mcp.client.http import http_client
print(f'🔌 Connecting to MCP server via HTTP at {self.base_url}')
# Use the http_client to connect
async with http_client(self.base_url) as (read_stream, write_stream):
self.session = ClientSession(read_stream, write_stream)
await self.session.initialize()
return self.session
async def test_list_tools(self) -> bool:
"""Test listing available tools."""
print('\n📋 Testing list_tools...')
try:
result = await self.session.list_tools()
tools = [tool.name for tool in result.tools]
expected_tools = [
'add_memory',
'search_memory_nodes',
'search_memory_facts',
'get_episodes',
'delete_episode',
'get_entity_edge',
'delete_entity_edge',
'clear_graph',
]
print(f' ✅ Found {len(tools)} tools')
for tool in tools[:5]: # Show first 5 tools
print(f' - {tool}')
# Check if we have most expected tools
found_tools = [t for t in expected_tools if t in tools]
success = len(found_tools) >= len(expected_tools) * 0.8
if success:
print(
f' ✅ Tool discovery successful ({len(found_tools)}/{len(expected_tools)} expected tools)'
)
else:
print(f' ❌ Missing too many tools ({len(found_tools)}/{len(expected_tools)})')
return success
except Exception as e:
print(f' ❌ Failed to list tools: {e}')
return False
async def test_add_memory(self) -> bool:
    """Exercise add_memory and check the server acknowledges the episode."""
    print('\n📝 Testing add_memory...')
    payload = {
        'name': 'Test Episode',
        'episode_body': 'This is a test episode created by the MCP transport test suite.',
        'group_id': self.test_group_id,
        'source': 'text',
        'source_description': 'Integration test',
    }
    try:
        result = await self.session.call_tool('add_memory', payload)
        if result.content:
            first = result.content[0]
            if hasattr(first, 'text'):
                # JSON responses are parsed; plain text is wrapped for uniform access.
                if first.text.startswith('{'):
                    response = json.loads(first.text)
                else:
                    response = {'message': first.text}
                summary = str(response).lower()
                if 'success' in summary or 'queued' in summary:
                    print(f' ✅ Memory added successfully: {response.get("message", "OK")}')
                    return True
                print(f' ❌ Unexpected response: {response}')
                return False
        print(' ❌ No content in response')
        return False
    except Exception as e:
        print(f' ❌ Failed to add memory: {e}')
        return False
async def test_search_nodes(self) -> bool:
    """Run a node search against the test group."""
    print('\n🔍 Testing search_memory_nodes...')
    # Give the server a moment to process the previously added memory.
    await asyncio.sleep(2)
    try:
        result = await self.session.call_tool(
            'search_memory_nodes',
            {'query': 'test episode', 'group_ids': [self.test_group_id], 'limit': 5},
        )
        if result.content:
            first = result.content[0]
            if hasattr(first, 'text'):
                if first.text.startswith('{'):
                    parsed = json.loads(first.text)
                else:
                    parsed = {'nodes': []}
                print(f' ✅ Search returned {len(parsed.get("nodes", []))} nodes')
                return True
        print(' ⚠️ No nodes found (this may be expected if processing is async)')
        # Empty results are tolerated: ingestion may still be in flight.
        return True
    except Exception as e:
        print(f' ❌ Failed to search nodes: {e}')
        return False
async def test_get_episodes(self) -> bool:
    """Fetch recent episodes for the test group."""
    print('\n📚 Testing get_episodes...')
    try:
        result = await self.session.call_tool(
            'get_episodes', {'group_ids': [self.test_group_id], 'limit': 10}
        )
        if result.content:
            first = result.content[0]
            if hasattr(first, 'text'):
                if first.text.startswith('{'):
                    parsed = json.loads(first.text)
                else:
                    parsed = {'episodes': []}
                print(f' ✅ Found {len(parsed.get("episodes", []))} episodes')
                return True
        # An empty result is not a failure for this smoke test.
        print(' ⚠️ No episodes found')
        return True
    except Exception as e:
        print(f' ❌ Failed to get episodes: {e}')
        return False
async def test_clear_graph(self) -> bool:
    """Clear the test group's data from the graph."""
    print('\n🧹 Testing clear_graph...')
    try:
        result = await self.session.call_tool('clear_graph', {'group_id': self.test_group_id})
        if result.content:
            first = result.content[0]
            if hasattr(first, 'text'):
                text = first.text.lower()
                if 'success' in text or 'cleared' in text:
                    print(' ✅ Graph cleared successfully')
                    return True
        # Any other shape of response is treated as a failure.
        print(' ❌ Failed to clear graph')
        return False
    except Exception as e:
        print(f' ❌ Failed to clear graph: {e}')
        return False
async def run_tests(self) -> bool:
    """Run all tests for the configured transport.

    Connects with the transport chosen at construction time, runs each tool
    test in sequence, prints a pass/fail summary, and always attempts to
    close the session.

    Returns:
        True only if every individual test passed.
    """
    print(f'\n{"=" * 60}')
    print(f'🚀 Testing MCP Server with {self.transport.upper()} transport')
    print(f' Server: {self.base_url}')
    print(f' Test Group: {self.test_group_id}')
    print('=' * 60)
    try:
        # Connect based on transport type
        if self.transport == 'sse':
            await self.connect_sse()
        elif self.transport == 'http':
            await self.connect_http()
        else:
            print(f'❌ Unknown transport: {self.transport}')
            return False
        print(f'✅ Connected via {self.transport.upper()}')
        # Run tests sequentially; each returns a bool collected below.
        results = []
        results.append(await self.test_list_tools())
        results.append(await self.test_add_memory())
        results.append(await self.test_search_nodes())
        results.append(await self.test_get_episodes())
        results.append(await self.test_clear_graph())
        # Summary: True counts as 1, so sum(results) is the pass count.
        passed = sum(results)
        total = len(results)
        success = passed == total
        print(f'\n{"=" * 60}')
        print(f'📊 Results for {self.transport.upper()} transport:')
        print(f' Passed: {passed}/{total}')
        print(f' Status: {"✅ ALL TESTS PASSED" if success else "❌ SOME TESTS FAILED"}')
        print('=' * 60)
        return success
    except Exception as e:
        print(f'❌ Test suite failed: {e}')
        return False
    finally:
        # NOTE(review): ClientSession is used here without a context manager
        # and cleaned up via close(); confirm the installed MCP SDK version
        # actually exposes close() (some versions only support __aexit__).
        if self.session:
            await self.session.close()
async def main():
    """Run transport tests based on positional command-line arguments.

    Usage: script.py [transport] [host] [port]
    Defaults: transport='sse', host='localhost', port=8000.

    Exits the process with status 0 on success, 1 on failure.
    """
    # Parse command line arguments (positional, all optional).
    transport = sys.argv[1] if len(sys.argv) > 1 else 'sse'
    host = sys.argv[2] if len(sys.argv) > 2 else 'localhost'
    port = int(sys.argv[3]) if len(sys.argv) > 3 else 8000
    # Create tester and run the full suite.
    tester = MCPTransportTester(transport, host, port)
    success = await tester.run_tests()
    # sys.exit instead of the bare exit() builtin: exit() is injected by the
    # site module and is not guaranteed to exist (e.g. under `python -S`).
    sys.exit(0 if success else 1)
if __name__ == '__main__':
    # Script entry point: run the selected transport test suite.
    asyncio.run(main())
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/tests/test_mcp_transports.py",
"license": "Apache License 2.0",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:mcp_server/tests/test_stdio_simple.py | #!/usr/bin/env python3
"""
Simple test to verify MCP server works with stdio transport.
"""
import asyncio
import os
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
async def test_stdio():
    """Smoke-test the MCP server end to end over the stdio transport."""
    print('🚀 Testing MCP Server with stdio transport')
    print('=' * 50)
    # Environment for the spawned server process, with local-dev defaults.
    server_env = {
        'NEO4J_URI': os.environ.get('NEO4J_URI', 'bolt://localhost:7687'),
        'NEO4J_USER': os.environ.get('NEO4J_USER', 'neo4j'),
        'NEO4J_PASSWORD': os.environ.get('NEO4J_PASSWORD', 'graphiti'),
        'OPENAI_API_KEY': os.environ.get('OPENAI_API_KEY', 'dummy'),
    }
    params = StdioServerParameters(
        command='uv',
        args=['run', '../main.py', '--transport', 'stdio'],
        env=server_env,
    )
    try:
        async with stdio_client(params) as (read, write):  # noqa: SIM117
            async with ClientSession(read, write) as session:
                print('✅ Connected to server')
                await session.initialize()
                print('✅ Session initialized')
                # Give the spawned server time to be fully ready.
                await asyncio.sleep(2)
                print('\n📋 Listing available tools...')
                listing = await session.list_tools()
                print(f' Found {len(listing.tools)} tools:')
                for tool in listing.tools[:5]:
                    print(f' - {tool.name}')
                print('\n📝 Testing add_memory...')
                result = await session.call_tool(
                    'add_memory',
                    {
                        'name': 'Test Episode',
                        'episode_body': 'Simple test episode',
                        'group_id': 'test_group',
                        'source': 'text',
                    },
                )
                if result.content:
                    print(f' ✅ Memory added: {result.content[0].text[:100]}')
                print('\n🔍 Testing search_memory_nodes...')
                result = await session.call_tool(
                    'search_memory_nodes',
                    {'query': 'test', 'group_ids': ['test_group'], 'limit': 5},
                )
                if result.content:
                    print(f' ✅ Search completed: {result.content[0].text[:100]}')
                print('\n✅ All tests completed successfully!')
                return True
    except Exception as e:
        print(f'\n❌ Test failed: {e}')
        import traceback

        traceback.print_exc()
        return False
if __name__ == '__main__':
    success = asyncio.run(test_stdio())
    # raise SystemExit instead of the bare exit() builtin: exit() is injected
    # by the site module and is absent when Python runs with -S.
    raise SystemExit(0 if success else 1)
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/tests/test_stdio_simple.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:mcp_server/tests/test_stress_load.py | #!/usr/bin/env python3
"""
Stress and load testing for Graphiti MCP Server.
Tests system behavior under high load, resource constraints, and edge conditions.
"""
import asyncio
import gc
import random
import time
from dataclasses import dataclass
import psutil
import pytest
from test_fixtures import TestDataGenerator, graphiti_test_client
@dataclass
class LoadTestConfig:
    """Configuration for load testing scenarios."""

    num_clients: int = 10  # number of simulated concurrent clients
    operations_per_client: int = 100  # ops each client attempts at most
    ramp_up_time: float = 5.0  # seconds over which client starts are staggered
    test_duration: float = 60.0  # seconds; hard stop for the whole run
    target_throughput: float | None = None  # ops/sec (informational; not enforced here)
    think_time: float = 0.1  # seconds of pause between ops per client
@dataclass
class LoadTestResult:
    """Results from a load test run."""

    total_operations: int  # every attempted operation, pass or fail
    successful_operations: int
    failed_operations: int
    duration: float  # wall-clock span covered by the metrics, seconds
    throughput: float  # operations per second over `duration`
    average_latency: float  # seconds; same unit for the percentiles below
    p50_latency: float
    p95_latency: float
    p99_latency: float
    max_latency: float
    errors: dict[str, int]  # error/exception type name -> occurrence count
    resource_usage: dict[str, float]  # process snapshot: cpu, memory, threads
class LoadTester:
    """Orchestrate load testing scenarios."""

    def __init__(self, config: LoadTestConfig):
        self.config = config
        self.metrics: list[tuple[float, float, bool]] = []  # (start, duration, success)
        self.errors: dict[str, int] = {}  # error type name -> count
        self.start_time: float | None = None  # set by the caller when the run begins

    async def run_client_workload(self, client_id: int, session, group_id: str) -> dict[str, int]:
        """Run workload for a single simulated client.

        The client waits out its share of the ramp-up window, then performs
        randomly chosen tool calls, appending (start, duration, success)
        tuples to self.metrics and tallying errors in self.errors.

        Returns:
            dict with 'success' and 'failure' operation counts for this client.
        """
        stats = {'success': 0, 'failure': 0}
        data_gen = TestDataGenerator()
        # Ramp-up delay: client k starts k/N of the way into the window.
        ramp_delay = (client_id / self.config.num_clients) * self.config.ramp_up_time
        await asyncio.sleep(ramp_delay)
        for op_num in range(self.config.operations_per_client):
            operation_start = time.time()
            try:
                # Randomly select operation type
                operation = random.choice(
                    [
                        'add_memory',
                        'search_memory_nodes',
                        'get_episodes',
                    ]
                )
                if operation == 'add_memory':
                    args = {
                        'name': f'Load Test {client_id}-{op_num}',
                        'episode_body': data_gen.generate_technical_document(),
                        'source': 'text',
                        'source_description': 'load test',
                        'group_id': group_id,
                    }
                elif operation == 'search_memory_nodes':
                    # NOTE(review): other suites in this repo pass 'group_ids'
                    # (a list) to this tool; confirm the singular 'group_id'
                    # key matches the tool schema.
                    args = {
                        'query': random.choice(['performance', 'architecture', 'test', 'data']),
                        'group_id': group_id,
                        'limit': 10,
                    }
                else:  # get_episodes
                    args = {
                        'group_id': group_id,
                        'last_n': 10,
                    }
                # Execute operation with timeout
                await asyncio.wait_for(session.call_tool(operation, args), timeout=30.0)
                duration = time.time() - operation_start
                self.metrics.append((operation_start, duration, True))
                stats['success'] += 1
            except asyncio.TimeoutError:
                duration = time.time() - operation_start
                self.metrics.append((operation_start, duration, False))
                self.errors['timeout'] = self.errors.get('timeout', 0) + 1
                stats['failure'] += 1
            except Exception as e:
                duration = time.time() - operation_start
                self.metrics.append((operation_start, duration, False))
                error_type = type(e).__name__
                self.errors[error_type] = self.errors.get(error_type, 0) + 1
                stats['failure'] += 1
            # Think time between operations
            await asyncio.sleep(self.config.think_time)
            # Stop if we've exceeded test duration
            if self.start_time and (time.time() - self.start_time) > self.config.test_duration:
                break
        return stats

    def calculate_results(self) -> LoadTestResult:
        """Calculate load test results from metrics.

        Aggregates the recorded (start, duration, success) tuples into
        throughput, latency percentiles, and error counts, plus a snapshot
        of the current process's resource usage.
        """
        if not self.metrics:
            # Nothing recorded: return an all-zero result.
            return LoadTestResult(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {}, {})
        successful = [m for m in self.metrics if m[2]]
        failed = [m for m in self.metrics if not m[2]]
        latencies = sorted([m[1] for m in self.metrics])
        # Wall-clock span: first operation start to last operation completion.
        duration = max([m[0] + m[1] for m in self.metrics]) - min([m[0] for m in self.metrics])

        # Calculate percentiles (index-based, no interpolation)
        def percentile(data: list[float], p: float) -> float:
            if not data:
                return 0.0
            idx = int(len(data) * p / 100)
            return data[min(idx, len(data) - 1)]

        # Get resource usage of this (test) process at report time
        process = psutil.Process()
        resource_usage = {
            'cpu_percent': process.cpu_percent(),
            'memory_mb': process.memory_info().rss / 1024 / 1024,
            'num_threads': process.num_threads(),
        }
        return LoadTestResult(
            total_operations=len(self.metrics),
            successful_operations=len(successful),
            failed_operations=len(failed),
            duration=duration,
            throughput=len(self.metrics) / duration if duration > 0 else 0,
            average_latency=sum(latencies) / len(latencies) if latencies else 0,
            p50_latency=percentile(latencies, 50),
            p95_latency=percentile(latencies, 95),
            p99_latency=percentile(latencies, 99),
            max_latency=max(latencies) if latencies else 0,
            errors=self.errors,
            resource_usage=resource_usage,
        )
class TestLoadScenarios:
    """Various load testing scenarios."""

    @pytest.mark.asyncio
    @pytest.mark.slow
    async def test_sustained_load(self):
        """Test system under sustained moderate load."""
        config = LoadTestConfig(
            num_clients=5,
            operations_per_client=20,
            ramp_up_time=2.0,
            test_duration=30.0,
            think_time=0.5,
        )
        async with graphiti_test_client() as (session, group_id):
            tester = LoadTester(config)
            tester.start_time = time.time()
            # Run client workloads (one coroutine per simulated client)
            client_tasks = []
            for client_id in range(config.num_clients):
                task = tester.run_client_workload(client_id, session, group_id)
                client_tasks.append(task)
            # Execute all clients concurrently
            await asyncio.gather(*client_tasks)
            # Calculate results
            results = tester.calculate_results()
            # Assertions: majority success and bounded latency
            assert results.successful_operations > results.failed_operations
            assert results.average_latency < 5.0, (
                f'Average latency too high: {results.average_latency:.2f}s'
            )
            assert results.p95_latency < 10.0, f'P95 latency too high: {results.p95_latency:.2f}s'
            # Report results
            print('\nSustained Load Test Results:')
            print(f' Total operations: {results.total_operations}')
            print(
                f' Success rate: {results.successful_operations / results.total_operations * 100:.1f}%'
            )
            print(f' Throughput: {results.throughput:.2f} ops/s')
            print(f' Avg latency: {results.average_latency:.2f}s')
            print(f' P95 latency: {results.p95_latency:.2f}s')

    @pytest.mark.asyncio
    @pytest.mark.slow
    async def test_spike_load(self):
        """Test system response to sudden load spikes."""
        async with graphiti_test_client() as (session, group_id):
            # Normal load phase
            # NOTE(review): session.call_tool(...) returns an unscheduled
            # coroutine; nothing runs until the gather below, so the sleep in
            # this loop paces creation, not execution — confirm intent.
            normal_tasks = []
            for i in range(3):
                task = session.call_tool(
                    'add_memory',
                    {
                        'name': f'Normal Load {i}',
                        'episode_body': 'Normal operation',
                        'source': 'text',
                        'source_description': 'normal',
                        'group_id': group_id,
                    },
                )
                normal_tasks.append(task)
                await asyncio.sleep(0.5)
            await asyncio.gather(*normal_tasks)
            # Spike phase - sudden burst of requests
            spike_start = time.time()
            spike_tasks = []
            for i in range(50):
                task = session.call_tool(
                    'add_memory',
                    {
                        'name': f'Spike Load {i}',
                        'episode_body': TestDataGenerator.generate_technical_document(),
                        'source': 'text',
                        'source_description': 'spike',
                        'group_id': group_id,
                    },
                )
                spike_tasks.append(task)
            # Execute spike all at once; exceptions are collected, not raised
            spike_results = await asyncio.gather(*spike_tasks, return_exceptions=True)
            spike_duration = time.time() - spike_start
            # Analyze spike handling
            spike_failures = sum(1 for r in spike_results if isinstance(r, Exception))
            spike_success_rate = (len(spike_results) - spike_failures) / len(spike_results)
            print('\nSpike Load Test Results:')
            print(f' Spike size: {len(spike_tasks)} operations')
            print(f' Duration: {spike_duration:.2f}s')
            print(f' Success rate: {spike_success_rate * 100:.1f}%')
            print(f' Throughput: {len(spike_tasks) / spike_duration:.2f} ops/s')
            # System should handle at least 80% of spike
            assert spike_success_rate > 0.8, f'Too many failures during spike: {spike_failures}'

    @pytest.mark.asyncio
    @pytest.mark.slow
    async def test_memory_leak_detection(self):
        """Test for memory leaks during extended operation."""
        async with graphiti_test_client() as (session, group_id):
            process = psutil.Process()
            gc.collect()  # Force garbage collection for a clean baseline
            initial_memory = process.memory_info().rss / 1024 / 1024  # MB
            # Perform many operations in batches of 10
            for batch in range(10):
                batch_tasks = []
                for i in range(10):
                    task = session.call_tool(
                        'add_memory',
                        {
                            'name': f'Memory Test {batch}-{i}',
                            'episode_body': TestDataGenerator.generate_technical_document(),
                            'source': 'text',
                            'source_description': 'memory test',
                            'group_id': group_id,
                        },
                    )
                    batch_tasks.append(task)
                await asyncio.gather(*batch_tasks)
                # Force garbage collection between batches
                gc.collect()
                await asyncio.sleep(1)
            # Check memory after operations
            gc.collect()
            final_memory = process.memory_info().rss / 1024 / 1024  # MB
            memory_growth = final_memory - initial_memory
            print('\nMemory Leak Test:')
            print(f' Initial memory: {initial_memory:.1f} MB')
            print(f' Final memory: {final_memory:.1f} MB')
            print(f' Growth: {memory_growth:.1f} MB')
            # Allow for some memory growth but flag potential leaks.
            # Soft check only: no assertion, threshold depends on system.
            if memory_growth > 100:  # More than 100MB growth
                print(f' ⚠️ Potential memory leak detected: {memory_growth:.1f} MB growth')

    @pytest.mark.asyncio
    @pytest.mark.slow
    async def test_connection_pool_exhaustion(self):
        """Test behavior when connection pools are exhausted."""
        async with graphiti_test_client() as (session, group_id):
            # Create many concurrent long-running operations
            long_tasks = []
            for i in range(100):  # Many more than typical pool size
                task = session.call_tool(
                    'search_memory_nodes',
                    {
                        'query': f'complex query {i} '
                        + ' '.join([TestDataGenerator.fake.word() for _ in range(10)]),
                        'group_id': group_id,
                        'limit': 100,
                    },
                )
                long_tasks.append(task)
            # Execute with timeout so exhaustion can't hang the suite
            try:
                results = await asyncio.wait_for(
                    asyncio.gather(*long_tasks, return_exceptions=True), timeout=60.0
                )
                # Count connection-related errors (matched by message text)
                connection_errors = sum(
                    1
                    for r in results
                    if isinstance(r, Exception) and 'connection' in str(r).lower()
                )
                print('\nConnection Pool Test:')
                print(f' Total requests: {len(long_tasks)}')
                print(f' Connection errors: {connection_errors}')
            except asyncio.TimeoutError:
                print(' Test timed out - possible deadlock or exhaustion')

    @pytest.mark.asyncio
    @pytest.mark.slow
    async def test_gradual_degradation(self):
        """Test system degradation under increasing load."""
        async with graphiti_test_client() as (session, group_id):
            load_levels = [5, 10, 20, 40, 80]  # Increasing concurrent operations
            results_by_level = {}
            for level in load_levels:
                level_start = time.time()
                tasks = []
                for i in range(level):
                    task = session.call_tool(
                        'add_memory',
                        {
                            'name': f'Load Level {level} Op {i}',
                            'episode_body': f'Testing at load level {level}',
                            'source': 'text',
                            'source_description': 'degradation test',
                            'group_id': group_id,
                        },
                    )
                    tasks.append(task)
                # Execute level concurrently
                level_results = await asyncio.gather(*tasks, return_exceptions=True)
                level_duration = time.time() - level_start
                # Calculate metrics for this level
                failures = sum(1 for r in level_results if isinstance(r, Exception))
                success_rate = (level - failures) / level * 100
                throughput = level / level_duration
                results_by_level[level] = {
                    'success_rate': success_rate,
                    'throughput': throughput,
                    'duration': level_duration,
                }
                print(f'\nLoad Level {level}:')
                print(f' Success rate: {success_rate:.1f}%')
                print(f' Throughput: {throughput:.2f} ops/s')
                print(f' Duration: {level_duration:.2f}s')
                # Brief pause between levels
                await asyncio.sleep(2)
            # Verify graceful degradation:
            # success rate should not drop below 50% even at high load
            for level, metrics in results_by_level.items():
                assert metrics['success_rate'] > 50, f'Poor performance at load level {level}'
class TestResourceLimits:
    """Test behavior at resource limits."""

    @pytest.mark.asyncio
    async def test_large_payload_handling(self):
        """Test handling of very large payloads (1KB up to 1MB)."""
        async with graphiti_test_client() as (session, group_id):
            payload_sizes = [
                (1_000, '1KB'),
                (10_000, '10KB'),
                (100_000, '100KB'),
                (1_000_000, '1MB'),
            ]
            for size, label in payload_sizes:
                content = 'x' * size
                start_time = time.time()
                try:
                    await asyncio.wait_for(
                        session.call_tool(
                            'add_memory',
                            {
                                'name': f'Large Payload {label}',
                                'episode_body': content,
                                'source': 'text',
                                'source_description': 'payload test',
                                'group_id': group_id,
                            },
                        ),
                        timeout=30.0,
                    )
                    duration = time.time() - start_time
                    status = '✅ Success'
                except asyncio.TimeoutError:
                    duration = 30.0
                    status = '⏱️ Timeout'
                except Exception as e:
                    duration = time.time() - start_time
                    status = f'❌ Error: {type(e).__name__}'
                # Reporting only — this test intentionally makes no assertion.
                print(f'Payload {label}: {status} ({duration:.2f}s)')

    @pytest.mark.asyncio
    async def test_rate_limit_handling(self):
        """Test handling of rate limits."""
        async with graphiti_test_client() as (session, group_id):
            # Rapid fire requests to trigger rate limits
            rapid_tasks = []
            for i in range(100):
                task = session.call_tool(
                    'add_memory',
                    {
                        'name': f'Rate Limit Test {i}',
                        'episode_body': f'Testing rate limit {i}',
                        'source': 'text',
                        'source_description': 'rate test',
                        'group_id': group_id,
                    },
                )
                rapid_tasks.append(task)
            # Execute without delays; exceptions are collected, not raised
            results = await asyncio.gather(*rapid_tasks, return_exceptions=True)
            # Count rate limit errors (matched by message text / HTTP 429)
            rate_limit_errors = sum(
                1
                for r in results
                if isinstance(r, Exception) and ('rate' in str(r).lower() or '429' in str(r))
            )
            print('\nRate Limit Test:')
            print(f' Total requests: {len(rapid_tasks)}')
            print(f' Rate limit errors: {rate_limit_errors}')
            print(
                f' Success rate: {(len(rapid_tasks) - rate_limit_errors) / len(rapid_tasks) * 100:.1f}%'
            )
def generate_load_test_report(results: 'list[LoadTestResult]') -> str:
    """Generate comprehensive load test report.

    Args:
        results: One LoadTestResult per test run. Only attribute access is
            performed, so any object exposing the same fields works.

    Returns:
        A multi-line, human-readable report string.
    """
    report = []
    report.append('\n' + '=' * 60)
    report.append('LOAD TEST REPORT')
    report.append('=' * 60)
    for i, result in enumerate(results):
        report.append(f'\nTest Run {i + 1}:')
        report.append(f' Total Operations: {result.total_operations}')
        # Guard the division: LoadTester.calculate_results returns an
        # all-zero result for an empty run, which previously crashed here
        # with ZeroDivisionError.
        success_pct = (
            result.successful_operations / result.total_operations * 100
            if result.total_operations
            else 0.0
        )
        report.append(f' Success Rate: {success_pct:.1f}%')
        report.append(f' Throughput: {result.throughput:.2f} ops/s')
        report.append(
            f' Latency (avg/p50/p95/p99/max): {result.average_latency:.2f}/{result.p50_latency:.2f}/{result.p95_latency:.2f}/{result.p99_latency:.2f}/{result.max_latency:.2f}s'
        )
        if result.errors:
            report.append(' Errors:')
            for error_type, count in result.errors.items():
                report.append(f' {error_type}: {count}')
        report.append(' Resource Usage:')
        for metric, value in result.resource_usage.items():
            report.append(f' {metric}: {value:.2f}')
    report.append('=' * 60)
    return '\n'.join(report)
if __name__ == '__main__':
    # Run only the slow load tests when executed directly.
    pytest.main([__file__, '-v', '--asyncio-mode=auto', '-m', 'slow'])
| {
"repo_id": "getzep/graphiti",
"file_path": "mcp_server/tests/test_stress_load.py",
"license": "Apache License 2.0",
"lines": 441,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:graphiti_core/driver/graph_operations/graph_operations.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Any
from pydantic import BaseModel
class GraphOperationsInterface(BaseModel):
    """
    Interface for updating graph mutation behavior.
    All methods use `Any` type hints to avoid circular imports. See docstrings
    for expected concrete types.
    Type reference:
    - driver: GraphDriver
    - EntityNode, EpisodicNode, CommunityNode, SagaNode from graphiti_core.nodes
    - EntityEdge, EpisodicEdge, CommunityEdge from graphiti_core.edges
    - EpisodeType from graphiti_core.nodes
    """

    # -----------------
    # Node: Save/Delete
    # -----------------
    async def node_save(self, node: Any, driver: Any) -> None:
        """Persist (create or update) a single node."""
        raise NotImplementedError

    async def node_delete(self, node: Any, driver: Any) -> None:
        """Delete a single node."""
        raise NotImplementedError

    async def node_save_bulk(
        self,
        _cls: Any,  # kept for parity; callers won't pass it
        driver: Any,
        transaction: Any,
        nodes: list[Any],
        batch_size: int = 100,
    ) -> None:
        """Persist (create or update) many nodes in batches."""
        raise NotImplementedError

    async def node_delete_by_group_id(
        self,
        _cls: Any,
        driver: Any,
        group_id: str,
        batch_size: int = 100,
    ) -> None:
        """Delete all nodes belonging to a group, in batches."""
        raise NotImplementedError

    async def node_delete_by_uuids(
        self,
        _cls: Any,
        driver: Any,
        uuids: list[str],
        group_id: str | None = None,
        batch_size: int = 100,
    ) -> None:
        """Delete nodes by UUID, optionally scoped to a group, in batches."""
        raise NotImplementedError

    # -----------------
    # Node: Read
    # -----------------
    async def node_get_by_uuid(self, _cls: Any, driver: Any, uuid: str) -> Any:
        """Retrieve a single node by UUID."""
        raise NotImplementedError

    async def node_get_by_uuids(self, _cls: Any, driver: Any, uuids: list[str]) -> list[Any]:
        """Retrieve multiple nodes by UUIDs."""
        raise NotImplementedError

    async def node_get_by_group_ids(
        self,
        _cls: Any,
        driver: Any,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[Any]:
        """Retrieve nodes by group IDs with optional pagination."""
        raise NotImplementedError

    # --------------------------
    # Node: Embeddings (load)
    # --------------------------
    async def node_load_embeddings(self, node: Any, driver: Any) -> None:
        """
        Load embedding vectors for a single node into the instance (e.g., set node.embedding or similar).
        """
        raise NotImplementedError

    async def node_load_embeddings_bulk(
        self,
        driver: Any,
        nodes: list[Any],
        batch_size: int = 100,
    ) -> dict[str, list[float]]:
        """
        Load embedding vectors for many nodes in batches.
        """
        raise NotImplementedError

    # --------------------------
    # EpisodicNode: Save/Delete
    # --------------------------
    async def episodic_node_save(self, node: Any, driver: Any) -> None:
        """Persist (create or update) a single episodic node."""
        raise NotImplementedError

    async def episodic_node_delete(self, node: Any, driver: Any) -> None:
        """Delete a single episodic node."""
        raise NotImplementedError

    async def episodic_node_save_bulk(
        self,
        _cls: Any,
        driver: Any,
        transaction: Any,
        nodes: list[Any],
        batch_size: int = 100,
    ) -> None:
        """Persist (create or update) many episodic nodes in batches."""
        raise NotImplementedError

    async def episodic_edge_save_bulk(
        self,
        _cls: Any,
        driver: Any,
        transaction: Any,
        episodic_edges: list[Any],
        batch_size: int = 100,
    ) -> None:
        """Persist (create or update) many episodic edges in batches."""
        raise NotImplementedError

    async def episodic_node_delete_by_group_id(
        self,
        _cls: Any,
        driver: Any,
        group_id: str,
        batch_size: int = 100,
    ) -> None:
        """Delete all episodic nodes belonging to a group, in batches."""
        raise NotImplementedError

    async def episodic_node_delete_by_uuids(
        self,
        _cls: Any,
        driver: Any,
        uuids: list[str],
        group_id: str | None = None,
        batch_size: int = 100,
    ) -> None:
        """Delete episodic nodes by UUID, optionally scoped to a group, in batches."""
        raise NotImplementedError

    # -----------------------
    # EpisodicNode: Read
    # -----------------------
    async def episodic_node_get_by_uuid(self, _cls: Any, driver: Any, uuid: str) -> Any:
        """Retrieve a single episodic node by UUID."""
        raise NotImplementedError

    async def episodic_node_get_by_uuids(
        self, _cls: Any, driver: Any, uuids: list[str]
    ) -> list[Any]:
        """Retrieve multiple episodic nodes by UUIDs."""
        raise NotImplementedError

    async def episodic_node_get_by_group_ids(
        self,
        _cls: Any,
        driver: Any,
        group_ids: list[str],
        limit: int | None = None,
        uuid_cursor: str | None = None,
    ) -> list[Any]:
        """Retrieve episodic nodes by group IDs with optional pagination."""
        raise NotImplementedError

    async def retrieve_episodes(
        self,
        driver: Any,
        reference_time: Any,
        last_n: int = 3,
        group_ids: list[str] | None = None,
        source: Any | None = None,
        saga: str | None = None,
    ) -> list[Any]:
        """
        Retrieve the last n episodic nodes from the graph.
        Args:
            driver: GraphDriver instance
            reference_time: datetime object. Only episodes with valid_at <= reference_time
                are returned, allowing point-in-time queries.
            last_n: Number of most recent episodes to retrieve (default: 3)
            group_ids: Optional list of group IDs to filter by
            source: Optional EpisodeType to filter by source type
            saga: Optional saga name. If provided, only retrieves episodes
                belonging to that saga.
        Returns:
            list[EpisodicNode]: List of EpisodicNode objects in chronological order
            (oldest first)
        """
        raise NotImplementedError
# -----------------------
# CommunityNode: Save/Delete
# -----------------------
async def community_node_save(self, node: Any, driver: Any) -> None:
    """Persist (create or update) a single community node."""
    raise NotImplementedError

async def community_node_delete(self, node: Any, driver: Any) -> None:
    """Delete a single community node."""
    raise NotImplementedError

async def community_node_save_bulk(
    self,
    _cls: Any,
    driver: Any,
    transaction: Any,
    nodes: list[Any],
    batch_size: int = 100,
) -> None:
    """Persist (create or update) many community nodes in batches."""
    raise NotImplementedError

async def community_node_delete_by_group_id(
    self,
    _cls: Any,
    driver: Any,
    group_id: str,
    batch_size: int = 100,
) -> None:
    """Delete all community nodes belonging to a group, in batches."""
    raise NotImplementedError

async def community_node_delete_by_uuids(
    self,
    _cls: Any,
    driver: Any,
    uuids: list[str],
    group_id: str | None = None,
    batch_size: int = 100,
) -> None:
    """Delete community nodes by UUID, optionally scoped to a group, in batches."""
    raise NotImplementedError

# -----------------------
# CommunityNode: Read
# -----------------------
async def community_node_get_by_uuid(self, _cls: Any, driver: Any, uuid: str) -> Any:
    """Retrieve a single community node by UUID."""
    raise NotImplementedError

async def community_node_get_by_uuids(
    self, _cls: Any, driver: Any, uuids: list[str]
) -> list[Any]:
    """Retrieve multiple community nodes by UUIDs."""
    raise NotImplementedError

async def community_node_get_by_group_ids(
    self,
    _cls: Any,
    driver: Any,
    group_ids: list[str],
    limit: int | None = None,
    uuid_cursor: str | None = None,
) -> list[Any]:
    """Retrieve community nodes by group IDs with optional pagination."""
    raise NotImplementedError

# -----------------------
# SagaNode: Save/Delete
# -----------------------
async def saga_node_save(self, node: Any, driver: Any) -> None:
    """Persist (create or update) a single saga node."""
    raise NotImplementedError

async def saga_node_delete(self, node: Any, driver: Any) -> None:
    """Delete a single saga node."""
    raise NotImplementedError

async def saga_node_save_bulk(
    self,
    _cls: Any,
    driver: Any,
    transaction: Any,
    nodes: list[Any],
    batch_size: int = 100,
) -> None:
    """Persist (create or update) many saga nodes in batches."""
    raise NotImplementedError

async def saga_node_delete_by_group_id(
    self,
    _cls: Any,
    driver: Any,
    group_id: str,
    batch_size: int = 100,
) -> None:
    """Delete all saga nodes belonging to a group, in batches."""
    raise NotImplementedError

async def saga_node_delete_by_uuids(
    self,
    _cls: Any,
    driver: Any,
    uuids: list[str],
    group_id: str | None = None,
    batch_size: int = 100,
) -> None:
    """Delete saga nodes by UUID, optionally scoped to a group, in batches."""
    raise NotImplementedError

# -----------------------
# SagaNode: Read
# -----------------------
async def saga_node_get_by_uuid(self, _cls: Any, driver: Any, uuid: str) -> Any:
    """Retrieve a single saga node by UUID."""
    raise NotImplementedError

async def saga_node_get_by_uuids(self, _cls: Any, driver: Any, uuids: list[str]) -> list[Any]:
    """Retrieve multiple saga nodes by UUIDs."""
    raise NotImplementedError

async def saga_node_get_by_group_ids(
    self,
    _cls: Any,
    driver: Any,
    group_ids: list[str],
    limit: int | None = None,
    uuid_cursor: str | None = None,
) -> list[Any]:
    """Retrieve saga nodes by group IDs with optional pagination."""
    raise NotImplementedError
# -----------------
# Edge: Save/Delete
# -----------------
async def edge_save(self, edge: Any, driver: Any) -> None:
"""Persist (create or update) a single edge."""
raise NotImplementedError
async def edge_delete(self, edge: Any, driver: Any) -> None:
raise NotImplementedError
async def edge_save_bulk(
self,
_cls: Any,
driver: Any,
transaction: Any,
edges: list[Any],
batch_size: int = 100,
) -> None:
"""Persist (create or update) many edges in batches."""
raise NotImplementedError
async def edge_delete_by_uuids(
self,
_cls: Any,
driver: Any,
uuids: list[str],
group_id: str | None = None,
) -> None:
raise NotImplementedError
# -----------------
# Edge: Read
# -----------------
async def edge_get_by_uuid(self, _cls: Any, driver: Any, uuid: str) -> Any:
"""Retrieve a single edge by UUID."""
raise NotImplementedError
async def edge_get_by_uuids(self, _cls: Any, driver: Any, uuids: list[str]) -> list[Any]:
"""Retrieve multiple edges by UUIDs."""
raise NotImplementedError
async def edge_get_by_group_ids(
self,
_cls: Any,
driver: Any,
group_ids: list[str],
limit: int | None = None,
uuid_cursor: str | None = None,
) -> list[Any]:
"""Retrieve edges by group IDs with optional pagination."""
raise NotImplementedError
# -----------------
# Edge: Embeddings (load)
# -----------------
async def edge_load_embeddings(self, edge: Any, driver: Any) -> None:
"""
Load embedding vectors for a single edge into the instance (e.g., set edge.embedding or similar).
"""
raise NotImplementedError
async def edge_load_embeddings_bulk(
self,
driver: Any,
edges: list[Any],
batch_size: int = 100,
) -> dict[str, list[float]]:
"""
Load embedding vectors for many edges in batches
"""
raise NotImplementedError
# ---------------------------
# EpisodicEdge: Save/Delete
# ---------------------------
async def episodic_edge_save(self, edge: Any, driver: Any) -> None:
    """Persist (create or update) a single episodic edge (MENTIONS)."""
    raise NotImplementedError

async def episodic_edge_delete(self, edge: Any, driver: Any) -> None:
    """Delete a single episodic edge (MENTIONS)."""
    raise NotImplementedError

async def episodic_edge_delete_by_uuids(
    self,
    _cls: Any,
    driver: Any,
    uuids: list[str],
    group_id: str | None = None,
) -> None:
    """Delete every episodic edge whose UUID appears in ``uuids``."""
    raise NotImplementedError

# ---------------------------
# EpisodicEdge: Read
# ---------------------------
async def episodic_edge_get_by_uuid(self, _cls: Any, driver: Any, uuid: str) -> Any:
    """Retrieve a single episodic edge by UUID."""
    raise NotImplementedError

async def episodic_edge_get_by_uuids(
    self, _cls: Any, driver: Any, uuids: list[str]
) -> list[Any]:
    """Retrieve multiple episodic edges by UUIDs."""
    raise NotImplementedError

async def episodic_edge_get_by_group_ids(
    self,
    _cls: Any,
    driver: Any,
    group_ids: list[str],
    limit: int | None = None,
    uuid_cursor: str | None = None,
) -> list[Any]:
    """Retrieve episodic edges by group IDs with optional pagination."""
    raise NotImplementedError
# ---------------------------
# CommunityEdge: Save/Delete
# ---------------------------
async def community_edge_save(self, edge: Any, driver: Any) -> None:
    """Persist (create or update) a single community edge (HAS_MEMBER)."""
    raise NotImplementedError

async def community_edge_delete(self, edge: Any, driver: Any) -> None:
    """Delete a single community edge (HAS_MEMBER)."""
    raise NotImplementedError

async def community_edge_delete_by_uuids(
    self,
    _cls: Any,
    driver: Any,
    uuids: list[str],
    group_id: str | None = None,
) -> None:
    """Delete every community edge whose UUID appears in ``uuids``."""
    raise NotImplementedError

# ---------------------------
# CommunityEdge: Read
# ---------------------------
async def community_edge_get_by_uuid(self, _cls: Any, driver: Any, uuid: str) -> Any:
    """Retrieve a single community edge by UUID."""
    raise NotImplementedError

async def community_edge_get_by_uuids(
    self, _cls: Any, driver: Any, uuids: list[str]
) -> list[Any]:
    """Retrieve multiple community edges by UUIDs."""
    raise NotImplementedError

async def community_edge_get_by_group_ids(
    self,
    _cls: Any,
    driver: Any,
    group_ids: list[str],
    limit: int | None = None,
    uuid_cursor: str | None = None,
) -> list[Any]:
    """Retrieve community edges by group IDs with optional pagination."""
    raise NotImplementedError
# ---------------------------
# HasEpisodeEdge: Save/Delete
# ---------------------------
async def has_episode_edge_save(self, edge: Any, driver: Any) -> None:
    """Persist (create or update) a single has_episode edge."""
    raise NotImplementedError

async def has_episode_edge_delete(self, edge: Any, driver: Any) -> None:
    """Delete a single has_episode edge."""
    raise NotImplementedError

async def has_episode_edge_save_bulk(
    self,
    _cls: Any,
    driver: Any,
    transaction: Any,
    edges: list[Any],
    batch_size: int = 100,
) -> None:
    """Persist (create or update) many has_episode edges in batches."""
    raise NotImplementedError

async def has_episode_edge_delete_by_uuids(
    self,
    _cls: Any,
    driver: Any,
    uuids: list[str],
    group_id: str | None = None,
) -> None:
    """Delete every has_episode edge whose UUID appears in ``uuids``."""
    raise NotImplementedError

# ---------------------------
# HasEpisodeEdge: Read
# ---------------------------
async def has_episode_edge_get_by_uuid(self, _cls: Any, driver: Any, uuid: str) -> Any:
    """Retrieve a single has_episode edge by UUID."""
    raise NotImplementedError

async def has_episode_edge_get_by_uuids(
    self, _cls: Any, driver: Any, uuids: list[str]
) -> list[Any]:
    """Retrieve multiple has_episode edges by UUIDs."""
    raise NotImplementedError

async def has_episode_edge_get_by_group_ids(
    self,
    _cls: Any,
    driver: Any,
    group_ids: list[str],
    limit: int | None = None,
    uuid_cursor: str | None = None,
) -> list[Any]:
    """Retrieve has_episode edges by group IDs with optional pagination."""
    raise NotImplementedError
# ----------------------------
# NextEpisodeEdge: Save/Delete
# ----------------------------
async def next_episode_edge_save(self, edge: Any, driver: Any) -> None:
    """Persist (create or update) a single next_episode edge."""
    raise NotImplementedError

async def next_episode_edge_delete(self, edge: Any, driver: Any) -> None:
    """Delete a single next_episode edge."""
    raise NotImplementedError

async def next_episode_edge_save_bulk(
    self,
    _cls: Any,
    driver: Any,
    transaction: Any,
    edges: list[Any],
    batch_size: int = 100,
) -> None:
    """Persist (create or update) many next_episode edges in batches."""
    raise NotImplementedError

async def next_episode_edge_delete_by_uuids(
    self,
    _cls: Any,
    driver: Any,
    uuids: list[str],
    group_id: str | None = None,
) -> None:
    """Delete every next_episode edge whose UUID appears in ``uuids``."""
    raise NotImplementedError

# ----------------------------
# NextEpisodeEdge: Read
# ----------------------------
async def next_episode_edge_get_by_uuid(self, _cls: Any, driver: Any, uuid: str) -> Any:
    """Retrieve a single next_episode edge by UUID."""
    raise NotImplementedError

async def next_episode_edge_get_by_uuids(
    self, _cls: Any, driver: Any, uuids: list[str]
) -> list[Any]:
    """Retrieve multiple next_episode edges by UUIDs."""
    raise NotImplementedError

async def next_episode_edge_get_by_group_ids(
    self,
    _cls: Any,
    driver: Any,
    group_ids: list[str],
    limit: int | None = None,
    uuid_cursor: str | None = None,
) -> list[Any]:
    """Retrieve next_episode edges by group IDs with optional pagination."""
    raise NotImplementedError
# -----------------
# Search
# (abstract stubs — concrete drivers override these)
# -----------------
async def get_mentioned_nodes(
    self,
    driver: Any,
    episodes: list[Any],
) -> list[Any]:
    """
    Retrieve entity nodes mentioned by the given episodic nodes.
    Args:
        driver: GraphDriver instance
        episodes: List of EpisodicNode objects
    Returns:
        list[EntityNode]: List of EntityNode objects that are mentioned
            by the given episodes via MENTIONS relationships
    """
    raise NotImplementedError

async def get_communities_by_nodes(
    self,
    driver: Any,
    nodes: list[Any],
) -> list[Any]:
    """
    Retrieve community nodes that contain the given entity nodes as members.
    Args:
        driver: GraphDriver instance
        nodes: List of EntityNode objects
    Returns:
        list[CommunityNode]: List of CommunityNode objects that have
            HAS_MEMBER relationships to the given entity nodes
    """
    raise NotImplementedError
# -----------------
# Maintenance
# (abstract stubs — concrete drivers override these)
# -----------------
async def clear_data(
    self,
    driver: Any,
    group_ids: list[str] | None = None,
) -> None:
    """
    Clear all data or group-specific data from the graph.
    Args:
        driver: GraphDriver instance
        group_ids: If provided, only delete data in these groups.
            If None, deletes ALL data in the graph.
    """
    raise NotImplementedError

async def get_community_clusters(
    self,
    driver: Any,
    group_ids: list[str] | None,
) -> list[list[Any]]:
    """
    Retrieve all entity node clusters for community detection.
    Uses label propagation algorithm internally to identify clusters
    of related entities based on their edge connections.
    Args:
        driver: GraphDriver instance
        group_ids: List of group IDs to process. If None, processes
            all groups found in the graph.
    Returns:
        list[list[EntityNode]]: List of clusters, where each cluster
            is a list of EntityNode objects that belong together
    """
    raise NotImplementedError

async def remove_communities(
    self,
    driver: Any,
) -> None:
    """
    Delete all community nodes from the graph.
    This removes all Community-labeled nodes and their relationships.
    Args:
        driver: GraphDriver instance
    """
    raise NotImplementedError

async def determine_entity_community(
    self,
    driver: Any,
    entity: Any,
) -> tuple[Any | None, bool]:
    """
    Determine which community an entity belongs to.
    First checks if the entity is already a member of a community.
    If not, finds the most common community among neighboring entities.
    Args:
        driver: GraphDriver instance
        entity: EntityNode object to find community for
    Returns:
        tuple[CommunityNode | None, bool]: Tuple of (community, is_new) where:
            - community: The CommunityNode the entity belongs to, or None
            - is_new: True if this is a new membership (entity wasn't already
              in this community), False if entity was already a member
    """
    raise NotImplementedError
# -----------------
# Additional Node Operations
# (abstract stubs — concrete drivers override these)
# -----------------
async def episodic_node_get_by_entity_node_uuid(
    self,
    _cls: Any,
    driver: Any,
    entity_node_uuid: str,
) -> list[Any]:
    """
    Retrieve all episodes mentioning a specific entity.
    Args:
        _cls: The EpisodicNode class (for interface consistency)
        driver: GraphDriver instance
        entity_node_uuid: UUID of the EntityNode to find episodes for
    Returns:
        list[EpisodicNode]: List of EpisodicNode objects that have
            MENTIONS relationships to the specified entity
    """
    raise NotImplementedError

async def community_node_load_name_embedding(
    self,
    node: Any,
    driver: Any,
) -> None:
    """
    Load the name embedding for a community node.
    Populates the node.name_embedding field in-place.
    Args:
        node: CommunityNode object to load embedding for
        driver: GraphDriver instance
    """
    raise NotImplementedError
# -----------------
# Additional Edge Operations
# (abstract stubs — concrete drivers override these)
# -----------------
async def edge_get_between_nodes(
    self,
    _cls: Any,
    driver: Any,
    source_node_uuid: str,
    target_node_uuid: str,
) -> list[Any]:
    """
    Get edges connecting two specific entity nodes.
    Args:
        _cls: The EntityEdge class (for interface consistency)
        driver: GraphDriver instance
        source_node_uuid: UUID of the source EntityNode
        target_node_uuid: UUID of the target EntityNode
    Returns:
        list[EntityEdge]: List of EntityEdge objects connecting the two nodes.
            Note: Only returns edges in the source->target direction.
    """
    raise NotImplementedError

async def edge_get_by_node_uuid(
    self,
    _cls: Any,
    driver: Any,
    node_uuid: str,
) -> list[Any]:
    """
    Get all edges connected to a specific node.
    Args:
        _cls: The EntityEdge class (for interface consistency)
        driver: GraphDriver instance
        node_uuid: UUID of the EntityNode to find edges for
    Returns:
        list[EntityEdge]: List of EntityEdge objects where the node
            is either the source or target
    """
    raise NotImplementedError
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/graph_operations/graph_operations.py",
"license": "Apache License 2.0",
"lines": 698,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/search_interface/search_interface.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Any
from pydantic import BaseModel
class SearchInterface(BaseModel):
    """
    Interface for implementing custom search logic.

    All methods use `Any` type hints to avoid circular imports. See docstrings
    for expected concrete types.

    Every method raises NotImplementedError until overridden by a subclass.

    Type reference:
    - driver: GraphDriver
    - search_filter: SearchFilters
    - EntityNode, EpisodicNode, CommunityNode from graphiti_core.nodes
    - EntityEdge from graphiti_core.edges
    """

    async def edge_fulltext_search(
        self,
        driver: Any,
        query: str,
        search_filter: Any,
        group_ids: list[str] | None = None,
        limit: int = 100,
    ) -> list[Any]:
        """
        Perform fulltext search over edge facts and names.
        Args:
            driver: GraphDriver instance
            query: Search query string
            search_filter: SearchFilters instance for filtering results
            group_ids: Optional list of group IDs to filter by
            limit: Maximum number of results to return
        Returns:
            list[EntityEdge]: List of matching EntityEdge objects
        """
        raise NotImplementedError

    async def edge_similarity_search(
        self,
        driver: Any,
        search_vector: list[float],
        source_node_uuid: str | None,
        target_node_uuid: str | None,
        search_filter: Any,
        group_ids: list[str] | None = None,
        limit: int = 100,
        min_score: float = 0.7,
    ) -> list[Any]:
        """
        Perform vector similarity search over edge fact embeddings.
        Args:
            driver: GraphDriver instance
            search_vector: Query embedding vector
            source_node_uuid: Optional source node UUID to filter by
            target_node_uuid: Optional target node UUID to filter by
            search_filter: SearchFilters instance for filtering results
            group_ids: Optional list of group IDs to filter by
            limit: Maximum number of results to return
            min_score: Minimum similarity score threshold (0.0 to 1.0)
        Returns:
            list[EntityEdge]: List of matching EntityEdge objects
        """
        raise NotImplementedError

    async def node_fulltext_search(
        self,
        driver: Any,
        query: str,
        search_filter: Any,
        group_ids: list[str] | None = None,
        limit: int = 100,
    ) -> list[Any]:
        """
        Perform fulltext search over node names and summaries.
        Args:
            driver: GraphDriver instance
            query: Search query string
            search_filter: SearchFilters instance for filtering results
            group_ids: Optional list of group IDs to filter by
            limit: Maximum number of results to return
        Returns:
            list[EntityNode]: List of matching EntityNode objects
        """
        raise NotImplementedError

    async def node_similarity_search(
        self,
        driver: Any,
        search_vector: list[float],
        search_filter: Any,
        group_ids: list[str] | None = None,
        limit: int = 100,
        min_score: float = 0.7,
    ) -> list[Any]:
        """
        Perform vector similarity search over node name embeddings.
        Args:
            driver: GraphDriver instance
            search_vector: Query embedding vector
            search_filter: SearchFilters instance for filtering results
            group_ids: Optional list of group IDs to filter by
            limit: Maximum number of results to return
            min_score: Minimum similarity score threshold (0.0 to 1.0)
        Returns:
            list[EntityNode]: List of matching EntityNode objects
        """
        raise NotImplementedError

    async def episode_fulltext_search(
        self,
        driver: Any,
        query: str,
        search_filter: Any,
        group_ids: list[str] | None = None,
        limit: int = 100,
    ) -> list[Any]:
        """
        Perform fulltext search over episode content.
        Args:
            driver: GraphDriver instance
            query: Search query string
            search_filter: SearchFilters instance (kept for interface parity)
            group_ids: Optional list of group IDs to filter by
            limit: Maximum number of results to return
        Returns:
            list[EpisodicNode]: List of matching EpisodicNode objects
        """
        raise NotImplementedError

    async def edge_bfs_search(
        self,
        driver: Any,
        bfs_origin_node_uuids: list[str] | None,
        bfs_max_depth: int,
        search_filter: Any,
        group_ids: list[str] | None = None,
        limit: int = 100,
    ) -> list[Any]:
        """
        Perform breadth-first search for edges starting from origin nodes.
        Args:
            driver: GraphDriver instance
            bfs_origin_node_uuids: List of starting node UUIDs (Entity or Episodic).
                Returns empty list if None or empty.
            bfs_max_depth: Maximum traversal depth (must be >= 1)
            search_filter: SearchFilters instance for filtering results
            group_ids: Optional list of group IDs to filter by
            limit: Maximum number of results to return
        Returns:
            list[EntityEdge]: List of EntityEdge objects found within the search depth
        """
        raise NotImplementedError

    async def node_bfs_search(
        self,
        driver: Any,
        bfs_origin_node_uuids: list[str] | None,
        search_filter: Any,
        bfs_max_depth: int,
        group_ids: list[str] | None = None,
        limit: int = 100,
    ) -> list[Any]:
        """
        Perform breadth-first search for nodes starting from origin nodes.

        NOTE(review): parameter order differs from edge_bfs_search
        (search_filter before bfs_max_depth here) — keep call sites keyword-based.
        Args:
            driver: GraphDriver instance
            bfs_origin_node_uuids: List of starting node UUIDs (Entity or Episodic).
                Returns empty list if None or empty.
            search_filter: SearchFilters instance for filtering results
            bfs_max_depth: Maximum traversal depth (must be >= 1, returns empty if < 1)
            group_ids: Optional list of group IDs to filter by
            limit: Maximum number of results to return
        Returns:
            list[EntityNode]: List of EntityNode objects found within the search depth
        """
        raise NotImplementedError

    async def community_fulltext_search(
        self,
        driver: Any,
        query: str,
        group_ids: list[str] | None = None,
        limit: int = 100,
    ) -> list[Any]:
        """
        Perform fulltext search over community names.
        Args:
            driver: GraphDriver instance
            query: Search query string
            group_ids: Optional list of group IDs to filter by
            limit: Maximum number of results to return
        Returns:
            list[CommunityNode]: List of matching CommunityNode objects
        """
        raise NotImplementedError

    async def community_similarity_search(
        self,
        driver: Any,
        search_vector: list[float],
        group_ids: list[str] | None = None,
        limit: int = 100,
        min_score: float = 0.6,
    ) -> list[Any]:
        """
        Perform vector similarity search over community name embeddings.
        Args:
            driver: GraphDriver instance
            search_vector: Query embedding vector
            group_ids: Optional list of group IDs to filter by
            limit: Maximum number of results to return
            min_score: Minimum similarity score threshold (0.0 to 1.0)
        Returns:
            list[CommunityNode]: List of matching CommunityNode objects
        """
        raise NotImplementedError

    async def get_embeddings_for_communities(
        self,
        driver: Any,
        communities: list[Any],
    ) -> dict[str, list[float]]:
        """
        Load name embeddings for a list of community nodes.
        Args:
            driver: GraphDriver instance
            communities: List of CommunityNode objects to load embeddings for
        Returns:
            dict[str, list[float]]: Mapping of community UUID to name embedding vector
        """
        raise NotImplementedError

    async def node_distance_reranker(
        self,
        driver: Any,
        node_uuids: list[str],
        center_node_uuid: str,
        min_score: float = 0,
    ) -> tuple[list[str], list[float]]:
        """
        Rerank nodes by their graph distance to a center node.
        Nodes directly connected to the center node get score 1.0, the center node
        itself gets score 0.1 (if in the input list), and unconnected nodes get
        score approaching 0 (1/infinity).
        Args:
            driver: GraphDriver instance
            node_uuids: List of node UUIDs to rerank. The center_node_uuid will be
                filtered out during processing but included in results if present.
            center_node_uuid: UUID of the center node to measure distances from
            min_score: Minimum score threshold. Nodes with 1/distance < min_score
                are excluded from results.
        Returns:
            tuple[list[str], list[float]]: Tuple of (sorted_uuids, scores) where
                scores are 1/distance values, sorted by distance ascending
        """
        raise NotImplementedError

    async def episode_mentions_reranker(
        self,
        driver: Any,
        node_uuids: list[list[str]],
        min_score: float = 0,
    ) -> tuple[list[str], list[float]]:
        """
        Rerank nodes by their episode mention count.
        Uses RRF (Reciprocal Rank Fusion) as a preliminary ranker, then reranks
        by the number of episodes that mention each node.
        Args:
            driver: GraphDriver instance
            node_uuids: List of ranked UUID lists (e.g., from multiple search results)
                to be merged and reranked
            min_score: Minimum mention count threshold. Nodes with fewer mentions
                are excluded from results.
        Returns:
            tuple[list[str], list[float]]: Tuple of (sorted_uuids, mention_counts)
                sorted by mention count descending
        """
        raise NotImplementedError

    # ---------- SEARCH FILTERS (sync) ----------
    def build_node_search_filters(self, search_filters: Any) -> Any:
        """
        Build provider-specific node search filters.
        Args:
            search_filters: SearchFilters instance
        Returns:
            Provider-specific filter representation
        """
        raise NotImplementedError

    def build_edge_search_filters(self, search_filters: Any) -> Any:
        """
        Build provider-specific edge search filters.
        Args:
            search_filters: SearchFilters instance
        Returns:
            Provider-specific filter representation
        """
        raise NotImplementedError

    class Config:
        # Permit non-pydantic field types (e.g. driver objects) on subclasses.
        arbitrary_types_allowed = True
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/search_interface/search_interface.py",
"license": "Apache License 2.0",
"lines": 297,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
getzep/graphiti:examples/opentelemetry/otel_stdout_example.py | """
Copyright 2025, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncio
import json
import logging
from datetime import datetime, timezone
from logging import INFO
from opentelemetry import trace
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
from graphiti_core import Graphiti
from graphiti_core.driver.kuzu_driver import KuzuDriver
from graphiti_core.nodes import EpisodeType
# Configure root logging once at module load so library log output is visible
# when the example runs.
logging.basicConfig(
    level=INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)
logger = logging.getLogger(__name__)  # module logger (not used further in this example)
def setup_otel_stdout_tracing():
    """Install an OpenTelemetry provider that prints spans to stdout and return a tracer."""
    tracer_provider = TracerProvider(
        resource=Resource(attributes={'service.name': 'graphiti-example'})
    )
    # SimpleSpanProcessor exports each span synchronously to the console exporter.
    tracer_provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
    trace.set_tracer_provider(tracer_provider)
    return trace.get_tracer(__name__)
async def main():
    """Run the example end-to-end: set up tracing, ingest episodes, run two searches."""
    otel_tracer = setup_otel_stdout_tracing()
    print('OpenTelemetry stdout tracing enabled\n')
    # Kuzu is an embedded graph database, so no external server is needed.
    kuzu_driver = KuzuDriver()
    graphiti = Graphiti(
        graph_driver=kuzu_driver, tracer=otel_tracer, trace_span_prefix='graphiti.example'
    )
    try:
        await graphiti.build_indices_and_constraints()
        print('Graph indices and constraints built\n')
        # Two free-text episodes plus one structured (JSON) episode.
        episodes = [
            {
                'content': 'Kamala Harris is the Attorney General of California. She was previously '
                'the district attorney for San Francisco.',
                'type': EpisodeType.text,
                'description': 'biographical information',
            },
            {
                'content': 'As AG, Harris was in office from January 3, 2011 – January 3, 2017',
                'type': EpisodeType.text,
                'description': 'term dates',
            },
            {
                'content': {
                    'name': 'Gavin Newsom',
                    'position': 'Governor',
                    'state': 'California',
                    'previous_role': 'Lieutenant Governor',
                },
                'type': EpisodeType.json,
                'description': 'structured data',
            },
        ]
        print('Adding episodes...\n')
        for i, episode in enumerate(episodes):
            # dict content is serialized to a JSON string before ingestion.
            await graphiti.add_episode(
                name=f'Episode {i}',
                episode_body=episode['content']
                if isinstance(episode['content'], str)
                else json.dumps(episode['content']),
                source=episode['type'],
                source_description=episode['description'],
                reference_time=datetime.now(timezone.utc),
            )
            print(f'Added episode: Episode {i} ({episode["type"].value})')
        print("\nSearching for: 'Who was the California Attorney General?'\n")
        results = await graphiti.search('Who was the California Attorney General?')
        print('Search Results:')
        for idx, result in enumerate(results[:3]):
            print(f'\nResult {idx + 1}:')
            print(f' Fact: {result.fact}')
            # Edge results may carry temporal validity metadata.
            if hasattr(result, 'valid_at') and result.valid_at:
                print(f' Valid from: {result.valid_at}')
        print("\nSearching for: 'What positions has Gavin Newsom held?'\n")
        results = await graphiti.search('What positions has Gavin Newsom held?')
        print('Search Results:')
        for idx, result in enumerate(results[:3]):
            print(f'\nResult {idx + 1}:')
            print(f' Fact: {result.fact}')
        print('\nExample complete')
    finally:
        # Always release driver resources, even if a step above failed.
        await graphiti.close()
# Script entry point: drive the async example to completion.
if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "getzep/graphiti",
"file_path": "examples/opentelemetry/otel_stdout_example.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/tracer.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC, abstractmethod
from collections.abc import Generator
from contextlib import AbstractContextManager, contextmanager, suppress
from typing import TYPE_CHECKING, Any

# Static-analysis-only import so annotations can reference otel types without
# requiring the package at runtime.
if TYPE_CHECKING:
    from opentelemetry.trace import Span, StatusCode

# OpenTelemetry is an optional dependency: probe for it at runtime and record
# availability so the rest of the module can fall back to no-op tracing.
try:
    from opentelemetry.trace import Span, StatusCode

    OTEL_AVAILABLE = True
except ImportError:
    OTEL_AVAILABLE = False
class TracerSpan(ABC):
    """Abstract base class for tracer spans."""

    @abstractmethod
    def add_attributes(self, attributes: dict[str, Any]) -> None:
        """Add attributes to the span."""
        pass

    @abstractmethod
    def set_status(self, status: str, description: str | None = None) -> None:
        """Set the status of the span; implementations recognize 'ok' and 'error'."""
        pass

    @abstractmethod
    def record_exception(self, exception: Exception) -> None:
        """Record an exception in the span."""
        pass
class Tracer(ABC):
    """Abstract base class for tracers."""

    @abstractmethod
    def start_span(self, name: str) -> AbstractContextManager[TracerSpan]:
        """Start a new span with the given name; used as a context manager."""
        pass
class NoOpSpan(TracerSpan):
    """No-op span implementation that does nothing.

    Used when tracing is disabled, unavailable, or has failed.
    """

    def add_attributes(self, attributes: dict[str, Any]) -> None:
        pass

    def set_status(self, status: str, description: str | None = None) -> None:
        pass

    def record_exception(self, exception: Exception) -> None:
        pass
class NoOpTracer(Tracer):
    """No-op tracer implementation that does nothing."""

    @contextmanager
    def start_span(self, name: str) -> Generator[NoOpSpan, None, None]:
        """Return a no-op span; NoOpSpan holds no state, so a fresh one per call is cheap."""
        yield NoOpSpan()
class OpenTelemetrySpan(TracerSpan):
    """TracerSpan adapter around a live OpenTelemetry span.

    All operations swallow tracer-side errors so tracing can never break the
    operation being traced.
    """

    def __init__(self, span: 'Span'):
        self._span = span

    def add_attributes(self, attributes: dict[str, Any]) -> None:
        """Copy non-None attributes onto the wrapped span, stringifying non-primitives."""
        try:
            sanitized = {
                key: value if isinstance(value, str | int | float | bool) else str(value)
                for key, value in attributes.items()
                if value is not None
            }
            if sanitized:
                self._span.set_attributes(sanitized)
        except Exception:
            # Silently ignore tracing errors
            pass

    def set_status(self, status: str, description: str | None = None) -> None:
        """Map 'error'/'ok' to the OpenTelemetry status codes; other values are ignored."""
        with suppress(Exception):
            if OTEL_AVAILABLE:
                if status == 'error':
                    self._span.set_status(StatusCode.ERROR, description)
                elif status == 'ok':
                    self._span.set_status(StatusCode.OK, description)

    def record_exception(self, exception: Exception) -> None:
        """Attach an exception event to the wrapped span."""
        with suppress(Exception):
            self._span.record_exception(exception)
class OpenTelemetryTracer(Tracer):
    """Wrapper for OpenTelemetry tracer with configurable span name prefix."""

    def __init__(self, tracer: Any, span_prefix: str = 'graphiti'):
        """
        Initialize the OpenTelemetry tracer wrapper.

        Parameters
        ----------
        tracer : opentelemetry.trace.Tracer
            The OpenTelemetry tracer instance.
        span_prefix : str, optional
            Prefix to prepend to all span names. Defaults to 'graphiti'.

        Raises
        ------
        ImportError
            If the opentelemetry package is not installed.
        """
        if not OTEL_AVAILABLE:
            raise ImportError(
                'OpenTelemetry is not installed. Install it with: pip install opentelemetry-api'
            )
        self._tracer = tracer
        self._span_prefix = span_prefix.rstrip('.')

    @contextmanager
    def start_span(self, name: str) -> Generator[OpenTelemetrySpan | NoOpSpan, None, None]:
        """Start a new OpenTelemetry span with the configured prefix.

        Only span *creation* is guarded: if the tracer fails to start a span,
        a no-op span is yielded so the traced operation still runs. Exceptions
        raised by the caller's ``with`` body are re-raised unchanged.

        BUGFIX: the previous implementation wrapped the ``yield`` itself in
        ``try/except Exception: yield NoOpSpan()``. When the caller's body
        raised, the exception was thrown into this generator, caught, and the
        generator yielded a second time — causing contextlib to raise
        ``RuntimeError: generator didn't stop after throw()`` and masking the
        caller's original exception.
        """
        try:
            span_cm = self._tracer.start_as_current_span(f'{self._span_prefix}.{name}')
            span = span_cm.__enter__()
        except Exception:
            # Tracing failed to start; degrade to a no-op span.
            yield NoOpSpan()
            return
        try:
            yield OpenTelemetrySpan(span)
        except BaseException as exc:
            # Close the span (ignoring tracer-side errors) and let the
            # caller's exception propagate instead of swallowing it.
            with suppress(Exception):
                span_cm.__exit__(type(exc), exc, exc.__traceback__)
            raise
        else:
            with suppress(Exception):
                span_cm.__exit__(None, None, None)
def create_tracer(otel_tracer: Any | None = None, span_prefix: str = 'graphiti') -> Tracer:
    """
    Create a tracer instance.

    Parameters
    ----------
    otel_tracer : opentelemetry.trace.Tracer | None, optional
        An OpenTelemetry tracer instance. If None, a no-op tracer is returned.
    span_prefix : str, optional
        Prefix to prepend to all span names. Defaults to 'graphiti'.

    Returns
    -------
    Tracer
        An OpenTelemetryTracer when a tracer was supplied and the
        opentelemetry package is importable; otherwise a NoOpTracer.

    Examples
    --------
    Using with OpenTelemetry:
        >>> from opentelemetry import trace
        >>> otel_tracer = trace.get_tracer(__name__)
        >>> tracer = create_tracer(otel_tracer, span_prefix='myapp.graphiti')
    Using no-op tracer:
        >>> tracer = create_tracer()  # Returns NoOpTracer
    """
    # No tracer supplied, or otel missing at runtime -> silent no-op fallback.
    if otel_tracer is None or not OTEL_AVAILABLE:
        return NoOpTracer()
    return OpenTelemetryTracer(otel_tracer, span_prefix)
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/tracer.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/utils/text_utils.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
# Maximum length for entity/node summaries, in characters.
MAX_SUMMARY_CHARS = 500
def truncate_at_sentence(text: str, max_chars: int) -> str:
    """
    Truncate text at or about max_chars while respecting sentence boundaries.

    The last complete sentence ending (., ! or ? followed by whitespace or the
    end of the truncation window) within the first max_chars characters wins;
    if there is none, the text is cut hard at max_chars. Trailing whitespace
    is stripped either way.

    Args:
        text: The text to truncate (falsy values are returned unchanged)
        max_chars: Maximum number of characters

    Returns:
        Truncated text
    """
    # Fast path: nothing to do for falsy or already-short input.
    if not text or len(text) <= max_chars:
        return text

    window = text[:max_chars]

    # Sentence end: terminal punctuation followed by whitespace or window end.
    last_end = -1
    for match in re.finditer(r'[.!?](?:\s|$)', window):
        last_end = match.end()

    if last_end >= 0:
        # Cut after the last complete sentence inside the window.
        return text[:last_end].rstrip()

    # No sentence boundary available: hard cut at max_chars.
    return window.rstrip()
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/utils/text_utils.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
getzep/graphiti:tests/test_text_utils.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from graphiti_core.utils.text_utils import MAX_SUMMARY_CHARS, truncate_at_sentence
def test_truncate_at_sentence_short_text():
    """Test that short text is returned unchanged."""
    text = 'This is a short sentence.'
    result = truncate_at_sentence(text, 100)
    assert result == text


def test_truncate_at_sentence_empty():
    """Test that empty text is handled correctly."""
    # Falsy inputs ('' and None) pass straight through the guard clause.
    assert truncate_at_sentence('', 100) == ''
    assert truncate_at_sentence(None, 100) is None


def test_truncate_at_sentence_exact_length():
    """Test text at exactly max_chars."""
    # Boundary case: len(text) == max_chars must not truncate.
    text = 'A' * 100
    result = truncate_at_sentence(text, 100)
    assert result == text
def test_truncate_at_sentence_with_period():
    """Truncation lands on the last period that fits within the budget."""
    sample = 'First sentence. Second sentence. Third sentence. Fourth sentence.'
    truncated = truncate_at_sentence(sample, 40)
    assert len(truncated) <= 40
    assert truncated == 'First sentence. Second sentence.'
def test_truncate_at_sentence_with_question():
    """Question marks also count as sentence boundaries."""
    sample = 'What is this? This is a test. More text here.'
    truncated = truncate_at_sentence(sample, 30)
    assert len(truncated) <= 32
    assert truncated == 'What is this? This is a test.'
def test_truncate_at_sentence_with_exclamation():
    """Exclamation marks also count as sentence boundaries."""
    sample = 'Hello world! This is exciting. And more text.'
    truncated = truncate_at_sentence(sample, 30)
    assert len(truncated) <= 32
    assert truncated == 'Hello world! This is exciting.'
def test_truncate_at_sentence_no_boundary():
    """With no punctuation before the limit, cut at max_chars instead."""
    sample = 'This is a very long sentence without any punctuation marks near the beginning'
    truncated = truncate_at_sentence(sample, 30)
    assert truncated.startswith('This is a very long sentence')
    assert len(truncated) <= 30
def test_truncate_at_sentence_multiple_periods():
    """The LAST boundary that still fits is chosen among many."""
    truncated = truncate_at_sentence('A. B. C. D. E. F. G. H.', 10)
    assert len(truncated) <= 10
    assert truncated == 'A. B. C.'
def test_truncate_at_sentence_strips_trailing_whitespace():
    """No trailing spaces survive truncation."""
    truncated = truncate_at_sentence('First sentence. Second sentence.', 20)
    assert truncated == 'First sentence.'
    assert not truncated.endswith(' ')
def test_max_summary_chars_constant():
    """Guard the documented summary character budget."""
    assert MAX_SUMMARY_CHARS == 500
def test_truncate_at_sentence_realistic_summary():
    """End-to-end check with a realistic multi-sentence entity summary."""
    summary = (
        'John is a software engineer who works at a tech company in San Francisco. '
        'He has been programming for over 10 years and specializes in Python and distributed systems. '
        'John enjoys hiking on weekends and is learning to play guitar. '
        'He graduated from MIT with a degree in computer science.'
    )
    truncated = truncate_at_sentence(summary, MAX_SUMMARY_CHARS)
    # Must fit the budget, end on a complete sentence, and keep the opening fact.
    assert len(truncated) <= MAX_SUMMARY_CHARS
    assert truncated.endswith('.')
    assert 'John is a software engineer' in truncated
| {
"repo_id": "getzep/graphiti",
"file_path": "tests/test_text_utils.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:graphiti_core/prompts/snippets.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Prompt snippet shared by summarization prompts: instructs the LLM to emit
# terse, factual summaries without meta-commentary. The BAD/GOOD pairs below
# are few-shot guidance embedded verbatim in the prompt — do not reword them
# casually, as changes alter model behavior.
summary_instructions = """Guidelines:
1. Output only factual content. Never explain what you're doing, why, or mention limitations/constraints.
2. Only use the provided messages, entity, and entity context to set attribute values.
3. Keep the summary concise and to the point. STATE FACTS DIRECTLY IN UNDER 250 CHARACTERS.
Example summaries:
BAD: "This is the only activity in the context. The user listened to this song. No other details were provided to include in this summary."
GOOD: "User played 'Blue Monday' by New Order (electronic genre) on 2024-12-03 at 14:22 UTC."
BAD: "Based on the messages provided, the user attended a meeting. This summary focuses on that event as it was the main topic discussed."
GOOD: "User attended Q3 planning meeting with sales team on March 15."
BAD: "The context shows John ordered pizza. Due to length constraints, other details are omitted from this summary."
GOOD: "John ordered pepperoni pizza from Mario's at 7:30 PM, delivered to office."
"""
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/prompts/snippets.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
getzep/graphiti:tests/utils/maintenance/test_bulk_utils.py | from collections import deque
from unittest.mock import AsyncMock, MagicMock
import pytest
from graphiti_core.edges import EntityEdge
from graphiti_core.graphiti_types import GraphitiClients
from graphiti_core.nodes import EntityNode, EpisodeType, EpisodicNode
from graphiti_core.utils import bulk_utils
from graphiti_core.utils.bulk_utils import extract_nodes_and_edges_bulk
from graphiti_core.utils.datetime_utils import utc_now
def _make_episode(uuid_suffix: str, group_id: str = 'group') -> EpisodicNode:
    """Minimal message-type episode fixture; the suffix only disambiguates names."""
    return EpisodicNode(
        name=f'episode-{uuid_suffix}',
        group_id=group_id,
        labels=[],
        source=EpisodeType.message,
        source_description='test',
        content='content',
        created_at=utc_now(),
        valid_at=utc_now(),
    )
def _make_clients() -> GraphitiClients:
    """Assemble a GraphitiClients whose members are all MagicMock doubles."""
    # model_construct bypasses pydantic validation so plain mocks are accepted.
    return GraphitiClients.model_construct(
        driver=MagicMock(),
        embedder=MagicMock(),
        cross_encoder=MagicMock(),
        llm_client=MagicMock(),
    )
@pytest.mark.asyncio
async def test_dedupe_nodes_bulk_reuses_canonical_nodes(monkeypatch):
    """Two episodes extracting the same entity should collapse onto one canonical node.

    Also verifies that dedupe_nodes_bulk calls resolve_extracted_nodes once per
    episode WITHOUT an existing_nodes_override (both recorded overrides are None).
    """
    clients = _make_clients()
    episode_one = _make_episode('1')
    episode_two = _make_episode('2')
    extracted_one = EntityNode(name='Alice Smith', group_id='group', labels=['Entity'])
    extracted_two = EntityNode(name='Alice Smith', group_id='group', labels=['Entity'])
    canonical = extracted_one
    # Records the existing_nodes_override passed on each resolve call.
    call_queue = deque()

    # Stand-in for resolve_extracted_nodes: the first batch resolves to itself,
    # the second reports extracted_two as a duplicate of the canonical node.
    async def fake_resolve(
        clients_arg,
        nodes_arg,
        episode_arg,
        previous_episodes_arg,
        entity_types_arg,
        existing_nodes_override=None,
    ):
        call_queue.append(existing_nodes_override)
        if nodes_arg == [extracted_one]:
            return [canonical], {canonical.uuid: canonical.uuid}, []
        assert nodes_arg == [extracted_two]
        assert existing_nodes_override is None
        return [canonical], {extracted_two.uuid: canonical.uuid}, [(extracted_two, canonical)]

    monkeypatch.setattr(bulk_utils, 'resolve_extracted_nodes', fake_resolve)
    nodes_by_episode, compressed_map = await bulk_utils.dedupe_nodes_bulk(
        clients,
        [[extracted_one], [extracted_two]],
        [(episode_one, []), (episode_two, [])],
    )

    assert len(call_queue) == 2
    assert call_queue[0] is None
    assert call_queue[1] is None
    # Both episodes end up referencing the single canonical node.
    assert nodes_by_episode[episode_one.uuid] == [canonical]
    assert nodes_by_episode[episode_two.uuid] == [canonical]
    assert compressed_map.get(extracted_two.uuid) == canonical.uuid
@pytest.mark.asyncio
async def test_dedupe_nodes_bulk_handles_empty_batch(monkeypatch):
    """With no episodes at all, dedupe returns empty maps and never resolves."""
    clients = _make_clients()
    resolve_mock = AsyncMock()
    monkeypatch.setattr(bulk_utils, 'resolve_extracted_nodes', resolve_mock)

    nodes_by_episode, compressed_map = await bulk_utils.dedupe_nodes_bulk(clients, [], [])

    resolve_mock.assert_not_awaited()
    assert compressed_map == {}
    assert nodes_by_episode == {}
@pytest.mark.asyncio
async def test_dedupe_nodes_bulk_single_episode(monkeypatch):
    """A single episode with one node resolves to itself with an identity uuid map."""
    clients = _make_clients()
    episode = _make_episode('solo')
    node = EntityNode(name='Solo', group_id='group', labels=['Entity'])
    resolve_mock = AsyncMock(return_value=([node], {node.uuid: node.uuid}, []))
    monkeypatch.setattr(bulk_utils, 'resolve_extracted_nodes', resolve_mock)

    nodes_by_episode, compressed_map = await bulk_utils.dedupe_nodes_bulk(
        clients,
        [[node]],
        [(episode, [])],
    )

    resolve_mock.assert_awaited_once()
    assert nodes_by_episode == {episode.uuid: [node]}
    assert compressed_map == {node.uuid: node.uuid}
@pytest.mark.asyncio
async def test_dedupe_nodes_bulk_uuid_map_respects_direction(monkeypatch):
    """The compressed uuid map must point alias -> canonical, never the reverse."""
    clients = _make_clients()
    episode_one = _make_episode('one')
    episode_two = _make_episode('two')
    # uuids chosen so lexicographic order ('a-uuid' < 'b-uuid') opposes the
    # intended alias -> canonical direction; a sort-based bug would flip it.
    extracted_one = EntityNode(uuid='b-uuid', name='Edge Case', group_id='group', labels=['Entity'])
    extracted_two = EntityNode(uuid='a-uuid', name='Edge Case', group_id='group', labels=['Entity'])
    canonical = extracted_one
    alias = extracted_two

    # First batch resolves to itself; second reports the alias as a duplicate.
    async def fake_resolve(
        clients_arg,
        nodes_arg,
        episode_arg,
        previous_episodes_arg,
        entity_types_arg,
        existing_nodes_override=None,
    ):
        if nodes_arg == [extracted_one]:
            return [canonical], {canonical.uuid: canonical.uuid}, []
        assert nodes_arg == [extracted_two]
        return [canonical], {alias.uuid: canonical.uuid}, [(alias, canonical)]

    monkeypatch.setattr(bulk_utils, 'resolve_extracted_nodes', fake_resolve)
    nodes_by_episode, compressed_map = await bulk_utils.dedupe_nodes_bulk(
        clients,
        [[extracted_one], [extracted_two]],
        [(episode_one, []), (episode_two, [])],
    )

    assert nodes_by_episode[episode_one.uuid] == [canonical]
    assert nodes_by_episode[episode_two.uuid] == [canonical]
    assert compressed_map.get(alias.uuid) == canonical.uuid
@pytest.mark.asyncio
async def test_dedupe_nodes_bulk_missing_canonical_falls_back(monkeypatch, caplog):
    """If the uuid map targets an unknown canonical, keep the extracted node and warn."""
    clients = _make_clients()
    episode = _make_episode('missing')
    node = EntityNode(name='Fallback', group_id='group', labels=['Entity'])
    fake_resolve = AsyncMock(return_value=([node], {node.uuid: 'missing-canonical'}, []))
    monkeypatch.setattr(bulk_utils, 'resolve_extracted_nodes', fake_resolve)

    with caplog.at_level('WARNING'):
        nodes_by_episode, compressed_map = await bulk_utils.dedupe_nodes_bulk(
            clients,
            [[node]],
            [(episode, [])],
        )

    assert compressed_map.get(node.uuid) == 'missing-canonical'
    assert nodes_by_episode[episode.uuid] == [node]
    assert any('Canonical node missing' in rec.message for rec in caplog.records)
def test_build_directed_uuid_map_empty():
    """No duplicate pairs yields an empty mapping."""
    assert bulk_utils._build_directed_uuid_map([]) == {}
def test_build_directed_uuid_map_chain():
    """Chained duplicates (a -> b -> c) all compress to the final canonical."""
    mapping = bulk_utils._build_directed_uuid_map([('a', 'b'), ('b', 'c')])
    for uuid in ('a', 'b', 'c'):
        assert mapping[uuid] == 'c'
def test_build_directed_uuid_map_preserves_direction():
    """The alias maps onto the canonical — never the reverse."""
    mapping = bulk_utils._build_directed_uuid_map([('alias', 'canonical')])
    assert mapping['canonical'] == 'canonical'
    assert mapping['alias'] == 'canonical'
def test_resolve_edge_pointers_updates_sources():
    """resolve_edge_pointers rewrites the source uuid via the map but leaves the target."""
    edge = EntityEdge(
        name='knows',
        fact='fact',
        group_id='group',
        source_node_uuid='alias',
        target_node_uuid='target',
        created_at=utc_now(),
    )

    bulk_utils.resolve_edge_pointers([edge], {'alias': 'canonical'})

    assert edge.source_node_uuid == 'canonical'
    assert edge.target_node_uuid == 'target'
@pytest.mark.asyncio
async def test_dedupe_edges_bulk_deduplicates_within_episode(monkeypatch):
    """Test that dedupe_edges_bulk correctly compares edges within the same episode.

    This test verifies the fix that removed the `if i == j: continue` check,
    which was preventing edges from the same episode from being compared against each other.
    """
    clients = _make_clients()
    # Track which edges are compared
    comparisons_made = []

    # Create mock embedder that sets embedding values
    async def mock_create_embeddings(embedder, edges):
        for edge in edges:
            edge.fact_embedding = [0.1, 0.2, 0.3]

    monkeypatch.setattr(bulk_utils, 'create_entity_edge_embeddings', mock_create_embeddings)

    # Mock resolve_extracted_edge to track comparisons and mark duplicates.
    # Signature mirrors the production resolve_extracted_edge so the
    # monkeypatch is a drop-in replacement.
    async def mock_resolve_extracted_edge(
        llm_client,
        extracted_edge,
        related_edges,
        existing_edges,
        episode,
        edge_type_candidates=None,
        custom_edge_type_names=None,
    ):
        # Track that this edge was compared against the related_edges
        comparisons_made.append((extracted_edge.uuid, [r.uuid for r in related_edges]))
        # If there are related edges with same source/target/fact, mark as duplicate
        for related in related_edges:
            if (
                related.uuid != extracted_edge.uuid  # Can't be duplicate of self
                and related.source_node_uuid == extracted_edge.source_node_uuid
                and related.target_node_uuid == extracted_edge.target_node_uuid
                and related.fact.strip().lower() == extracted_edge.fact.strip().lower()
            ):
                # Return the related edge and mark extracted_edge as duplicate
                return related, [], [related]
        # Otherwise return the extracted edge as-is
        return extracted_edge, [], []

    monkeypatch.setattr(bulk_utils, 'resolve_extracted_edge', mock_resolve_extracted_edge)

    episode = _make_episode('1')
    source_uuid = 'source-uuid'
    target_uuid = 'target-uuid'
    # Create 3 identical edges within the same episode
    edge1 = EntityEdge(
        name='recommends',
        fact='assistant recommends yoga poses',
        group_id='group',
        source_node_uuid=source_uuid,
        target_node_uuid=target_uuid,
        created_at=utc_now(),
        episodes=[episode.uuid],
    )
    edge2 = EntityEdge(
        name='recommends',
        fact='assistant recommends yoga poses',
        group_id='group',
        source_node_uuid=source_uuid,
        target_node_uuid=target_uuid,
        created_at=utc_now(),
        episodes=[episode.uuid],
    )
    edge3 = EntityEdge(
        name='recommends',
        fact='assistant recommends yoga poses',
        group_id='group',
        source_node_uuid=source_uuid,
        target_node_uuid=target_uuid,
        created_at=utc_now(),
        episodes=[episode.uuid],
    )

    await bulk_utils.dedupe_edges_bulk(
        clients,
        [[edge1, edge2, edge3]],
        [(episode, [])],
        [],
        {},
        {},
    )

    # Verify that edges were compared against each other (within same episode)
    # Each edge should have been compared against all 3 edges (including itself, which gets filtered)
    assert len(comparisons_made) == 3
    for _, compared_against in comparisons_made:
        # Each edge should have access to all 3 edges as candidates
        assert len(compared_against) >= 2  # At least 2 others (self is filtered out)
@pytest.mark.asyncio
async def test_extract_nodes_and_edges_bulk_passes_custom_instructions_to_extract_nodes(
    monkeypatch,
):
    """Test that custom_extraction_instructions is passed to extract_nodes."""
    clients = _make_clients()
    episode = _make_episode('1')
    # Track calls to extract_nodes
    extract_nodes_calls = []

    # Signature mirrors graphiti_core's extract_nodes so the patch is drop-in.
    async def mock_extract_nodes(
        clients,
        episode,
        previous_episodes,
        entity_types=None,
        excluded_entity_types=None,
        custom_extraction_instructions=None,
    ):
        extract_nodes_calls.append(
            {
                'entity_types': entity_types,
                'excluded_entity_types': excluded_entity_types,
                'custom_extraction_instructions': custom_extraction_instructions,
            }
        )
        return []

    # Edge extraction is stubbed out; this test only inspects the node stage.
    async def mock_extract_edges(
        clients,
        episode,
        nodes,
        previous_episodes,
        edge_type_map,
        group_id='',
        edge_types=None,
        custom_extraction_instructions=None,
    ):
        return []

    monkeypatch.setattr(bulk_utils, 'extract_nodes', mock_extract_nodes)
    monkeypatch.setattr(bulk_utils, 'extract_edges', mock_extract_edges)
    custom_instructions = 'Focus on extracting person entities and their relationships.'

    await extract_nodes_and_edges_bulk(
        clients,
        [(episode, [])],
        edge_type_map={},
        custom_extraction_instructions=custom_instructions,
    )

    assert len(extract_nodes_calls) == 1
    assert extract_nodes_calls[0]['custom_extraction_instructions'] == custom_instructions
@pytest.mark.asyncio
async def test_extract_nodes_and_edges_bulk_passes_custom_instructions_to_extract_edges(
    monkeypatch,
):
    """Test that custom_extraction_instructions is passed to extract_edges."""
    clients = _make_clients()
    episode = _make_episode('1')
    # Track calls to extract_edges
    extract_edges_calls = []
    extracted_node = EntityNode(name='Test', group_id='group', labels=['Entity'])

    # Returns one node so the edge-extraction stage actually runs.
    async def mock_extract_nodes(
        clients,
        episode,
        previous_episodes,
        entity_types=None,
        excluded_entity_types=None,
        custom_extraction_instructions=None,
    ):
        return [extracted_node]

    # Signature mirrors graphiti_core's extract_edges; records its kwargs.
    async def mock_extract_edges(
        clients,
        episode,
        nodes,
        previous_episodes,
        edge_type_map,
        group_id='',
        edge_types=None,
        custom_extraction_instructions=None,
    ):
        extract_edges_calls.append(
            {
                'nodes': nodes,
                'edge_type_map': edge_type_map,
                'edge_types': edge_types,
                'custom_extraction_instructions': custom_extraction_instructions,
            }
        )
        return []

    monkeypatch.setattr(bulk_utils, 'extract_nodes', mock_extract_nodes)
    monkeypatch.setattr(bulk_utils, 'extract_edges', mock_extract_edges)
    custom_instructions = 'Extract only professional relationships between people.'

    await extract_nodes_and_edges_bulk(
        clients,
        [(episode, [])],
        edge_type_map={('Entity', 'Entity'): ['knows']},
        custom_extraction_instructions=custom_instructions,
    )

    assert len(extract_edges_calls) == 1
    assert extract_edges_calls[0]['custom_extraction_instructions'] == custom_instructions
    assert extract_edges_calls[0]['nodes'] == [extracted_node]
@pytest.mark.asyncio
async def test_extract_nodes_and_edges_bulk_custom_instructions_none_by_default(monkeypatch):
    """Test that custom_extraction_instructions defaults to None when not provided."""
    clients = _make_clients()
    episode = _make_episode('1')
    extract_nodes_calls = []
    extract_edges_calls = []

    async def mock_extract_nodes(
        clients,
        episode,
        previous_episodes,
        entity_types=None,
        excluded_entity_types=None,
        custom_extraction_instructions=None,
    ):
        extract_nodes_calls.append(
            {'custom_extraction_instructions': custom_extraction_instructions}
        )
        return []

    async def mock_extract_edges(
        clients,
        episode,
        nodes,
        previous_episodes,
        edge_type_map,
        group_id='',
        edge_types=None,
        custom_extraction_instructions=None,
    ):
        extract_edges_calls.append(
            {'custom_extraction_instructions': custom_extraction_instructions}
        )
        return []

    monkeypatch.setattr(bulk_utils, 'extract_nodes', mock_extract_nodes)
    monkeypatch.setattr(bulk_utils, 'extract_edges', mock_extract_edges)

    # Call without custom_extraction_instructions
    await extract_nodes_and_edges_bulk(
        clients,
        [(episode, [])],
        edge_type_map={},
    )

    # Both extraction stages must observe the default (None).
    assert len(extract_nodes_calls) == 1
    assert extract_nodes_calls[0]['custom_extraction_instructions'] is None
    assert len(extract_edges_calls) == 1
    assert extract_edges_calls[0]['custom_extraction_instructions'] is None
@pytest.mark.asyncio
async def test_extract_nodes_and_edges_bulk_custom_instructions_multiple_episodes(monkeypatch):
    """Test that custom_extraction_instructions is passed for all episodes in bulk."""
    clients = _make_clients()
    episode1 = _make_episode('1')
    episode2 = _make_episode('2')
    episode3 = _make_episode('3')
    extract_nodes_calls = []
    extract_edges_calls = []

    # Records which episode each node-extraction call was made for.
    async def mock_extract_nodes(
        clients,
        episode,
        previous_episodes,
        entity_types=None,
        excluded_entity_types=None,
        custom_extraction_instructions=None,
    ):
        extract_nodes_calls.append(
            {
                'episode_name': episode.name,
                'custom_extraction_instructions': custom_extraction_instructions,
            }
        )
        return []

    # Records which episode each edge-extraction call was made for.
    async def mock_extract_edges(
        clients,
        episode,
        nodes,
        previous_episodes,
        edge_type_map,
        group_id='',
        edge_types=None,
        custom_extraction_instructions=None,
    ):
        extract_edges_calls.append(
            {
                'episode_name': episode.name,
                'custom_extraction_instructions': custom_extraction_instructions,
            }
        )
        return []

    monkeypatch.setattr(bulk_utils, 'extract_nodes', mock_extract_nodes)
    monkeypatch.setattr(bulk_utils, 'extract_edges', mock_extract_edges)
    custom_instructions = 'Extract entities related to financial transactions.'

    await extract_nodes_and_edges_bulk(
        clients,
        [(episode1, []), (episode2, []), (episode3, [])],
        edge_type_map={},
        custom_extraction_instructions=custom_instructions,
    )

    # All 3 episodes should have received the custom instructions
    assert len(extract_nodes_calls) == 3
    assert len(extract_edges_calls) == 3
    for call in extract_nodes_calls:
        assert call['custom_extraction_instructions'] == custom_instructions
    for call in extract_edges_calls:
        assert call['custom_extraction_instructions'] == custom_instructions
| {
"repo_id": "getzep/graphiti",
"file_path": "tests/utils/maintenance/test_bulk_utils.py",
"license": "Apache License 2.0",
"lines": 456,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:graphiti_core/utils/maintenance/dedup_helpers.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
import math
import re
from collections import defaultdict
from collections.abc import Iterable
from dataclasses import dataclass, field
from functools import lru_cache
from hashlib import blake2b
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from graphiti_core.nodes import EntityNode
# Names whose Shannon entropy (bits per character) is below this are considered
# too generic/repetitive for deterministic matching and are deferred to the LLM.
_NAME_ENTROPY_THRESHOLD = 1.5
# A name shorter than _MIN_NAME_LENGTH AND with fewer than _MIN_TOKEN_COUNT
# tokens is rejected outright by _has_high_entropy.
_MIN_NAME_LENGTH = 6
_MIN_TOKEN_COUNT = 2
# Minimum Jaccard overlap between shingle sets to accept a fuzzy duplicate.
_FUZZY_JACCARD_THRESHOLD = 0.9
# MinHash configuration: number of hash permutations, and rows per LSH band.
_MINHASH_PERMUTATIONS = 32
_MINHASH_BAND_SIZE = 4
def _normalize_string_exact(name: str) -> str:
    """Canonical exact-match key: lowercased, whitespace runs collapsed, ends trimmed."""
    collapsed = re.sub(r'\s+', ' ', name.lower())
    return collapsed.strip()
def _normalize_name_for_fuzzy(name: str) -> str:
    """Fuzzy-match form: exact-normalized text with all but [a-z0-9' ] blanked out."""
    blanked = re.sub(r"[^a-z0-9' ]", ' ', _normalize_string_exact(name)).strip()
    return re.sub(r'\s+', ' ', blanked)
def _name_entropy(normalized_name: str) -> float:
    """Shannon entropy (bits) of the character distribution, ignoring spaces.

    Low entropy flags short or repetitive names whose fuzzy matches are
    untrustworthy, so callers defer those to the LLM instead of trusting
    similarity scores.
    """
    if not normalized_name:
        return 0.0
    chars = normalized_name.replace(' ', '')
    frequencies: dict[str, int] = {}
    for ch in chars:
        frequencies[ch] = frequencies.get(ch, 0) + 1
    total = len(chars)
    if total == 0:
        # Name consisted solely of spaces.
        return 0.0
    return -sum(
        (count / total) * math.log2(count / total) for count in frequencies.values()
    )
def _has_high_entropy(normalized_name: str) -> bool:
    """Reject names that are both short and single-token, or below the entropy floor."""
    tokens = normalized_name.split()
    if len(normalized_name) < _MIN_NAME_LENGTH and len(tokens) < _MIN_TOKEN_COUNT:
        return False
    return _name_entropy(normalized_name) >= _NAME_ENTROPY_THRESHOLD
def _shingles(normalized_name: str) -> set[str]:
    """Return the set of character 3-gram shingles for MinHash comparison.

    Spaces are removed first. Strings shorter than one full 3-gram cannot
    produce any shingles, so the whole cleaned string is used as a single
    shingle instead. (Previously only 1-character inputs took this fallback;
    a 2-character name fell through to `range(0)` and yielded an EMPTY set,
    silently disabling fuzzy matching for it.)
    """
    cleaned = normalized_name.replace(' ', '')
    if len(cleaned) < 3:
        # Too short for a 3-gram: fall back to the string itself, or nothing.
        return {cleaned} if cleaned else set()
    return {cleaned[i : i + 3] for i in range(len(cleaned) - 2)}
def _hash_shingle(shingle: str, seed: int) -> int:
    """Deterministic 64-bit hash of (seed, shingle) via an 8-byte blake2b digest."""
    payload = f'{seed}:{shingle}'.encode()
    return int.from_bytes(blake2b(payload, digest_size=8).digest(), 'big')
def _minhash_signature(shingles: Iterable[str]) -> tuple[int, ...]:
    """MinHash signature: the minimum shingle hash under each permutation seed."""
    if not shingles:
        # No shingles means no signature (and therefore no LSH bands).
        return tuple()
    return tuple(
        min(_hash_shingle(shingle, seed) for shingle in shingles)
        for seed in range(_MINHASH_PERMUTATIONS)
    )
def _lsh_bands(signature: Iterable[int]) -> list[tuple[int, ...]]:
    """Chunk a MinHash signature into complete fixed-size bands for LSH bucketing."""
    values = list(signature)
    if not values:
        return []
    chunks = (
        tuple(values[offset : offset + _MINHASH_BAND_SIZE])
        for offset in range(0, len(values), _MINHASH_BAND_SIZE)
    )
    # A trailing partial band would never collide consistently, so drop it.
    return [band for band in chunks if len(band) == _MINHASH_BAND_SIZE]
def _jaccard_similarity(a: set[str], b: set[str]) -> float:
    """Jaccard overlap |a ∩ b| / |a ∪ b|; two empty sets count as identical (1.0)."""
    if not a and not b:
        return 1.0
    if not a or not b:
        return 0.0
    union_size = len(a | b)
    return len(a & b) / union_size if union_size else 0.0
@lru_cache(maxsize=512)
def _cached_shingles(name: str) -> set[str]:
    """Cache shingle sets per normalized name to avoid recomputation within a worker.

    NOTE: lru_cache hands back the *same* set object for repeated names
    (tests assert identity), so callers must never mutate the result.
    """
    return _shingles(name)
@dataclass
class DedupCandidateIndexes:
    """Precomputed lookup structures that drive entity deduplication heuristics."""

    # All candidate nodes considered for dedupe, in input order.
    existing_nodes: list[EntityNode]
    # Fast uuid -> node lookup for the candidates above.
    nodes_by_uuid: dict[str, EntityNode]
    # Exact-normalized name -> every candidate sharing that name.
    normalized_existing: defaultdict[str, list[EntityNode]]
    # Candidate uuid -> its character 3-gram shingle set.
    shingles_by_candidate: dict[str, set[str]]
    # (band index, band values) -> uuids of candidates hashed into that LSH bucket.
    lsh_buckets: defaultdict[tuple[int, tuple[int, ...]], list[str]]
@dataclass
class DedupResolutionState:
    """Mutable resolution bookkeeping shared across deterministic and LLM passes."""

    # One slot per extracted node; None until that index is resolved.
    resolved_nodes: list[EntityNode | None]
    # extracted uuid -> canonical uuid for every resolved node.
    uuid_map: dict[str, str]
    # Indices the deterministic pass could not settle; handed to the LLM pass.
    unresolved_indices: list[int]
    # (extracted, canonical) pairs recorded whenever a distinct duplicate is found.
    duplicate_pairs: list[tuple[EntityNode, EntityNode]] = field(default_factory=list)
def _build_candidate_indexes(existing_nodes: list[EntityNode]) -> DedupCandidateIndexes:
    """Precompute exact and fuzzy lookup structures once per dedupe run."""
    by_normalized: defaultdict[str, list[EntityNode]] = defaultdict(list)
    by_uuid: dict[str, EntityNode] = {}
    candidate_shingles: dict[str, set[str]] = {}
    buckets: defaultdict[tuple[int, tuple[int, ...]], list[str]] = defaultdict(list)

    for node in existing_nodes:
        by_uuid[node.uuid] = node
        by_normalized[_normalize_string_exact(node.name)].append(node)
        # Shingle the fuzzy-normalized name and register the node in every
        # LSH bucket its MinHash signature bands fall into.
        shingles = _cached_shingles(_normalize_name_for_fuzzy(node.name))
        candidate_shingles[node.uuid] = shingles
        for band_index, band in enumerate(_lsh_bands(_minhash_signature(shingles))):
            buckets[(band_index, band)].append(node.uuid)

    return DedupCandidateIndexes(
        existing_nodes=existing_nodes,
        nodes_by_uuid=by_uuid,
        normalized_existing=by_normalized,
        shingles_by_candidate=candidate_shingles,
        lsh_buckets=buckets,
    )
def _resolve_with_similarity(
    extracted_nodes: list[EntityNode],
    indexes: DedupCandidateIndexes,
    state: DedupResolutionState,
) -> None:
    """Attempt deterministic resolution using exact name hits and fuzzy MinHash comparisons.

    Mutates ``state`` in place: matches land in ``state.resolved_nodes`` /
    ``state.uuid_map`` (plus ``duplicate_pairs`` when the match is a distinct
    node); everything else is appended to ``state.unresolved_indices`` for a
    later LLM pass.
    """
    for idx, node in enumerate(extracted_nodes):
        normalized_exact = _normalize_string_exact(node.name)
        normalized_fuzzy = _normalize_name_for_fuzzy(node.name)

        # Low-entropy names (short/repetitive) are unreliable for both exact
        # and fuzzy matching — defer them to the LLM.
        if not _has_high_entropy(normalized_fuzzy):
            state.unresolved_indices.append(idx)
            continue

        existing_matches = indexes.normalized_existing.get(normalized_exact, [])
        if len(existing_matches) == 1:
            # Unambiguous exact name hit.
            match = existing_matches[0]
            state.resolved_nodes[idx] = match
            state.uuid_map[node.uuid] = match.uuid
            if match.uuid != node.uuid:
                state.duplicate_pairs.append((node, match))
            continue
        if len(existing_matches) > 1:
            # Several candidates share this exact name — ambiguous, LLM decides.
            state.unresolved_indices.append(idx)
            continue

        # No exact hit: gather fuzzy candidates from every LSH bucket this
        # node's MinHash signature bands fall into.
        shingles = _cached_shingles(normalized_fuzzy)
        signature = _minhash_signature(shingles)
        candidate_ids: set[str] = set()
        for band_index, band in enumerate(_lsh_bands(signature)):
            candidate_ids.update(indexes.lsh_buckets.get((band_index, band), []))

        # Score each bucketed candidate by true Jaccard similarity; keep the best.
        best_candidate: EntityNode | None = None
        best_score = 0.0
        for candidate_id in candidate_ids:
            candidate_shingles = indexes.shingles_by_candidate.get(candidate_id, set())
            score = _jaccard_similarity(shingles, candidate_shingles)
            if score > best_score:
                best_score = score
                best_candidate = indexes.nodes_by_uuid.get(candidate_id)

        if best_candidate is not None and best_score >= _FUZZY_JACCARD_THRESHOLD:
            state.resolved_nodes[idx] = best_candidate
            state.uuid_map[node.uuid] = best_candidate.uuid
            if best_candidate.uuid != node.uuid:
                state.duplicate_pairs.append((node, best_candidate))
            continue

        # Neither exact nor fuzzy pass was confident enough.
        state.unresolved_indices.append(idx)
# Underscore-prefixed helpers are exported deliberately: the dedupe pipeline
# and its unit tests import them from this module by name.
__all__ = [
    'DedupCandidateIndexes',
    'DedupResolutionState',
    '_normalize_string_exact',
    '_normalize_name_for_fuzzy',
    '_has_high_entropy',
    '_minhash_signature',
    '_lsh_bands',
    '_jaccard_similarity',
    '_cached_shingles',
    '_FUZZY_JACCARD_THRESHOLD',
    '_build_candidate_indexes',
    '_resolve_with_similarity',
]
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/utils/maintenance/dedup_helpers.py",
"license": "Apache License 2.0",
"lines": 202,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:tests/utils/maintenance/test_node_operations.py | import logging
from collections import defaultdict
from unittest.mock import AsyncMock, MagicMock
import pytest
from graphiti_core.graphiti_types import GraphitiClients
from graphiti_core.nodes import EntityNode, EpisodeType, EpisodicNode
from graphiti_core.search.search_config import SearchResults
from graphiti_core.utils.datetime_utils import utc_now
from graphiti_core.utils.maintenance.dedup_helpers import (
DedupCandidateIndexes,
DedupResolutionState,
_build_candidate_indexes,
_cached_shingles,
_has_high_entropy,
_hash_shingle,
_jaccard_similarity,
_lsh_bands,
_minhash_signature,
_name_entropy,
_normalize_name_for_fuzzy,
_normalize_string_exact,
_resolve_with_similarity,
_shingles,
)
from graphiti_core.utils.maintenance.node_operations import (
_collect_candidate_nodes,
_extract_entity_summaries_batch,
_resolve_with_llm,
extract_attributes_from_nodes,
resolve_extracted_nodes,
)
def _make_clients():
    """Return (clients, llm_generate_mock); every client member is a test double."""
    llm_client = MagicMock()
    llm_client.generate_response = AsyncMock()
    clients = GraphitiClients.model_construct(  # bypass validation to allow test doubles
        driver=MagicMock(),
        embedder=MagicMock(),
        cross_encoder=MagicMock(),
        llm_client=llm_client,
    )
    return clients, llm_client.generate_response
def _make_episode(group_id: str = 'group'):
    """Minimal message episode used as resolution context in these tests."""
    return EpisodicNode(
        name='episode',
        group_id=group_id,
        source=EpisodeType.message,
        content='content',
        source_description='test',
        valid_at=utc_now(),
    )
@pytest.mark.asyncio
async def test_resolve_nodes_exact_match_skips_llm(monkeypatch):
    """An exact-name candidate resolves deterministically — the LLM is never called."""
    clients, llm_generate = _make_clients()
    candidate = EntityNode(name='Joe Michaels', group_id='group', labels=['Entity'])
    extracted = EntityNode(name='Joe Michaels', group_id='group', labels=['Entity'])

    # Candidate search is stubbed to return the exact-name match.
    async def fake_search(*_, **__):
        return SearchResults(nodes=[candidate])

    monkeypatch.setattr(
        'graphiti_core.utils.maintenance.node_operations.search',
        fake_search,
    )
    resolved, uuid_map, _ = await resolve_extracted_nodes(
        clients,
        [extracted],
        episode=_make_episode(),
        previous_episodes=[],
    )

    assert resolved[0].uuid == candidate.uuid
    assert uuid_map[extracted.uuid] == candidate.uuid
    llm_generate.assert_not_awaited()
@pytest.mark.asyncio
async def test_resolve_nodes_low_entropy_uses_llm(monkeypatch):
    """A short low-entropy name ('Joe') bypasses fuzzy matching and goes to the LLM."""
    clients, llm_generate = _make_clients()
    # Canned LLM answer: no duplicate found for the single extracted entity.
    llm_generate.return_value = {
        'entity_resolutions': [
            {
                'id': 0,
                'name': 'Joe',
                'duplicate_name': '',
            }
        ]
    }
    extracted = EntityNode(name='Joe', group_id='group', labels=['Entity'])

    async def fake_search(*_, **__):
        return SearchResults(nodes=[])

    monkeypatch.setattr(
        'graphiti_core.utils.maintenance.node_operations.search',
        fake_search,
    )
    resolved, uuid_map, _ = await resolve_extracted_nodes(
        clients,
        [extracted],
        episode=_make_episode(),
        previous_episodes=[],
    )

    # Node resolves to itself (identity mapping) and the LLM was consulted.
    assert resolved[0].uuid == extracted.uuid
    assert uuid_map[extracted.uuid] == extracted.uuid
    llm_generate.assert_awaited()
@pytest.mark.asyncio
async def test_resolve_nodes_fuzzy_match(monkeypatch):
    """Punctuation variants ('Joe-Michaels' vs 'Joe Michaels') match via MinHash, not LLM."""
    clients, llm_generate = _make_clients()
    candidate = EntityNode(name='Joe-Michaels', group_id='group', labels=['Entity'])
    extracted = EntityNode(name='Joe Michaels', group_id='group', labels=['Entity'])

    async def fake_search(*_, **__):
        return SearchResults(nodes=[candidate])

    monkeypatch.setattr(
        'graphiti_core.utils.maintenance.node_operations.search',
        fake_search,
    )
    resolved, uuid_map, _ = await resolve_extracted_nodes(
        clients,
        [extracted],
        episode=_make_episode(),
        previous_episodes=[],
    )

    assert resolved[0].uuid == candidate.uuid
    assert uuid_map[extracted.uuid] == candidate.uuid
    llm_generate.assert_not_awaited()
@pytest.mark.asyncio
async def test_collect_candidate_nodes_dedupes_and_merges_override(monkeypatch):
    """Search results and the explicit override merge, deduped by uuid."""
    clients, _ = _make_clients()
    candidate = EntityNode(name='Alice', group_id='group', labels=['Entity'])
    # Same uuid as the search hit but different name — should collapse to one entry.
    override_duplicate = EntityNode(
        uuid=candidate.uuid,
        name='Alice Alt',
        group_id='group',
        labels=['Entity'],
    )
    extracted = EntityNode(name='Alice', group_id='group', labels=['Entity'])
    search_mock = AsyncMock(return_value=SearchResults(nodes=[candidate]))
    monkeypatch.setattr(
        'graphiti_core.utils.maintenance.node_operations.search',
        search_mock,
    )

    result = await _collect_candidate_nodes(
        clients,
        [extracted],
        existing_nodes_override=[override_duplicate],
    )

    assert len(result) == 1
    assert result[0].uuid == candidate.uuid
    search_mock.assert_awaited()
def test_build_candidate_indexes_populates_structures():
    """A single candidate must be reachable through every lookup structure."""
    node = EntityNode(name='Bob Dylan', group_id='group', labels=['Entity'])
    built = _build_candidate_indexes([node])
    key = node.name.lower()
    assert built.normalized_existing[key][0].uuid == node.uuid
    assert built.nodes_by_uuid[node.uuid] is node
    assert node.uuid in built.shingles_by_candidate
    buckets = built.lsh_buckets.values()
    assert any(node.uuid in bucket for bucket in buckets)
def test_normalize_helpers():
    """Exact and fuzzy normalizers both lowercase and strip whitespace/punctuation."""
    exact = _normalize_string_exact(' Alice Smith ')
    fuzzy = _normalize_name_for_fuzzy('Alice-Smith!')
    assert exact == 'alice smith'
    assert fuzzy == 'alice smith'
def test_name_entropy_variants():
    """A varied name carries more entropy than a repeated character; empty is zero."""
    varied = _name_entropy('alice')
    uniform = _name_entropy('aaaaa')
    assert varied > uniform
    assert _name_entropy('') == 0.0
def test_has_high_entropy_rules():
    """Long, varied names pass the entropy gate; very short ones do not."""
    accepted = _has_high_entropy('meaningful name')
    rejected = _has_high_entropy('aa')
    assert accepted is True
    assert rejected is False
def test_shingles_and_cache():
    """_shingles yields character 3-grams; _cached_shingles memoizes per input."""
    text = 'alice'
    expected = {'ali', 'lic', 'ice'}
    assert _shingles(text) == expected
    cached = _cached_shingles(text)
    assert cached == expected
    # The cache must hand back the identical object on repeat lookups.
    assert cached is _cached_shingles(text)
def test_hash_minhash_and_lsh():
    """Signature has 32 slots, each LSH band has 4, and shingle hashes are distinct."""
    grams = {'abc', 'bcd', 'cde'}
    sig = _minhash_signature(grams)
    assert len(sig) == 32
    assert all(len(band) == 4 for band in _lsh_bands(sig))
    distinct_hashes = {_hash_shingle(gram, 0) for gram in grams}
    assert len(distinct_hashes) == len(grams)
def test_jaccard_similarity_edges():
    """Jaccard similarity: partial overlap, both-empty (defined as 1), one-empty."""
    left = {'a', 'b'}
    right = {'a', 'c'}
    assert _jaccard_similarity(left, right) == pytest.approx(1 / 3)
    assert _jaccard_similarity(set(), set()) == 1.0
    assert _jaccard_similarity(left, set()) == 0.0
def test_resolve_with_similarity_exact_match_updates_state():
    """A unique exact-name match resolves heuristically, recording the pair."""
    existing = EntityNode(name='Charlie Parker', group_id='group', labels=['Entity'])
    incoming = EntityNode(name='Charlie Parker', group_id='group', labels=['Entity'])
    state = DedupResolutionState(resolved_nodes=[None], uuid_map={}, unresolved_indices=[])
    _resolve_with_similarity([incoming], _build_candidate_indexes([existing]), state)
    assert state.resolved_nodes[0].uuid == existing.uuid
    assert state.uuid_map[incoming.uuid] == existing.uuid
    assert state.unresolved_indices == []
    assert state.duplicate_pairs == [(incoming, existing)]
def test_resolve_with_similarity_low_entropy_defers_resolution():
    """A low-entropy name ('Bob') with empty candidate indexes is left
    unresolved so the LLM pass can decide."""
    extracted = EntityNode(name='Bob', group_id='group', labels=['Entity'])
    # Completely empty indexes: no heuristic match is possible.
    indexes = DedupCandidateIndexes(
        existing_nodes=[],
        nodes_by_uuid={},
        normalized_existing=defaultdict(list),
        shingles_by_candidate={},
        lsh_buckets=defaultdict(list),
    )
    state = DedupResolutionState(resolved_nodes=[None], uuid_map={}, unresolved_indices=[])
    _resolve_with_similarity([extracted], indexes, state)
    assert state.resolved_nodes[0] is None
    assert state.unresolved_indices == [0]
    assert state.duplicate_pairs == []
def test_resolve_with_similarity_multiple_exact_matches_defers_to_llm():
    """Two identically named candidates make the exact match ambiguous, so the
    node is deferred to the LLM pass instead of being resolved heuristically."""
    twins = [
        EntityNode(name='Johnny Appleseed', group_id='group', labels=['Entity'])
        for _ in range(2)
    ]
    incoming = EntityNode(name='Johnny Appleseed', group_id='group', labels=['Entity'])
    state = DedupResolutionState(resolved_nodes=[None], uuid_map={}, unresolved_indices=[])
    _resolve_with_similarity([incoming], _build_candidate_indexes(twins), state)
    assert state.resolved_nodes[0] is None
    assert state.unresolved_indices == [0]
    assert state.duplicate_pairs == []
@pytest.mark.asyncio
async def test_resolve_with_llm_updates_unresolved(monkeypatch):
    """The LLM pass resolves a pending index to the candidate named by
    duplicate_name and records the duplicate pair; the prompt context passed to
    the prompt builder includes an existing_nodes list."""
    extracted = EntityNode(name='Dizzy', group_id='group', labels=['Entity'])
    candidate = EntityNode(name='Dizzy Gillespie', group_id='group', labels=['Entity'])
    indexes = _build_candidate_indexes([candidate])
    state = DedupResolutionState(resolved_nodes=[None], uuid_map={}, unresolved_indices=[0])
    captured_context = {}

    # Capture the context handed to the prompt builder for later inspection.
    def fake_prompt_nodes(context):
        captured_context.update(context)
        return ['prompt']

    monkeypatch.setattr(
        'graphiti_core.utils.maintenance.node_operations.prompt_library.dedupe_nodes.nodes',
        fake_prompt_nodes,
    )

    # Canned LLM verdict: index 0 duplicates the existing 'Dizzy Gillespie'.
    async def fake_generate_response(*_, **__):
        return {
            'entity_resolutions': [
                {
                    'id': 0,
                    'name': 'Dizzy Gillespie',
                    'duplicate_name': 'Dizzy Gillespie',
                }
            ]
        }

    llm_client = MagicMock()
    llm_client.generate_response = AsyncMock(side_effect=fake_generate_response)
    await _resolve_with_llm(
        llm_client,
        [extracted],
        indexes,
        state,
        episode=_make_episode(),
        previous_episodes=[],
        entity_types=None,
    )
    assert state.resolved_nodes[0].uuid == candidate.uuid
    assert state.uuid_map[extracted.uuid] == candidate.uuid
    assert isinstance(captured_context['existing_nodes'], list)
    assert state.duplicate_pairs == [(extracted, candidate)]
@pytest.mark.asyncio
async def test_resolve_with_llm_ignores_out_of_range_relative_ids(monkeypatch, caplog):
    """An LLM resolution id outside the unresolved range (5 with only index 0
    pending) is skipped with a warning and leaves the node unresolved."""
    extracted = EntityNode(name='Dexter', group_id='group', labels=['Entity'])
    indexes = _build_candidate_indexes([])
    state = DedupResolutionState(resolved_nodes=[None], uuid_map={}, unresolved_indices=[0])
    monkeypatch.setattr(
        'graphiti_core.utils.maintenance.node_operations.prompt_library.dedupe_nodes.nodes',
        lambda context: ['prompt'],
    )
    llm_client = MagicMock()
    # id 5 is out of range: only relative id 0 exists.
    llm_client.generate_response = AsyncMock(
        return_value={
            'entity_resolutions': [
                {
                    'id': 5,
                    'name': 'Dexter',
                    'duplicate_name': '',
                }
            ]
        }
    )
    with caplog.at_level(logging.WARNING):
        await _resolve_with_llm(
            llm_client,
            [extracted],
            indexes,
            state,
            episode=_make_episode(),
            previous_episodes=[],
            entity_types=None,
        )
    assert state.resolved_nodes[0] is None
    assert 'Skipping invalid LLM dedupe id 5' in caplog.text
@pytest.mark.asyncio
async def test_resolve_with_llm_ignores_duplicate_relative_ids(monkeypatch):
    """When the LLM returns the same relative id twice, only the first
    resolution is applied; the contradictory second entry is ignored."""
    extracted = EntityNode(name='Dizzy', group_id='group', labels=['Entity'])
    candidate = EntityNode(name='Dizzy Gillespie', group_id='group', labels=['Entity'])
    indexes = _build_candidate_indexes([candidate])
    state = DedupResolutionState(resolved_nodes=[None], uuid_map={}, unresolved_indices=[0])
    monkeypatch.setattr(
        'graphiti_core.utils.maintenance.node_operations.prompt_library.dedupe_nodes.nodes',
        lambda context: ['prompt'],
    )
    llm_client = MagicMock()
    # Two resolutions for id 0: the first says duplicate, the second says not.
    llm_client.generate_response = AsyncMock(
        return_value={
            'entity_resolutions': [
                {
                    'id': 0,
                    'name': 'Dizzy Gillespie',
                    'duplicate_name': 'Dizzy Gillespie',
                },
                {
                    'id': 0,
                    'name': 'Dizzy',
                    'duplicate_name': '',
                },
            ]
        }
    )
    await _resolve_with_llm(
        llm_client,
        [extracted],
        indexes,
        state,
        episode=_make_episode(),
        previous_episodes=[],
        entity_types=None,
    )
    # The first entry wins: resolved to the existing candidate.
    assert state.resolved_nodes[0].uuid == candidate.uuid
    assert state.uuid_map[extracted.uuid] == candidate.uuid
    assert state.duplicate_pairs == [(extracted, candidate)]
@pytest.mark.asyncio
async def test_resolve_with_llm_invalid_duplicate_name_defaults_to_extracted(monkeypatch):
    """A duplicate_name that matches no known candidate falls back to keeping
    the extracted node itself, with no duplicate pair recorded."""
    extracted = EntityNode(name='Dexter', group_id='group', labels=['Entity'])
    indexes = _build_candidate_indexes([])
    state = DedupResolutionState(resolved_nodes=[None], uuid_map={}, unresolved_indices=[0])
    monkeypatch.setattr(
        'graphiti_core.utils.maintenance.node_operations.prompt_library.dedupe_nodes.nodes',
        lambda context: ['prompt'],
    )
    llm_client = MagicMock()
    # 'NonExistent Entity' is not among the candidates, so it cannot be honored.
    llm_client.generate_response = AsyncMock(
        return_value={
            'entity_resolutions': [
                {
                    'id': 0,
                    'name': 'Dexter',
                    'duplicate_name': 'NonExistent Entity',
                }
            ]
        }
    )
    await _resolve_with_llm(
        llm_client,
        [extracted],
        indexes,
        state,
        episode=_make_episode(),
        previous_episodes=[],
        entity_types=None,
    )
    assert state.resolved_nodes[0] == extracted
    assert state.uuid_map[extracted.uuid] == extracted.uuid
    assert state.duplicate_pairs == []
@pytest.mark.asyncio
async def test_batch_summaries_short_summary_no_llm():
    """Test that short summaries are kept as-is without LLM call (optimization)."""
    llm_client = MagicMock()
    # Canned response; should never be consulted for a short summary.
    llm_client.generate_response = AsyncMock(
        return_value={'summaries': [{'name': 'Test Node', 'summary': 'Generated summary'}]}
    )
    node = EntityNode(name='Test Node', group_id='group', labels=['Entity'], summary='Old summary')
    episode = _make_episode()
    await _extract_entity_summaries_batch(
        llm_client,
        [node],
        episode=episode,
        previous_episodes=[],
        should_summarize_node=None,
        edges_by_node={},  # no edge facts to lengthen the summary
    )
    # Short summary should be kept as-is without LLM call
    assert node.summary == 'Old summary'
    # LLM should NOT have been called (summary is short enough)
    llm_client.generate_response.assert_not_awaited()
@pytest.mark.asyncio
async def test_batch_summaries_callback_skip_summary():
    """Test that summary is NOT regenerated when callback returns False."""
    llm_client = MagicMock()
    # If the callback is honored, this canned response must never be used.
    llm_client.generate_response = AsyncMock(
        return_value={'summaries': [{'name': 'Test Node', 'summary': 'This should not be used'}]}
    )
    node = EntityNode(name='Test Node', group_id='group', labels=['Entity'], summary='Old summary')
    episode = _make_episode()

    # Callback that always returns False (skip summary generation)
    async def skip_summary_filter(n: EntityNode) -> bool:
        return False

    await _extract_entity_summaries_batch(
        llm_client,
        [node],
        episode=episode,
        previous_episodes=[],
        should_summarize_node=skip_summary_filter,
        edges_by_node={},
    )
    # Summary should remain unchanged
    assert node.summary == 'Old summary'
    # LLM should NOT have been called for summary
    llm_client.generate_response.assert_not_awaited()
@pytest.mark.asyncio
async def test_batch_summaries_selective_callback():
    """Test callback that selectively skips summaries based on node properties."""
    llm_client = MagicMock()
    llm_client.generate_response = AsyncMock(return_value={'summaries': []})
    user_node = EntityNode(name='User', group_id='group', labels=['Entity', 'User'], summary='Old')
    topic_node = EntityNode(
        name='Topic', group_id='group', labels=['Entity', 'Topic'], summary='Old'
    )
    episode = _make_episode()

    # Callback that skips User nodes but generates for others
    async def selective_filter(n: EntityNode) -> bool:
        return 'User' not in n.labels

    await _extract_entity_summaries_batch(
        llm_client,
        [user_node, topic_node],
        episode=episode,
        previous_episodes=[],
        should_summarize_node=selective_filter,
        edges_by_node={},
    )
    # User summary should remain unchanged (callback returned False)
    assert user_node.summary == 'Old'
    # Topic summary should also remain unchanged (short summary optimization)
    assert topic_node.summary == 'Old'
    # LLM should NOT have been called (summaries are short enough)
    llm_client.generate_response.assert_not_awaited()
@pytest.mark.asyncio
async def test_extract_attributes_from_nodes_with_callback():
    """Test that callback is properly passed through extract_attributes_from_nodes."""
    clients, _ = _make_clients()
    clients.llm_client.generate_response = AsyncMock(return_value={'summaries': []})
    # Stub embedder so attribute extraction can embed without a real model.
    clients.embedder.create = AsyncMock(return_value=[0.1, 0.2, 0.3])
    clients.embedder.create_batch = AsyncMock(return_value=[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
    node1 = EntityNode(name='Node1', group_id='group', labels=['Entity', 'User'], summary='Old1')
    node2 = EntityNode(name='Node2', group_id='group', labels=['Entity', 'Topic'], summary='Old2')
    episode = _make_episode()
    call_tracker = []

    # Callback that tracks which nodes it's called with
    async def tracking_filter(n: EntityNode) -> bool:
        call_tracker.append(n.name)
        return 'User' not in n.labels

    results = await extract_attributes_from_nodes(
        clients,
        [node1, node2],
        episode=episode,
        previous_episodes=[],
        entity_types=None,
        should_summarize_node=tracking_filter,
    )
    # Callback should have been called for both nodes
    assert len(call_tracker) == 2
    assert 'Node1' in call_tracker
    assert 'Node2' in call_tracker
    # Both nodes should keep old summaries (short summary optimization skips LLM)
    node1_result = next(n for n in results if n.name == 'Node1')
    node2_result = next(n for n in results if n.name == 'Node2')
    assert node1_result.summary == 'Old1'
    assert node2_result.summary == 'Old2'
@pytest.mark.asyncio
async def test_batch_summaries_calls_llm_for_long_summary():
    """Test that LLM is called when summary exceeds character limit."""
    from graphiti_core.edges import EntityEdge
    from graphiti_core.utils.text_utils import MAX_SUMMARY_CHARS

    llm_client = MagicMock()
    llm_client.generate_response = AsyncMock(
        return_value={'summaries': [{'name': 'Test Node', 'summary': 'Condensed summary'}]}
    )
    node = EntityNode(name='Test Node', group_id='group', labels=['Entity'], summary='Short')
    episode = _make_episode()
    # Create edges with long facts that exceed the threshold
    long_fact = 'x' * (MAX_SUMMARY_CHARS * 2)
    edge = EntityEdge(
        uuid='edge1',
        group_id='group',
        source_node_uuid=node.uuid,
        target_node_uuid='other-uuid',
        name='test_edge',
        fact=long_fact,
        created_at=utc_now(),
    )
    edges_by_node = {node.uuid: [edge, edge]}  # Multiple long edges
    await _extract_entity_summaries_batch(
        llm_client,
        [node],
        episode=episode,
        previous_episodes=[],
        should_summarize_node=None,
        edges_by_node=edges_by_node,
    )
    # LLM should have been called to condense the long summary
    llm_client.generate_response.assert_awaited_once()
    assert node.summary == 'Condensed summary'
| {
"repo_id": "getzep/graphiti",
"file_path": "tests/utils/maintenance/test_node_operations.py",
"license": "Apache License 2.0",
"lines": 499,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:graphiti_core/driver/kuzu_driver.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Any
import kuzu
from graphiti_core.driver.driver import GraphDriver, GraphDriverSession, GraphProvider
from graphiti_core.driver.kuzu.operations.community_edge_ops import KuzuCommunityEdgeOperations
from graphiti_core.driver.kuzu.operations.community_node_ops import KuzuCommunityNodeOperations
from graphiti_core.driver.kuzu.operations.entity_edge_ops import KuzuEntityEdgeOperations
from graphiti_core.driver.kuzu.operations.entity_node_ops import KuzuEntityNodeOperations
from graphiti_core.driver.kuzu.operations.episode_node_ops import KuzuEpisodeNodeOperations
from graphiti_core.driver.kuzu.operations.episodic_edge_ops import KuzuEpisodicEdgeOperations
from graphiti_core.driver.kuzu.operations.graph_ops import KuzuGraphMaintenanceOperations
from graphiti_core.driver.kuzu.operations.has_episode_edge_ops import KuzuHasEpisodeEdgeOperations
from graphiti_core.driver.kuzu.operations.next_episode_edge_ops import (
KuzuNextEpisodeEdgeOperations,
)
from graphiti_core.driver.kuzu.operations.saga_node_ops import KuzuSagaNodeOperations
from graphiti_core.driver.kuzu.operations.search_ops import KuzuSearchOperations
from graphiti_core.driver.operations.community_edge_ops import CommunityEdgeOperations
from graphiti_core.driver.operations.community_node_ops import CommunityNodeOperations
from graphiti_core.driver.operations.entity_edge_ops import EntityEdgeOperations
from graphiti_core.driver.operations.entity_node_ops import EntityNodeOperations
from graphiti_core.driver.operations.episode_node_ops import EpisodeNodeOperations
from graphiti_core.driver.operations.episodic_edge_ops import EpisodicEdgeOperations
from graphiti_core.driver.operations.graph_ops import GraphMaintenanceOperations
from graphiti_core.driver.operations.has_episode_edge_ops import HasEpisodeEdgeOperations
from graphiti_core.driver.operations.next_episode_edge_ops import NextEpisodeEdgeOperations
from graphiti_core.driver.operations.saga_node_ops import SagaNodeOperations
from graphiti_core.driver.operations.search_ops import SearchOperations
logger = logging.getLogger(__name__)
# Kuzu requires an explicit schema.
# As Kuzu currently does not support creating full text indexes on edge properties,
# we work around this by representing (n:Entity)-[:RELATES_TO]->(m:Entity) as
# (n)-[:RELATES_TO]->(e:RelatesToNode_)-[:RELATES_TO]->(m).
SCHEMA_QUERIES = """
CREATE NODE TABLE IF NOT EXISTS Episodic (
uuid STRING PRIMARY KEY,
name STRING,
group_id STRING,
created_at TIMESTAMP,
source STRING,
source_description STRING,
content STRING,
valid_at TIMESTAMP,
entity_edges STRING[]
);
CREATE NODE TABLE IF NOT EXISTS Entity (
uuid STRING PRIMARY KEY,
name STRING,
group_id STRING,
labels STRING[],
created_at TIMESTAMP,
name_embedding FLOAT[],
summary STRING,
attributes STRING
);
CREATE NODE TABLE IF NOT EXISTS Community (
uuid STRING PRIMARY KEY,
name STRING,
group_id STRING,
created_at TIMESTAMP,
name_embedding FLOAT[],
summary STRING
);
CREATE NODE TABLE IF NOT EXISTS RelatesToNode_ (
uuid STRING PRIMARY KEY,
group_id STRING,
created_at TIMESTAMP,
name STRING,
fact STRING,
fact_embedding FLOAT[],
episodes STRING[],
expired_at TIMESTAMP,
valid_at TIMESTAMP,
invalid_at TIMESTAMP,
attributes STRING
);
CREATE REL TABLE IF NOT EXISTS RELATES_TO(
FROM Entity TO RelatesToNode_,
FROM RelatesToNode_ TO Entity
);
CREATE REL TABLE IF NOT EXISTS MENTIONS(
FROM Episodic TO Entity,
uuid STRING PRIMARY KEY,
group_id STRING,
created_at TIMESTAMP
);
CREATE REL TABLE IF NOT EXISTS HAS_MEMBER(
FROM Community TO Entity,
FROM Community TO Community,
uuid STRING,
group_id STRING,
created_at TIMESTAMP
);
CREATE NODE TABLE IF NOT EXISTS Saga (
uuid STRING PRIMARY KEY,
name STRING,
group_id STRING,
created_at TIMESTAMP
);
CREATE REL TABLE IF NOT EXISTS HAS_EPISODE(
FROM Saga TO Episodic,
uuid STRING,
group_id STRING,
created_at TIMESTAMP
);
CREATE REL TABLE IF NOT EXISTS NEXT_EPISODE(
FROM Episodic TO Episodic,
uuid STRING,
group_id STRING,
created_at TIMESTAMP
);
"""
class KuzuDriver(GraphDriver):
    """GraphDriver implementation backed by an embedded Kuzu database.

    Creates the explicit schema on construction and exposes the per-type
    node/edge operation objects required by the GraphDriver interface.
    """

    provider: GraphProvider = GraphProvider.KUZU
    # No OpenSearch integration for Kuzu; attribute kept for interface parity.
    aoss_client: None = None

    def __init__(
        self,
        db: str = ':memory:',
        max_concurrent_queries: int = 1,
    ):
        """Open (or create) the database at `db` and prepare the async connection.

        Args:
            db: Database path, or ':memory:' (default) for an in-memory database.
            max_concurrent_queries: Concurrency limit for the async connection.
        """
        super().__init__()
        self.db = kuzu.Database(db)
        # Schema must exist before any query runs against the async connection.
        self.setup_schema()
        self.client = kuzu.AsyncConnection(self.db, max_concurrent_queries=max_concurrent_queries)
        # Instantiate Kuzu operations
        self._entity_node_ops = KuzuEntityNodeOperations()
        self._episode_node_ops = KuzuEpisodeNodeOperations()
        self._community_node_ops = KuzuCommunityNodeOperations()
        self._saga_node_ops = KuzuSagaNodeOperations()
        self._entity_edge_ops = KuzuEntityEdgeOperations()
        self._episodic_edge_ops = KuzuEpisodicEdgeOperations()
        self._community_edge_ops = KuzuCommunityEdgeOperations()
        self._has_episode_edge_ops = KuzuHasEpisodeEdgeOperations()
        self._next_episode_edge_ops = KuzuNextEpisodeEdgeOperations()
        self._search_ops = KuzuSearchOperations()
        self._graph_ops = KuzuGraphMaintenanceOperations()

    # --- Operations properties ---
    # Read-only accessors returning the operation objects built in __init__.
    @property
    def entity_node_ops(self) -> EntityNodeOperations:
        return self._entity_node_ops

    @property
    def episode_node_ops(self) -> EpisodeNodeOperations:
        return self._episode_node_ops

    @property
    def community_node_ops(self) -> CommunityNodeOperations:
        return self._community_node_ops

    @property
    def saga_node_ops(self) -> SagaNodeOperations:
        return self._saga_node_ops

    @property
    def entity_edge_ops(self) -> EntityEdgeOperations:
        return self._entity_edge_ops

    @property
    def episodic_edge_ops(self) -> EpisodicEdgeOperations:
        return self._episodic_edge_ops

    @property
    def community_edge_ops(self) -> CommunityEdgeOperations:
        return self._community_edge_ops

    @property
    def has_episode_edge_ops(self) -> HasEpisodeEdgeOperations:
        return self._has_episode_edge_ops

    @property
    def next_episode_edge_ops(self) -> NextEpisodeEdgeOperations:
        return self._next_episode_edge_ops

    @property
    def search_ops(self) -> SearchOperations:
        return self._search_ops

    @property
    def graph_ops(self) -> GraphMaintenanceOperations:
        return self._graph_ops

    async def execute_query(
        self, cypher_query_: str, **kwargs: Any
    ) -> tuple[list[dict[str, Any]] | list[list[dict[str, Any]]], None, None]:
        """Run a Cypher query and return (rows, None, None), mirroring the
        Neo4j driver's result tuple shape.

        None-valued kwargs are dropped; 'database_'/'routing_' are Neo4j-only
        and removed. A multi-statement result comes back as a list of row-lists.
        """
        params = {k: v for k, v in kwargs.items() if v is not None}
        # Kuzu does not support these parameters.
        params.pop('database_', None)
        params.pop('routing_', None)
        try:
            results = await self.client.execute(cypher_query_, parameters=params)
        except Exception as e:
            # Truncate long list params (e.g. embeddings) to keep the log readable.
            params = {k: (v[:5] if isinstance(v, list) else v) for k, v in params.items()}
            logger.error(f'Error executing Kuzu query: {e}\n{cypher_query_}\n{params}')
            raise
        if not results:
            return [], None, None
        if isinstance(results, list):
            dict_results = [list(result.rows_as_dict()) for result in results]
        else:
            dict_results = list(results.rows_as_dict())
        return dict_results, None, None  # type: ignore

    def session(self, _database: str | None = None) -> GraphDriverSession:
        """Return a session facade; `_database` is ignored (Kuzu has no databases)."""
        return KuzuDriverSession(self)

    async def close(self):
        # Do not explicitly close the connection, instead rely on GC.
        pass

    def delete_all_indexes(self, database_: str):
        """No-op: Kuzu indexes are part of the fixed schema created at startup."""
        pass

    async def build_indices_and_constraints(self, delete_existing: bool = False):
        # Kuzu doesn't support dynamic index creation like Neo4j or FalkorDB
        # Schema and indices are created during setup_schema()
        # This method is required by the abstract base class but is a no-op for Kuzu
        pass

    def setup_schema(self):
        """Create all node/rel tables (idempotent) via a short-lived sync connection."""
        conn = kuzu.Connection(self.db)
        conn.execute(SCHEMA_QUERIES)
        conn.close()
class KuzuDriverSession(GraphDriverSession):
    """Thin async session facade over a shared KuzuDriver connection.

    Kuzu has no per-session state, so entering, exiting, and closing the
    session are all no-ops; every query is delegated to the driver.
    """

    provider = GraphProvider.KUZU

    def __init__(self, driver: KuzuDriver):
        self.driver = driver

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        # Nothing to release; the driver owns the underlying connection.
        return None

    async def close(self):
        # Intentionally a no-op: the driver-level connection is shared and reused.
        pass

    async def execute_write(self, func, *args, **kwargs):
        """Run an async unit of work, handing this session in as its transaction."""
        return await func(self, *args, **kwargs)

    async def run(self, query: str | list, **kwargs: Any) -> Any:
        """Execute a single query (kwargs as parameters) or a list of
        (query, params) pairs, in order."""
        batch = query if isinstance(query, list) else [(query, kwargs)]
        for statement, parameters in batch:
            await self.driver.execute_query(statement, **parameters)
        return None
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/kuzu_driver.py",
"license": "Apache License 2.0",
"lines": 240,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:tests/test_graphiti_mock.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime, timedelta
from unittest.mock import Mock
import numpy as np
import pytest
from graphiti_core.cross_encoder.client import CrossEncoderClient
from graphiti_core.edges import CommunityEdge, EntityEdge, EpisodicEdge
from graphiti_core.graphiti import Graphiti
from graphiti_core.llm_client import LLMClient
from graphiti_core.nodes import CommunityNode, EntityNode, EpisodeType, EpisodicNode
from graphiti_core.search.search_filters import ComparisonOperator, DateFilter, SearchFilters
from graphiti_core.search.search_utils import (
community_fulltext_search,
community_similarity_search,
edge_bfs_search,
edge_fulltext_search,
edge_similarity_search,
episode_fulltext_search,
episode_mentions_reranker,
get_communities_by_nodes,
get_edge_invalidation_candidates,
get_embeddings_for_communities,
get_embeddings_for_edges,
get_embeddings_for_nodes,
get_mentioned_nodes,
get_relevant_edges,
get_relevant_nodes,
node_bfs_search,
node_distance_reranker,
node_fulltext_search,
node_similarity_search,
)
from graphiti_core.utils.bulk_utils import add_nodes_and_edges_bulk
from graphiti_core.utils.maintenance.community_operations import (
determine_entity_community,
get_community_clusters,
remove_communities,
)
from graphiti_core.utils.maintenance.edge_operations import filter_existing_duplicate_of_edges
from tests.helpers_test import (
GraphProvider,
assert_entity_edge_equals,
assert_entity_node_equals,
assert_episodic_edge_equals,
assert_episodic_node_equals,
get_edge_count,
get_node_count,
group_id,
group_id_2,
)
pytest_plugins = ('pytest_asyncio',)
@pytest.fixture
def mock_llm_client():
    """Create a mock LLM client whose `generate_response` returns a canned
    entity-extraction tool call.

    `generate_response` is an async method on `LLMClient`, and `Mock(spec=...)`
    would auto-spec it as an AsyncMock; the previous explicit replacement with a
    plain sync `Mock` undid that, so any `await` of it raised TypeError. Use
    `AsyncMock` so awaiting callers receive the canned dict.
    """
    from unittest.mock import AsyncMock  # local: module only imports Mock at top level

    mock_llm = Mock(spec=LLMClient)
    mock_llm.config = Mock()
    mock_llm.model = 'test-model'
    mock_llm.small_model = 'test-small-model'
    mock_llm.temperature = 0.0
    mock_llm.max_tokens = 1000
    mock_llm.cache_enabled = False
    mock_llm.cache_dir = None
    # Mock the public method that's actually called (awaitable, canned result).
    mock_llm.generate_response = AsyncMock(
        return_value={
            'tool_calls': [
                {
                    'name': 'extract_entities',
                    'arguments': {
                        'entities': [{'entity': 'test_entity', 'entity_type': 'test_type'}]
                    },
                }
            ]
        }
    )
    return mock_llm
@pytest.fixture
def mock_cross_encoder_client():
    """Create a mock cross-encoder whose `rerank` returns a canned payload.

    `rerank` is async on `CrossEncoderClient`; mocking it with a plain sync
    `Mock` (as before) would make `await` fail, so use `AsyncMock`. The canned
    return value is kept identical to the original fixture.
    """
    from unittest.mock import AsyncMock  # local: module only imports Mock at top level

    mock_encoder = Mock(spec=CrossEncoderClient)
    mock_encoder.config = Mock()
    # Mock the public method that's actually called (awaitable, canned result).
    mock_encoder.rerank = AsyncMock(
        return_value={
            'tool_calls': [
                {
                    'name': 'extract_entities',
                    'arguments': {
                        'entities': [{'entity': 'test_entity', 'entity_type': 'test_type'}]
                    },
                }
            ]
        }
    )
    return mock_encoder
@pytest.mark.asyncio
async def test_add_bulk(graph_driver, mock_llm_client, mock_embedder, mock_cross_encoder_client):
    """Round-trip check of add_nodes_and_edges_bulk: persist two episodes, four
    entities, and all connecting edges, then fetch each node/edge back by uuid
    and verify equality via the shared assertion helpers."""
    if graph_driver.provider == GraphProvider.FALKORDB:
        pytest.skip('Skipping as test fails on FalkorDB')
    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    # NOTE(review): naive datetime here, while the sibling node-operations tests
    # use utc_now(); confirm the drivers tolerate naive timestamps.
    now = datetime.now()
    # Create episodic nodes
    episode_node_1 = EpisodicNode(
        name='test_episode',
        group_id=group_id,
        labels=[],
        created_at=now,
        source=EpisodeType.message,
        source_description='conversation message',
        content='Alice likes Bob',
        valid_at=now,
        entity_edges=[],  # Filled in later
    )
    episode_node_2 = EpisodicNode(
        name='test_episode_2',
        group_id=group_id,
        labels=[],
        created_at=now,
        source=EpisodeType.message,
        source_description='conversation message',
        content='Bob adores Alice',
        valid_at=now,
        entity_edges=[],  # Filled in later
    )
    # Create entity nodes
    entity_node_1 = EntityNode(
        name='test_entity_1',
        group_id=group_id,
        labels=['Entity', 'Person'],
        created_at=now,
        summary='test_entity_1 summary',
        attributes={'age': 30, 'location': 'New York'},
    )
    await entity_node_1.generate_name_embedding(mock_embedder)
    entity_node_2 = EntityNode(
        name='test_entity_2',
        group_id=group_id,
        labels=['Entity', 'Person2'],
        created_at=now,
        summary='test_entity_2 summary',
        attributes={'age': 25, 'location': 'Los Angeles'},
    )
    await entity_node_2.generate_name_embedding(mock_embedder)
    entity_node_3 = EntityNode(
        name='test_entity_3',
        group_id=group_id,
        labels=['Entity', 'City', 'Location'],
        created_at=now,
        summary='test_entity_3 summary',
        attributes={'age': 25, 'location': 'Los Angeles'},
    )
    await entity_node_3.generate_name_embedding(mock_embedder)
    entity_node_4 = EntityNode(
        name='test_entity_4',
        group_id=group_id,
        labels=['Entity'],
        created_at=now,
        summary='test_entity_4 summary',
        attributes={'age': 25, 'location': 'Los Angeles'},
    )
    await entity_node_4.generate_name_embedding(mock_embedder)
    # Create entity edges
    entity_edge_1 = EntityEdge(
        source_node_uuid=entity_node_1.uuid,
        target_node_uuid=entity_node_2.uuid,
        created_at=now,
        name='likes',
        fact='test_entity_1 relates to test_entity_2',
        episodes=[],
        expired_at=now,
        valid_at=now,
        invalid_at=now,
        group_id=group_id,
    )
    await entity_edge_1.generate_embedding(mock_embedder)
    entity_edge_2 = EntityEdge(
        source_node_uuid=entity_node_3.uuid,
        target_node_uuid=entity_node_4.uuid,
        created_at=now,
        name='relates_to',
        fact='test_entity_3 relates to test_entity_4',
        episodes=[],
        expired_at=now,
        valid_at=now,
        invalid_at=now,
        group_id=group_id,
    )
    await entity_edge_2.generate_embedding(mock_embedder)
    # Create episodic to entity edges
    episodic_edge_1 = EpisodicEdge(
        source_node_uuid=episode_node_1.uuid,
        target_node_uuid=entity_node_1.uuid,
        created_at=now,
        group_id=group_id,
    )
    episodic_edge_2 = EpisodicEdge(
        source_node_uuid=episode_node_1.uuid,
        target_node_uuid=entity_node_2.uuid,
        created_at=now,
        group_id=group_id,
    )
    episodic_edge_3 = EpisodicEdge(
        source_node_uuid=episode_node_2.uuid,
        target_node_uuid=entity_node_3.uuid,
        created_at=now,
        group_id=group_id,
    )
    episodic_edge_4 = EpisodicEdge(
        source_node_uuid=episode_node_2.uuid,
        target_node_uuid=entity_node_4.uuid,
        created_at=now,
        group_id=group_id,
    )
    # Cross reference the ids
    episode_node_1.entity_edges = [entity_edge_1.uuid]
    episode_node_2.entity_edges = [entity_edge_2.uuid]
    entity_edge_1.episodes = [episode_node_1.uuid, episode_node_2.uuid]
    entity_edge_2.episodes = [episode_node_2.uuid]
    # Test add bulk
    await add_nodes_and_edges_bulk(
        graph_driver,
        [episode_node_1, episode_node_2],
        [episodic_edge_1, episodic_edge_2, episodic_edge_3, episodic_edge_4],
        [entity_node_1, entity_node_2, entity_node_3, entity_node_4],
        [entity_edge_1, entity_edge_2],
        mock_embedder,
    )
    node_ids = [
        episode_node_1.uuid,
        episode_node_2.uuid,
        entity_node_1.uuid,
        entity_node_2.uuid,
        entity_node_3.uuid,
        entity_node_4.uuid,
    ]
    edge_ids = [
        episodic_edge_1.uuid,
        episodic_edge_2.uuid,
        episodic_edge_3.uuid,
        episodic_edge_4.uuid,
        entity_edge_1.uuid,
        entity_edge_2.uuid,
    ]
    # Every node and edge must have been written exactly once.
    node_count = await get_node_count(graph_driver, node_ids)
    assert node_count == len(node_ids)
    edge_count = await get_edge_count(graph_driver, edge_ids)
    assert edge_count == len(edge_ids)
    # Test episodic nodes
    retrieved_episode = await EpisodicNode.get_by_uuid(graph_driver, episode_node_1.uuid)
    await assert_episodic_node_equals(retrieved_episode, episode_node_1)
    retrieved_episode = await EpisodicNode.get_by_uuid(graph_driver, episode_node_2.uuid)
    await assert_episodic_node_equals(retrieved_episode, episode_node_2)
    # Test entity nodes
    retrieved_entity_node = await EntityNode.get_by_uuid(graph_driver, entity_node_1.uuid)
    await assert_entity_node_equals(graph_driver, retrieved_entity_node, entity_node_1)
    retrieved_entity_node = await EntityNode.get_by_uuid(graph_driver, entity_node_2.uuid)
    await assert_entity_node_equals(graph_driver, retrieved_entity_node, entity_node_2)
    retrieved_entity_node = await EntityNode.get_by_uuid(graph_driver, entity_node_3.uuid)
    await assert_entity_node_equals(graph_driver, retrieved_entity_node, entity_node_3)
    retrieved_entity_node = await EntityNode.get_by_uuid(graph_driver, entity_node_4.uuid)
    await assert_entity_node_equals(graph_driver, retrieved_entity_node, entity_node_4)
    # Test episodic edges
    retrieved_episode_edge = await EpisodicEdge.get_by_uuid(graph_driver, episodic_edge_1.uuid)
    await assert_episodic_edge_equals(retrieved_episode_edge, episodic_edge_1)
    retrieved_episode_edge = await EpisodicEdge.get_by_uuid(graph_driver, episodic_edge_2.uuid)
    await assert_episodic_edge_equals(retrieved_episode_edge, episodic_edge_2)
    retrieved_episode_edge = await EpisodicEdge.get_by_uuid(graph_driver, episodic_edge_3.uuid)
    await assert_episodic_edge_equals(retrieved_episode_edge, episodic_edge_3)
    retrieved_episode_edge = await EpisodicEdge.get_by_uuid(graph_driver, episodic_edge_4.uuid)
    await assert_episodic_edge_equals(retrieved_episode_edge, episodic_edge_4)
    # Test entity edges
    retrieved_entity_edge = await EntityEdge.get_by_uuid(graph_driver, entity_edge_1.uuid)
    await assert_entity_edge_equals(graph_driver, retrieved_entity_edge, entity_edge_1)
    retrieved_entity_edge = await EntityEdge.get_by_uuid(graph_driver, entity_edge_2.uuid)
    await assert_entity_edge_equals(graph_driver, retrieved_entity_edge, entity_edge_2)
@pytest.mark.asyncio
async def test_remove_episode(
    graph_driver, mock_llm_client, mock_embedder, mock_cross_encoder_client
):
    """Removing an episode deletes it, its edges, and the entities it created.

    Builds a minimal graph (one episode, two entities, one entity edge, two
    episodic edges), removes the episode via ``Graphiti.remove_episode`` and
    asserts all 3 nodes and 3 edges are gone, then re-inserts the same data
    to confirm the graph is still writable afterwards.
    """
    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    now = datetime.now()
    # Create episodic nodes
    episode_node = EpisodicNode(
        name='test_episode',
        group_id=group_id,
        labels=[],
        created_at=now,
        source=EpisodeType.message,
        source_description='conversation message',
        content='Alice likes Bob',
        valid_at=now,
        entity_edges=[],  # Filled in later
    )
    # Create entity nodes
    alice_node = EntityNode(
        name='Alice',
        group_id=group_id,
        labels=['Entity', 'Person'],
        created_at=now,
        summary='Alice summary',
        attributes={'age': 30, 'location': 'New York'},
    )
    await alice_node.generate_name_embedding(mock_embedder)
    bob_node = EntityNode(
        name='Bob',
        group_id=group_id,
        labels=['Entity', 'Person2'],
        created_at=now,
        summary='Bob summary',
        attributes={'age': 25, 'location': 'Los Angeles'},
    )
    await bob_node.generate_name_embedding(mock_embedder)
    # Create entity to entity edge
    entity_edge = EntityEdge(
        source_node_uuid=alice_node.uuid,
        target_node_uuid=bob_node.uuid,
        created_at=now,
        name='likes',
        fact='Alice likes Bob',
        episodes=[],
        expired_at=now,
        valid_at=now,
        invalid_at=now,
        group_id=group_id,
    )
    await entity_edge.generate_embedding(mock_embedder)
    # Create episodic to entity edges
    episodic_alice_edge = EpisodicEdge(
        source_node_uuid=episode_node.uuid,
        target_node_uuid=alice_node.uuid,
        created_at=now,
        group_id=group_id,
    )
    episodic_bob_edge = EpisodicEdge(
        source_node_uuid=episode_node.uuid,
        target_node_uuid=bob_node.uuid,
        created_at=now,
        group_id=group_id,
    )
    # Cross reference the ids
    episode_node.entity_edges = [entity_edge.uuid]
    entity_edge.episodes = [episode_node.uuid]
    # Test add bulk
    await add_nodes_and_edges_bulk(
        graph_driver,
        [episode_node],
        [episodic_alice_edge, episodic_bob_edge],
        [alice_node, bob_node],
        [entity_edge],
        mock_embedder,
    )
    node_ids = [episode_node.uuid, alice_node.uuid, bob_node.uuid]
    edge_ids = [episodic_alice_edge.uuid, episodic_bob_edge.uuid, entity_edge.uuid]
    node_count = await get_node_count(graph_driver, node_ids)
    assert node_count == 3
    edge_count = await get_edge_count(graph_driver, edge_ids)
    assert edge_count == 3
    # Test remove episode
    # Removal must also delete the entity nodes and all edges created by
    # this episode, not just the episode node itself.
    await graphiti.remove_episode(episode_node.uuid)
    node_count = await get_node_count(graph_driver, node_ids)
    assert node_count == 0
    edge_count = await get_edge_count(graph_driver, edge_ids)
    assert edge_count == 0
    # Test add bulk again
    await add_nodes_and_edges_bulk(
        graph_driver,
        [episode_node],
        [episodic_alice_edge, episodic_bob_edge],
        [alice_node, bob_node],
        [entity_edge],
        mock_embedder,
    )
    node_count = await get_node_count(graph_driver, node_ids)
    assert node_count == 3
    edge_count = await get_edge_count(graph_driver, edge_ids)
    assert edge_count == 3
@pytest.mark.asyncio
async def test_graphiti_retrieve_episodes(
    graph_driver, mock_llm_client, mock_embedder, mock_cross_encoder_client
):
    """retrieve_episodes returns only episodes valid at/before the reference time.

    Three episodes are created with valid_at 2, 4, and 6 days in the past.
    Querying with a reference time of 3 days ago must return exactly the two
    older episodes (4 and 6 days), ordered oldest first per the assertions
    below.
    """
    if graph_driver.provider == GraphProvider.FALKORDB:
        pytest.skip('Skipping as test fails on FalkorDB')
    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    now = datetime.now()
    valid_at_1 = now - timedelta(days=2)
    valid_at_2 = now - timedelta(days=4)
    valid_at_3 = now - timedelta(days=6)
    # Create episodic nodes
    episode_node_1 = EpisodicNode(
        name='test_episode_1',
        labels=[],
        created_at=now,
        valid_at=valid_at_1,
        source=EpisodeType.message,
        source_description='conversation message',
        content='Test message 1',
        entity_edges=[],
        group_id=group_id,
    )
    episode_node_2 = EpisodicNode(
        name='test_episode_2',
        labels=[],
        created_at=now,
        valid_at=valid_at_2,
        source=EpisodeType.message,
        source_description='conversation message',
        content='Test message 2',
        entity_edges=[],
        group_id=group_id,
    )
    episode_node_3 = EpisodicNode(
        name='test_episode_3',
        labels=[],
        created_at=now,
        valid_at=valid_at_3,
        source=EpisodeType.message,
        source_description='conversation message',
        content='Test message 3',
        entity_edges=[],
        group_id=group_id,
    )
    # Save the nodes
    await episode_node_1.save(graph_driver)
    await episode_node_2.save(graph_driver)
    await episode_node_3.save(graph_driver)
    node_ids = [episode_node_1.uuid, episode_node_2.uuid, episode_node_3.uuid]
    node_count = await get_node_count(graph_driver, node_ids)
    assert node_count == 3
    # Retrieve episodes
    # Reference time sits between episode 1 (2 days ago) and episode 2
    # (4 days ago), so only episodes 2 and 3 qualify.
    query_time = now - timedelta(days=3)
    episodes = await graphiti.retrieve_episodes(
        query_time, last_n=5, group_ids=[group_id], source=EpisodeType.message
    )
    assert len(episodes) == 2
    assert episodes[0].name == episode_node_3.name
    assert episodes[1].name == episode_node_2.name
@pytest.mark.asyncio
async def test_filter_existing_duplicate_of_edges(graph_driver, mock_embedder):
    """Pairs already linked by an IS_DUPLICATE_OF edge are filtered out.

    Four entities are saved; an IS_DUPLICATE_OF edge is persisted between the
    first pair only, so filtering two candidate pairs must leave just the
    second (unlinked) pair.
    """
    # Build and persist four entity nodes.
    nodes = []
    for idx in range(1, 5):
        node = EntityNode(
            name=f'test_entity_{idx}',
            labels=[],
            created_at=datetime.now(),
            group_id=group_id,
        )
        await node.generate_name_embedding(mock_embedder)
        nodes.append(node)
    for node in nodes:
        await node.save(graph_driver)
    node_count = await get_node_count(graph_driver, [node.uuid for node in nodes])
    assert node_count == 4
    first, second, third, fourth = nodes
    # Persist a duplicate-of edge between the first pair only.
    dup_edge = EntityEdge(
        source_node_uuid=first.uuid,
        target_node_uuid=second.uuid,
        name='IS_DUPLICATE_OF',
        fact='test_entity_1 is a duplicate of test_entity_2',
        created_at=datetime.now(),
        group_id=group_id,
    )
    await dup_edge.generate_embedding(mock_embedder)
    await dup_edge.save(graph_driver)
    # Only the pair without an existing edge should survive the filter.
    candidate_pairs = [
        (first, second),
        (third, fourth),
    ]
    remaining = await filter_existing_duplicate_of_edges(graph_driver, candidate_pairs)
    assert len(remaining) == 1
    assert [node.name for node in remaining[0]] == [third.name, fourth.name]
@pytest.mark.asyncio
async def test_determine_entity_community(graph_driver, mock_embedder):
    """determine_entity_community picks a community from a node's neighbors.

    entity_node_4 is connected to entities 1-3; entities 1 and 2 belong to
    community 1 and entity 3 to community 2.  The first call must return
    community 1 (presumably because two of the three neighbors belong to it
    — confirm against the implementation) with is_new=True.  After the
    membership edge is persisted, a second call returns the same community
    with is_new=False.  Finally remove_communities deletes all community
    nodes.
    """
    if graph_driver.provider == GraphProvider.FALKORDB:
        pytest.skip('Skipping as test fails on FalkorDB')
    # Create entity nodes
    entity_node_1 = EntityNode(
        name='test_entity_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_1.generate_name_embedding(mock_embedder)
    entity_node_2 = EntityNode(
        name='test_entity_2',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_2.generate_name_embedding(mock_embedder)
    entity_node_3 = EntityNode(
        name='test_entity_3',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_3.generate_name_embedding(mock_embedder)
    entity_node_4 = EntityNode(
        name='test_entity_4',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_4.generate_name_embedding(mock_embedder)
    # Create entity edges
    entity_edge_1 = EntityEdge(
        source_node_uuid=entity_node_1.uuid,
        target_node_uuid=entity_node_4.uuid,
        name='RELATES_TO',
        fact='test_entity_1 relates to test_entity_4',
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_edge_1.generate_embedding(mock_embedder)
    entity_edge_2 = EntityEdge(
        source_node_uuid=entity_node_2.uuid,
        target_node_uuid=entity_node_4.uuid,
        name='RELATES_TO',
        fact='test_entity_2 relates to test_entity_4',
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_edge_2.generate_embedding(mock_embedder)
    entity_edge_3 = EntityEdge(
        source_node_uuid=entity_node_3.uuid,
        target_node_uuid=entity_node_4.uuid,
        name='RELATES_TO',
        fact='test_entity_3 relates to test_entity_4',
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_edge_3.generate_embedding(mock_embedder)
    # Create community nodes
    community_node_1 = CommunityNode(
        name='test_community_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await community_node_1.generate_name_embedding(mock_embedder)
    community_node_2 = CommunityNode(
        name='test_community_2',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await community_node_2.generate_name_embedding(mock_embedder)
    # Create community to entity edges
    # Entities 1 and 2 -> community 1; entity 3 -> community 2.
    community_edge_1 = CommunityEdge(
        source_node_uuid=community_node_1.uuid,
        target_node_uuid=entity_node_1.uuid,
        created_at=datetime.now(),
        group_id=group_id,
    )
    community_edge_2 = CommunityEdge(
        source_node_uuid=community_node_1.uuid,
        target_node_uuid=entity_node_2.uuid,
        created_at=datetime.now(),
        group_id=group_id,
    )
    community_edge_3 = CommunityEdge(
        source_node_uuid=community_node_2.uuid,
        target_node_uuid=entity_node_3.uuid,
        created_at=datetime.now(),
        group_id=group_id,
    )
    # Save the graph
    await entity_node_1.save(graph_driver)
    await entity_node_2.save(graph_driver)
    await entity_node_3.save(graph_driver)
    await entity_node_4.save(graph_driver)
    await community_node_1.save(graph_driver)
    await community_node_2.save(graph_driver)
    await entity_edge_1.save(graph_driver)
    await entity_edge_2.save(graph_driver)
    await entity_edge_3.save(graph_driver)
    await community_edge_1.save(graph_driver)
    await community_edge_2.save(graph_driver)
    await community_edge_3.save(graph_driver)
    node_ids = [
        entity_node_1.uuid,
        entity_node_2.uuid,
        entity_node_3.uuid,
        entity_node_4.uuid,
        community_node_1.uuid,
        community_node_2.uuid,
    ]
    edge_ids = [
        entity_edge_1.uuid,
        entity_edge_2.uuid,
        entity_edge_3.uuid,
        community_edge_1.uuid,
        community_edge_2.uuid,
        community_edge_3.uuid,
    ]
    node_count = await get_node_count(graph_driver, node_ids)
    assert node_count == 6
    edge_count = await get_edge_count(graph_driver, edge_ids)
    assert edge_count == 6
    # Determine entity community
    # entity_node_4 has no membership edge yet, so the result is "new".
    community, is_new = await determine_entity_community(graph_driver, entity_node_4)
    assert community.name == community_node_1.name
    assert is_new
    # Add entity to community edge
    community_edge_4 = CommunityEdge(
        source_node_uuid=community_node_1.uuid,
        target_node_uuid=entity_node_4.uuid,
        created_at=datetime.now(),
        group_id=group_id,
    )
    await community_edge_4.save(graph_driver)
    # Determine entity community again
    # Now the membership edge exists, so the same community is returned
    # but no longer flagged as new.
    community, is_new = await determine_entity_community(graph_driver, entity_node_4)
    assert community.name == community_node_1.name
    assert not is_new
    await remove_communities(graph_driver)
    node_count = await get_node_count(graph_driver, [community_node_1.uuid, community_node_2.uuid])
    assert node_count == 0
@pytest.mark.asyncio
async def test_get_community_clusters(graph_driver, mock_embedder):
    """get_community_clusters partitions connected entities into clusters.

    Two disjoint pairs of connected entities live in two different group_ids.
    Clustering with group_ids=None must yield exactly two clusters of two
    nodes each, one per connected pair.  Cluster order is not guaranteed, so
    the final assertions compare unordered name sets.
    """
    if graph_driver.provider == GraphProvider.FALKORDB:
        pytest.skip('Skipping as test fails on FalkorDB')
    # Create entity nodes
    entity_node_1 = EntityNode(
        name='test_entity_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_1.generate_name_embedding(mock_embedder)
    entity_node_2 = EntityNode(
        name='test_entity_2',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_2.generate_name_embedding(mock_embedder)
    entity_node_3 = EntityNode(
        name='test_entity_3',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id_2,
    )
    await entity_node_3.generate_name_embedding(mock_embedder)
    entity_node_4 = EntityNode(
        name='test_entity_4',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id_2,
    )
    await entity_node_4.generate_name_embedding(mock_embedder)
    # Create entity edges: 1-2 within group_id, 3-4 within group_id_2.
    entity_edge_1 = EntityEdge(
        source_node_uuid=entity_node_1.uuid,
        target_node_uuid=entity_node_2.uuid,
        name='RELATES_TO',
        fact='test_entity_1 relates to test_entity_2',
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_edge_1.generate_embedding(mock_embedder)
    entity_edge_2 = EntityEdge(
        source_node_uuid=entity_node_3.uuid,
        target_node_uuid=entity_node_4.uuid,
        name='RELATES_TO',
        fact='test_entity_3 relates to test_entity_4',
        created_at=datetime.now(),
        group_id=group_id_2,
    )
    await entity_edge_2.generate_embedding(mock_embedder)
    # Save the graph
    await entity_node_1.save(graph_driver)
    await entity_node_2.save(graph_driver)
    await entity_node_3.save(graph_driver)
    await entity_node_4.save(graph_driver)
    await entity_edge_1.save(graph_driver)
    await entity_edge_2.save(graph_driver)
    node_ids = [entity_node_1.uuid, entity_node_2.uuid, entity_node_3.uuid, entity_node_4.uuid]
    edge_ids = [entity_edge_1.uuid, entity_edge_2.uuid]
    node_count = await get_node_count(graph_driver, node_ids)
    assert node_count == 4
    edge_count = await get_edge_count(graph_driver, edge_ids)
    assert edge_count == 2
    # Get community clusters
    clusters = await get_community_clusters(graph_driver, group_ids=None)
    assert len(clusters) == 2
    assert len(clusters[0]) == 2
    assert len(clusters[1]) == 2
    # Compare as unordered name sets since cluster order is not guaranteed.
    entities_1 = {node.name for node in clusters[0]}
    entities_2 = {node.name for node in clusters[1]}
    assert {'test_entity_1', 'test_entity_2'} in (entities_1, entities_2)
    assert {'test_entity_3', 'test_entity_4'} in (entities_1, entities_2)
@pytest.mark.asyncio
async def test_get_mentioned_nodes(graph_driver, mock_embedder):
    """An entity linked to an episode via an episodic edge is reported as mentioned."""
    # One episode, one entity, and the episodic link between them.
    episode = EpisodicNode(
        name='test_episodic_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
        source=EpisodeType.message,
        source_description='test_source_description',
        content='test_content',
        valid_at=datetime.now(),
    )
    entity = EntityNode(
        name='test_entity_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity.generate_name_embedding(mock_embedder)
    link = EpisodicEdge(
        source_node_uuid=episode.uuid,
        target_node_uuid=entity.uuid,
        created_at=datetime.now(),
        group_id=group_id,
    )
    for item in (episode, entity, link):
        await item.save(graph_driver)
    # The entity must be the only node mentioned by the episode.
    mentioned = await get_mentioned_nodes(graph_driver, [episode])
    assert len(mentioned) == 1
    assert mentioned[0].name == entity.name
@pytest.mark.asyncio
async def test_get_communities_by_nodes(graph_driver, mock_embedder):
    """An entity's community is found via its community membership edge."""
    # One entity, one community, and the membership edge between them.
    entity = EntityNode(
        name='test_entity_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity.generate_name_embedding(mock_embedder)
    community = CommunityNode(
        name='test_community_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await community.generate_name_embedding(mock_embedder)
    membership = CommunityEdge(
        source_node_uuid=community.uuid,
        target_node_uuid=entity.uuid,
        created_at=datetime.now(),
        group_id=group_id,
    )
    for item in (entity, community, membership):
        await item.save(graph_driver)
    # Exactly one community should be reported for the entity.
    communities = await get_communities_by_nodes(graph_driver, [entity])
    assert len(communities) == 1
    assert communities[0].name == community.name
@pytest.mark.asyncio
async def test_edge_fulltext_search(
    graph_driver, mock_embedder, mock_llm_client, mock_cross_encoder_client
):
    """Fulltext edge search with date filters finds the matching edge.

    A single RELATES_TO edge is saved with created/valid/invalid/expired
    timestamps at now, now+2d, now+4d, now+6d.  The search applies a filter
    combination the saved edge satisfies and must return exactly that edge.
    """
    if graph_driver.provider == GraphProvider.KUZU:
        pytest.skip('Skipping as fulltext indexing not supported for Kuzu')
    # Indices must be built before fulltext search can run.
    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    # Create entity nodes
    entity_node_1 = EntityNode(
        name='test_entity_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_1.generate_name_embedding(mock_embedder)
    entity_node_2 = EntityNode(
        name='test_entity_2',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_2.generate_name_embedding(mock_embedder)
    now = datetime.now()
    created_at = now
    expired_at = now + timedelta(days=6)
    valid_at = now + timedelta(days=2)
    invalid_at = now + timedelta(days=4)
    # Create entity edges
    entity_edge_1 = EntityEdge(
        source_node_uuid=entity_node_1.uuid,
        target_node_uuid=entity_node_2.uuid,
        name='RELATES_TO',
        fact='test_entity_1 relates to test_entity_2',
        created_at=created_at,
        valid_at=valid_at,
        invalid_at=invalid_at,
        expired_at=expired_at,
        group_id=group_id,
    )
    await entity_edge_1.generate_embedding(mock_embedder)
    # Save the graph
    await entity_node_1.save(graph_driver)
    await entity_node_2.save(graph_driver)
    await entity_edge_1.save(graph_driver)
    # Search for entity edges
    # Each date filter brackets the corresponding timestamp on the saved
    # edge, e.g. valid_at (now+2d) is between now+1d and now+3d.
    search_filters = SearchFilters(
        node_labels=['Entity'],
        edge_types=['RELATES_TO'],
        created_at=[
            [DateFilter(date=created_at, comparison_operator=ComparisonOperator.equals)],
        ],
        expired_at=[
            [DateFilter(date=now, comparison_operator=ComparisonOperator.not_equals)],
        ],
        valid_at=[
            [
                DateFilter(
                    date=now + timedelta(days=1),
                    comparison_operator=ComparisonOperator.greater_than_equal,
                )
            ],
            [
                DateFilter(
                    date=now + timedelta(days=3),
                    comparison_operator=ComparisonOperator.less_than_equal,
                )
            ],
        ],
        invalid_at=[
            [
                DateFilter(
                    date=now + timedelta(days=3),
                    comparison_operator=ComparisonOperator.greater_than,
                )
            ],
            [
                DateFilter(
                    date=now + timedelta(days=5), comparison_operator=ComparisonOperator.less_than
                )
            ],
        ],
    )
    edges = await edge_fulltext_search(
        graph_driver, 'test_entity_1 relates to test_entity_2', search_filters, group_ids=[group_id]
    )
    assert len(edges) == 1
    assert edges[0].name == entity_edge_1.name
@pytest.mark.asyncio
async def test_edge_similarity_search(graph_driver, mock_embedder):
    """Similarity search on a fact embedding finds the matching edge.

    Searches with the saved edge's own fact embedding plus source/target
    uuids and a date-filter combination the edge satisfies, expecting
    exactly that edge back.
    """
    if graph_driver.provider == GraphProvider.FALKORDB:
        pytest.skip('Skipping as tests fail on Falkordb')
    # Create entity nodes
    entity_node_1 = EntityNode(
        name='test_entity_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_1.generate_name_embedding(mock_embedder)
    entity_node_2 = EntityNode(
        name='test_entity_2',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_2.generate_name_embedding(mock_embedder)
    now = datetime.now()
    created_at = now
    expired_at = now + timedelta(days=6)
    valid_at = now + timedelta(days=2)
    invalid_at = now + timedelta(days=4)
    # Create entity edges
    entity_edge_1 = EntityEdge(
        source_node_uuid=entity_node_1.uuid,
        target_node_uuid=entity_node_2.uuid,
        name='RELATES_TO',
        fact='test_entity_1 relates to test_entity_2',
        created_at=created_at,
        valid_at=valid_at,
        invalid_at=invalid_at,
        expired_at=expired_at,
        group_id=group_id,
    )
    await entity_edge_1.generate_embedding(mock_embedder)
    # Save the graph
    await entity_node_1.save(graph_driver)
    await entity_node_2.save(graph_driver)
    await entity_edge_1.save(graph_driver)
    # Search for entity edges
    # Same filter shape as test_edge_fulltext_search: each date filter
    # brackets the corresponding timestamp on the saved edge.
    search_filters = SearchFilters(
        node_labels=['Entity'],
        edge_types=['RELATES_TO'],
        created_at=[
            [DateFilter(date=created_at, comparison_operator=ComparisonOperator.equals)],
        ],
        expired_at=[
            [DateFilter(date=now, comparison_operator=ComparisonOperator.not_equals)],
        ],
        valid_at=[
            [
                DateFilter(
                    date=now + timedelta(days=1),
                    comparison_operator=ComparisonOperator.greater_than_equal,
                )
            ],
            [
                DateFilter(
                    date=now + timedelta(days=3),
                    comparison_operator=ComparisonOperator.less_than_equal,
                )
            ],
        ],
        invalid_at=[
            [
                DateFilter(
                    date=now + timedelta(days=3),
                    comparison_operator=ComparisonOperator.greater_than,
                )
            ],
            [
                DateFilter(
                    date=now + timedelta(days=5), comparison_operator=ComparisonOperator.less_than
                )
            ],
        ],
    )
    edges = await edge_similarity_search(
        graph_driver,
        entity_edge_1.fact_embedding,
        entity_node_1.uuid,
        entity_node_2.uuid,
        search_filters,
        group_ids=[group_id],
    )
    assert len(edges) == 1
    assert edges[0].name == entity_edge_1.name
@pytest.mark.asyncio
async def test_edge_bfs_search(graph_driver, mock_embedder):
    """BFS edge search returns edges reachable within the given depth.

    Graph shape: episode -> entity_1 -RELATES_TO-> entity_2 -RELATES_TO->
    entity_3.  From the episode, depth 1 reaches no entity edges, depth 2
    reaches the first edge, depth 3 reaches both.  From entity_1, depth 1
    reaches the first edge and depth 2 reaches both.
    """
    if graph_driver.provider == GraphProvider.FALKORDB:
        pytest.skip('Skipping as tests fail on Falkordb')
    # Create episodic nodes
    episodic_node_1 = EpisodicNode(
        name='test_episodic_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
        source=EpisodeType.message,
        source_description='test_source_description',
        content='test_content',
        valid_at=datetime.now(),
    )
    # Create entity nodes
    entity_node_1 = EntityNode(
        name='test_entity_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_1.generate_name_embedding(mock_embedder)
    entity_node_2 = EntityNode(
        name='test_entity_2',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_2.generate_name_embedding(mock_embedder)
    entity_node_3 = EntityNode(
        name='test_entity_3',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_3.generate_name_embedding(mock_embedder)
    now = datetime.now()
    created_at = now
    expired_at = now + timedelta(days=6)
    valid_at = now + timedelta(days=2)
    invalid_at = now + timedelta(days=4)
    # Create entity edges
    entity_edge_1 = EntityEdge(
        source_node_uuid=entity_node_1.uuid,
        target_node_uuid=entity_node_2.uuid,
        name='RELATES_TO',
        fact='test_entity_1 relates to test_entity_2',
        created_at=created_at,
        valid_at=valid_at,
        invalid_at=invalid_at,
        expired_at=expired_at,
        group_id=group_id,
    )
    await entity_edge_1.generate_embedding(mock_embedder)
    entity_edge_2 = EntityEdge(
        source_node_uuid=entity_node_2.uuid,
        target_node_uuid=entity_node_3.uuid,
        name='RELATES_TO',
        fact='test_entity_2 relates to test_entity_3',
        created_at=created_at,
        valid_at=valid_at,
        invalid_at=invalid_at,
        expired_at=expired_at,
        group_id=group_id,
    )
    await entity_edge_2.generate_embedding(mock_embedder)
    # Create episodic to entity edges
    episodic_edge_1 = EpisodicEdge(
        source_node_uuid=episodic_node_1.uuid,
        target_node_uuid=entity_node_1.uuid,
        created_at=datetime.now(),
        group_id=group_id,
    )
    # Save the graph
    await episodic_node_1.save(graph_driver)
    await entity_node_1.save(graph_driver)
    await entity_node_2.save(graph_driver)
    await entity_node_3.save(graph_driver)
    await entity_edge_1.save(graph_driver)
    await entity_edge_2.save(graph_driver)
    await episodic_edge_1.save(graph_driver)
    # Search for entity edges
    # Each date filter brackets the corresponding timestamp on the saved
    # edges (same filter shape as the other edge search tests).
    search_filters = SearchFilters(
        node_labels=['Entity'],
        edge_types=['RELATES_TO'],
        created_at=[
            [DateFilter(date=created_at, comparison_operator=ComparisonOperator.equals)],
        ],
        expired_at=[
            [DateFilter(date=now, comparison_operator=ComparisonOperator.not_equals)],
        ],
        valid_at=[
            [
                DateFilter(
                    date=now + timedelta(days=1),
                    comparison_operator=ComparisonOperator.greater_than_equal,
                )
            ],
            [
                DateFilter(
                    date=now + timedelta(days=3),
                    comparison_operator=ComparisonOperator.less_than_equal,
                )
            ],
        ],
        invalid_at=[
            [
                DateFilter(
                    date=now + timedelta(days=3),
                    comparison_operator=ComparisonOperator.greater_than,
                )
            ],
            [
                DateFilter(
                    date=now + timedelta(days=5), comparison_operator=ComparisonOperator.less_than
                )
            ],
        ],
    )
    # Test bfs from episodic node
    edges = await edge_bfs_search(
        graph_driver,
        [episodic_node_1.uuid],
        1,
        search_filters,
        group_ids=[group_id],
    )
    assert len(edges) == 0
    edges = await edge_bfs_search(
        graph_driver,
        [episodic_node_1.uuid],
        2,
        search_filters,
        group_ids=[group_id],
    )
    # Deduplicate by uuid (BFS can return the same edge more than once),
    # then compare the remaining facts.
    edges_deduplicated = set({edge.uuid: edge.fact for edge in edges}.values())
    assert len(edges_deduplicated) == 1
    assert edges_deduplicated == {'test_entity_1 relates to test_entity_2'}
    edges = await edge_bfs_search(
        graph_driver,
        [episodic_node_1.uuid],
        3,
        search_filters,
        group_ids=[group_id],
    )
    edges_deduplicated = set({edge.uuid: edge.fact for edge in edges}.values())
    assert len(edges_deduplicated) == 2
    assert edges_deduplicated == {
        'test_entity_1 relates to test_entity_2',
        'test_entity_2 relates to test_entity_3',
    }
    # Test bfs from entity node
    edges = await edge_bfs_search(
        graph_driver,
        [entity_node_1.uuid],
        1,
        search_filters,
        group_ids=[group_id],
    )
    edges_deduplicated = set({edge.uuid: edge.fact for edge in edges}.values())
    assert len(edges_deduplicated) == 1
    assert edges_deduplicated == {'test_entity_1 relates to test_entity_2'}
    edges = await edge_bfs_search(
        graph_driver,
        [entity_node_1.uuid],
        2,
        search_filters,
        group_ids=[group_id],
    )
    edges_deduplicated = set({edge.uuid: edge.fact for edge in edges}.values())
    assert len(edges_deduplicated) == 2
    assert edges_deduplicated == {
        'test_entity_1 relates to test_entity_2',
        'test_entity_2 relates to test_entity_3',
    }
@pytest.mark.asyncio
async def test_node_fulltext_search(
    graph_driver, mock_embedder, mock_llm_client, mock_cross_encoder_client
):
    """Fulltext node search on a summary term returns only the matching entity."""
    if graph_driver.provider == GraphProvider.KUZU:
        pytest.skip('Skipping as fulltext indexing not supported for Kuzu')
    # Indices must exist before fulltext search can run.
    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    # Two entities whose summaries mention different people.
    alice = EntityNode(
        name='test_entity_1',
        summary='Summary about Alice',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    bob = EntityNode(
        name='test_entity_2',
        summary='Summary about Bob',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    for entity in (alice, bob):
        await entity.generate_name_embedding(mock_embedder)
        await entity.save(graph_driver)
    # Searching for 'Alice' must hit exactly the first entity.
    hits = await node_fulltext_search(
        graph_driver,
        'Alice',
        SearchFilters(node_labels=['Entity']),
        group_ids=[group_id],
    )
    assert len(hits) == 1
    assert hits[0].name == alice.name
@pytest.mark.asyncio
async def test_node_similarity_search(graph_driver, mock_embedder):
    """Similarity search on a name embedding returns only the matching entity."""
    if graph_driver.provider == GraphProvider.FALKORDB:
        pytest.skip('Skipping as tests fail on Falkordb')
    # Two entities with distinct names/summaries, hence distinct embeddings.
    alice = EntityNode(
        name='test_entity_alice',
        summary='Summary about Alice',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    bob = EntityNode(
        name='test_entity_bob',
        summary='Summary about Bob',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    for entity in (alice, bob):
        await entity.generate_name_embedding(mock_embedder)
        await entity.save(graph_driver)
    # With a high min_score only the node matching its own embedding passes.
    hits = await node_similarity_search(
        graph_driver,
        alice.name_embedding,
        SearchFilters(node_labels=['Entity']),
        group_ids=[group_id],
        min_score=0.9,
    )
    assert len(hits) == 1
    assert hits[0].name == alice.name
@pytest.mark.asyncio
async def test_node_bfs_search(graph_driver, mock_embedder):
    """BFS node search returns entities reachable within the given depth.

    Graph shape: episode -> entity_1 -RELATES_TO-> entity_2 -RELATES_TO->
    entity_3.  From the episode, depth 1 reaches entity_1 and depth 2 adds
    entity_2.  From entity_1, depth 1 reaches only entity_2 (the origin node
    itself is not included in the results).
    """
    if graph_driver.provider == GraphProvider.FALKORDB:
        pytest.skip('Skipping as tests fail on Falkordb')
    # Create episodic nodes
    episodic_node_1 = EpisodicNode(
        name='test_episodic_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
        source=EpisodeType.message,
        source_description='test_source_description',
        content='test_content',
        valid_at=datetime.now(),
    )
    # Create entity nodes
    entity_node_1 = EntityNode(
        name='test_entity_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_1.generate_name_embedding(mock_embedder)
    entity_node_2 = EntityNode(
        name='test_entity_2',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_2.generate_name_embedding(mock_embedder)
    entity_node_3 = EntityNode(
        name='test_entity_3',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_3.generate_name_embedding(mock_embedder)
    # Create entity edges
    entity_edge_1 = EntityEdge(
        source_node_uuid=entity_node_1.uuid,
        target_node_uuid=entity_node_2.uuid,
        name='RELATES_TO',
        fact='test_entity_1 relates to test_entity_2',
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_edge_1.generate_embedding(mock_embedder)
    entity_edge_2 = EntityEdge(
        source_node_uuid=entity_node_2.uuid,
        target_node_uuid=entity_node_3.uuid,
        name='RELATES_TO',
        fact='test_entity_2 relates to test_entity_3',
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_edge_2.generate_embedding(mock_embedder)
    # Create episodic to entity edges
    episodic_edge_1 = EpisodicEdge(
        source_node_uuid=episodic_node_1.uuid,
        target_node_uuid=entity_node_1.uuid,
        created_at=datetime.now(),
        group_id=group_id,
    )
    # Save the graph
    await episodic_node_1.save(graph_driver)
    await entity_node_1.save(graph_driver)
    await entity_node_2.save(graph_driver)
    await entity_node_3.save(graph_driver)
    await entity_edge_1.save(graph_driver)
    await entity_edge_2.save(graph_driver)
    await episodic_edge_1.save(graph_driver)
    # Search for entity nodes
    search_filters = SearchFilters(
        node_labels=['Entity'],
    )
    # Test bfs from episodic node
    nodes = await node_bfs_search(
        graph_driver,
        [episodic_node_1.uuid],
        search_filters,
        1,
        group_ids=[group_id],
    )
    # Deduplicate by uuid (BFS can return the same node more than once),
    # then compare the remaining names.
    nodes_deduplicated = set({node.uuid: node.name for node in nodes}.values())
    assert len(nodes_deduplicated) == 1
    assert nodes_deduplicated == {'test_entity_1'}
    nodes = await node_bfs_search(
        graph_driver,
        [episodic_node_1.uuid],
        search_filters,
        2,
        group_ids=[group_id],
    )
    nodes_deduplicated = set({node.uuid: node.name for node in nodes}.values())
    assert len(nodes_deduplicated) == 2
    assert nodes_deduplicated == {'test_entity_1', 'test_entity_2'}
    # Test bfs from entity node
    nodes = await node_bfs_search(
        graph_driver,
        [entity_node_1.uuid],
        search_filters,
        1,
        group_ids=[group_id],
    )
    nodes_deduplicated = set({node.uuid: node.name for node in nodes}.values())
    assert len(nodes_deduplicated) == 1
    assert nodes_deduplicated == {'test_entity_2'}
@pytest.mark.asyncio
async def test_episode_fulltext_search(
    graph_driver, mock_embedder, mock_llm_client, mock_cross_encoder_client
):
    """Fulltext episode search on a source-description term finds the right episode."""
    if graph_driver.provider == GraphProvider.KUZU:
        pytest.skip('Skipping as fulltext indexing not supported for Kuzu')
    # Indices must exist before fulltext search can run.
    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    # Two episodes whose source descriptions mention different people.
    alice_episode = EpisodicNode(
        name='test_episodic_1',
        content='test_content',
        created_at=datetime.now(),
        valid_at=datetime.now(),
        group_id=group_id,
        source=EpisodeType.message,
        source_description='Description about Alice',
    )
    bob_episode = EpisodicNode(
        name='test_episodic_2',
        content='test_content_2',
        created_at=datetime.now(),
        valid_at=datetime.now(),
        group_id=group_id,
        source=EpisodeType.message,
        source_description='Description about Bob',
    )
    for episode in (alice_episode, bob_episode):
        await episode.save(graph_driver)
    # Searching for 'Alice' must hit exactly the first episode.
    hits = await episode_fulltext_search(
        graph_driver,
        'Alice',
        SearchFilters(node_labels=['Episodic']),
        group_ids=[group_id],
    )
    assert len(hits) == 1
    assert hits[0].name == alice_episode.name
@pytest.mark.asyncio
async def test_community_fulltext_search(
    graph_driver, mock_embedder, mock_llm_client, mock_cross_encoder_client
):
    """Fulltext community search on a name returns only the matching community."""
    if graph_driver.provider == GraphProvider.KUZU:
        pytest.skip('Skipping as fulltext indexing not supported for Kuzu')
    # Indices must exist before fulltext search can run.
    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()
    # Two communities with distinct names.
    alice_community = CommunityNode(
        name='Alice',
        created_at=datetime.now(),
        group_id=group_id,
    )
    bob_community = CommunityNode(
        name='Bob',
        created_at=datetime.now(),
        group_id=group_id,
    )
    for community in (alice_community, bob_community):
        await community.generate_name_embedding(mock_embedder)
        await community.save(graph_driver)
    # Searching for 'Alice' must hit exactly the first community.
    hits = await community_fulltext_search(
        graph_driver,
        'Alice',
        group_ids=[group_id],
    )
    assert len(hits) == 1
    assert hits[0].name == alice_community.name
@pytest.mark.asyncio
async def test_community_similarity_search(
    graph_driver, mock_embedder, mock_llm_client, mock_cross_encoder_client
):
    """Similarity search over community embeddings returns only the close match."""
    if graph_driver.provider == GraphProvider.FALKORDB:
        pytest.skip('Skipping as tests fail on Falkordb')

    graphiti_client = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti_client.build_indices_and_constraints()

    # Two communities with embeddings; only 'Alice' should clear min_score.
    alice_community = CommunityNode(
        name='Alice',
        created_at=datetime.now(),
        group_id=group_id,
    )
    bob_community = CommunityNode(
        name='Bob',
        created_at=datetime.now(),
        group_id=group_id,
    )
    for community in (alice_community, bob_community):
        await community.generate_name_embedding(mock_embedder)
        await community.save(graph_driver)

    # Query with Alice's own embedding and a high similarity threshold.
    nodes = await community_similarity_search(
        graph_driver,
        alice_community.name_embedding,
        group_ids=[group_id],
        min_score=0.9,
    )
    assert len(nodes) == 1
    assert nodes[0].name == alice_community.name
@pytest.mark.asyncio
async def test_get_relevant_nodes(
    graph_driver, mock_embedder, mock_llm_client, mock_cross_encoder_client
):
    """get_relevant_nodes returns, per query node, the nodes scoring above min_score.

    'Alice' and 'Alice Smith' are expected back for the 'Alice' query node,
    while 'Bob' must be excluded by the 0.9 similarity threshold.
    """
    if graph_driver.provider == GraphProvider.FALKORDB:
        pytest.skip('Skipping as tests fail on Falkordb')
    if graph_driver.provider == GraphProvider.KUZU:
        pytest.skip('Skipping as tests fail on Kuzu')

    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()

    # Create entity nodes
    entity_node_1 = EntityNode(
        name='Alice',
        summary='Alice',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_1.generate_name_embedding(mock_embedder)
    entity_node_2 = EntityNode(
        name='Bob',
        summary='Bob',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_2.generate_name_embedding(mock_embedder)
    entity_node_3 = EntityNode(
        name='Alice Smith',
        summary='Alice Smith',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_3.generate_name_embedding(mock_embedder)

    # Save the graph
    await entity_node_1.save(graph_driver)
    await entity_node_2.save(graph_driver)
    await entity_node_3.save(graph_driver)

    # Search for nodes relevant to entity_node_1; [0] selects the result
    # list for the first (and only) query node.
    search_filters = SearchFilters(node_labels=['Entity'])
    nodes = (
        await get_relevant_nodes(
            graph_driver,
            [entity_node_1],
            search_filters,
            min_score=0.9,
        )
    )[0]
    assert len(nodes) == 2
    # A set comprehension is already a set; the extra set() wrapper was redundant.
    assert {node.name for node in nodes} == {entity_node_1.name, entity_node_3.name}
@pytest.mark.asyncio
async def test_get_relevant_edges_and_invalidation_candidates(
    graph_driver, mock_embedder, mock_llm_client, mock_cross_encoder_client
):
    """Relevant-edge search honors the combined date filters; invalidation
    candidates additionally include edge 3, which carries the same fact text
    ('Alice') as the query edge.
    """
    if graph_driver.provider == GraphProvider.FALKORDB:
        pytest.skip('Skipping as tests fail on Falkordb')

    graphiti = Graphiti(
        graph_driver=graph_driver,
        llm_client=mock_llm_client,
        embedder=mock_embedder,
        cross_encoder=mock_cross_encoder_client,
    )
    await graphiti.build_indices_and_constraints()

    # Create entity nodes
    entity_node_1 = EntityNode(
        name='test_entity_1',
        summary='test_entity_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_1.generate_name_embedding(mock_embedder)
    entity_node_2 = EntityNode(
        name='test_entity_2',
        summary='test_entity_2',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_2.generate_name_embedding(mock_embedder)
    entity_node_3 = EntityNode(
        name='test_entity_3',
        summary='test_entity_3',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await entity_node_3.generate_name_embedding(mock_embedder)

    # Shared timeline for all edges: created now, valid at day 2,
    # invalid at day 4, expired at day 6.
    now = datetime.now()
    created_at = now
    expired_at = now + timedelta(days=6)
    valid_at = now + timedelta(days=2)
    invalid_at = now + timedelta(days=4)

    # Create entity edges
    entity_edge_1 = EntityEdge(
        source_node_uuid=entity_node_1.uuid,
        target_node_uuid=entity_node_2.uuid,
        name='RELATES_TO',
        fact='Alice',
        created_at=created_at,
        expired_at=expired_at,
        valid_at=valid_at,
        invalid_at=invalid_at,
        group_id=group_id,
    )
    await entity_edge_1.generate_embedding(mock_embedder)
    entity_edge_2 = EntityEdge(
        source_node_uuid=entity_node_2.uuid,
        target_node_uuid=entity_node_3.uuid,
        name='RELATES_TO',
        fact='Bob',
        created_at=created_at,
        expired_at=expired_at,
        valid_at=valid_at,
        invalid_at=invalid_at,
        group_id=group_id,
    )
    await entity_edge_2.generate_embedding(mock_embedder)
    entity_edge_3 = EntityEdge(
        source_node_uuid=entity_node_1.uuid,
        target_node_uuid=entity_node_3.uuid,
        name='RELATES_TO',
        fact='Alice',
        created_at=created_at,
        expired_at=expired_at,
        valid_at=valid_at,
        invalid_at=invalid_at,
        group_id=group_id,
    )
    await entity_edge_3.generate_embedding(mock_embedder)

    # Save the graph
    await entity_node_1.save(graph_driver)
    await entity_node_2.save(graph_driver)
    await entity_node_3.save(graph_driver)
    await entity_edge_1.save(graph_driver)
    await entity_edge_2.save(graph_driver)
    await entity_edge_3.save(graph_driver)

    # Date filters chosen so every saved edge satisfies them.
    # NOTE(review): the nested lists appear to encode AND-of-OR filter
    # groups — confirm against the SearchFilters definition.
    search_filters = SearchFilters(
        node_labels=['Entity'],
        edge_types=['RELATES_TO'],
        created_at=[
            [DateFilter(date=created_at, comparison_operator=ComparisonOperator.equals)],
        ],
        expired_at=[
            [DateFilter(date=now, comparison_operator=ComparisonOperator.not_equals)],
        ],
        valid_at=[
            [
                DateFilter(
                    date=now + timedelta(days=1),
                    comparison_operator=ComparisonOperator.greater_than_equal,
                )
            ],
            [
                DateFilter(
                    date=now + timedelta(days=3),
                    comparison_operator=ComparisonOperator.less_than_equal,
                )
            ],
        ],
        invalid_at=[
            [
                DateFilter(
                    date=now + timedelta(days=3),
                    comparison_operator=ComparisonOperator.greater_than,
                )
            ],
            [
                DateFilter(
                    date=now + timedelta(days=5), comparison_operator=ComparisonOperator.less_than
                )
            ],
        ],
    )

    # Relevant edges for entity_edge_1: only itself.
    edges = (
        await get_relevant_edges(
            graph_driver,
            [entity_edge_1],
            search_filters,
            min_score=0.9,
        )
    )[0]
    assert len(edges) == 1
    # A set comprehension is already a set; the extra set() wrapper was redundant.
    assert {edge.name for edge in edges} == {entity_edge_1.name}

    # Invalidation candidates also pick up entity_edge_3 (same 'Alice' fact).
    edges = (
        await get_edge_invalidation_candidates(
            graph_driver,
            [entity_edge_1],
            search_filters,
            min_score=0.9,
        )
    )[0]
    assert len(edges) == 2
    assert {edge.name for edge in edges} == {entity_edge_1.name, entity_edge_3.name}
@pytest.mark.asyncio
async def test_node_distance_reranker(graph_driver, mock_embedder):
    """Reranking by graph distance puts the directly-connected node first."""
    if graph_driver.provider == GraphProvider.FALKORDB:
        pytest.skip('Skipping as tests fail on Falkordb')

    # Three entities; only the first two get linked by an edge below.
    entities = []
    for label in ('test_entity_1', 'test_entity_2', 'test_entity_3'):
        node = EntityNode(
            name=label,
            labels=[],
            created_at=datetime.now(),
            group_id=group_id,
        )
        await node.generate_name_embedding(mock_embedder)
        entities.append(node)
    origin, neighbor, outsider = entities

    # Connect origin -> neighbor so the reranker sees a short path.
    connecting_edge = EntityEdge(
        source_node_uuid=origin.uuid,
        target_node_uuid=neighbor.uuid,
        name='RELATES_TO',
        fact='test_entity_1 relates to test_entity_2',
        created_at=datetime.now(),
        group_id=group_id,
    )
    await connecting_edge.generate_embedding(mock_embedder)

    # Persist the nodes and the edge.
    for node in entities:
        await node.save(graph_driver)
    await connecting_edge.save(graph_driver)

    # Rerank both candidates relative to the origin node.
    reranked_uuids, reranked_scores = await node_distance_reranker(
        graph_driver,
        [neighbor.uuid, outsider.uuid],
        origin.uuid,
    )
    uuid_to_name = {node.uuid: node.name for node in entities}
    names = [uuid_to_name[uuid] for uuid in reranked_uuids]
    assert names == [neighbor.name, outsider.name]
    assert np.allclose(reranked_scores, [1.0, 0.0])
@pytest.mark.asyncio
async def test_episode_mentions_reranker(graph_driver, mock_embedder):
    """An entity mentioned by an episode outranks one that is not mentioned."""
    if graph_driver.provider == GraphProvider.FALKORDB:
        pytest.skip('Skipping as tests fail on Falkordb')

    # One episode, which will mention exactly one of the two entities.
    episode = EpisodicNode(
        name='test_episodic_1',
        content='test_content',
        created_at=datetime.now(),
        valid_at=datetime.now(),
        group_id=group_id,
        source=EpisodeType.message,
        source_description='Description about Alice',
    )

    # Two entities with name embeddings.
    mentioned = EntityNode(
        name='test_entity_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await mentioned.generate_name_embedding(mock_embedder)
    unmentioned = EntityNode(
        name='test_entity_2',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await unmentioned.generate_name_embedding(mock_embedder)

    # Link the episode to the first entity only.
    mention_edge = EpisodicEdge(
        source_node_uuid=episode.uuid,
        target_node_uuid=mentioned.uuid,
        created_at=datetime.now(),
        group_id=group_id,
    )

    # Persist everything.
    await mentioned.save(graph_driver)
    await unmentioned.save(graph_driver)
    await episode.save(graph_driver)
    await mention_edge.save(graph_driver)

    # Rerank: the mentioned entity scores 1.0, the other gets +inf (no mentions).
    reranked_uuids, reranked_scores = await episode_mentions_reranker(
        graph_driver,
        [[mentioned.uuid, unmentioned.uuid]],
    )
    uuid_to_name = {mentioned.uuid: mentioned.name, unmentioned.uuid: unmentioned.name}
    names = [uuid_to_name[uuid] for uuid in reranked_uuids]
    assert names == [mentioned.name, unmentioned.name]
    assert np.allclose(reranked_scores, [1.0, float('inf')])
@pytest.mark.asyncio
async def test_get_embeddings_for_edges(graph_driver, mock_embedder):
    """A stored edge fact embedding can be read back keyed by the edge uuid."""
    # Two entities to anchor a single edge.
    source_node = EntityNode(
        name='test_entity_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await source_node.generate_name_embedding(mock_embedder)
    target_node = EntityNode(
        name='test_entity_2',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await target_node.generate_name_embedding(mock_embedder)

    # The edge whose fact embedding we expect to retrieve.
    edge = EntityEdge(
        source_node_uuid=source_node.uuid,
        target_node_uuid=target_node.uuid,
        name='RELATES_TO',
        fact='test_entity_1 relates to test_entity_2',
        created_at=datetime.now(),
        group_id=group_id,
    )
    await edge.generate_embedding(mock_embedder)

    # Persist nodes and edge.
    await source_node.save(graph_driver)
    await target_node.save(graph_driver)
    await edge.save(graph_driver)

    # The stored embedding must round-trip unchanged.
    embeddings = await get_embeddings_for_edges(graph_driver, [edge])
    assert len(embeddings) == 1
    assert edge.uuid in embeddings
    assert np.allclose(embeddings[edge.uuid], edge.fact_embedding)
@pytest.mark.asyncio
async def test_get_embeddings_for_nodes(graph_driver, mock_embedder):
    """A stored entity name embedding can be read back keyed by the node uuid."""
    node = EntityNode(
        name='test_entity_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await node.generate_name_embedding(mock_embedder)
    await node.save(graph_driver)

    # The stored embedding must round-trip unchanged.
    embeddings = await get_embeddings_for_nodes(graph_driver, [node])
    assert len(embeddings) == 1
    assert node.uuid in embeddings
    assert np.allclose(embeddings[node.uuid], node.name_embedding)
@pytest.mark.asyncio
async def test_get_embeddings_for_communities(graph_driver, mock_embedder):
    """A stored community name embedding can be read back keyed by its uuid."""
    community = CommunityNode(
        name='test_community_1',
        labels=[],
        created_at=datetime.now(),
        group_id=group_id,
    )
    await community.generate_name_embedding(mock_embedder)
    await community.save(graph_driver)

    # The stored embedding must round-trip unchanged.
    embeddings = await get_embeddings_for_communities(graph_driver, [community])
    assert len(embeddings) == 1
    assert community.uuid in embeddings
    assert np.allclose(embeddings[community.uuid], community.name_embedding)
| {
"repo_id": "getzep/graphiti",
"file_path": "tests/test_graphiti_mock.py",
"license": "Apache License 2.0",
"lines": 1850,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:examples/quickstart/quickstart_neptune.py | """
Copyright 2025, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncio
import json
import logging
import os
from datetime import datetime, timezone
from logging import INFO
from dotenv import load_dotenv
from graphiti_core import Graphiti
from graphiti_core.driver.neptune_driver import NeptuneDriver
from graphiti_core.nodes import EpisodeType
from graphiti_core.search.search_config_recipes import NODE_HYBRID_SEARCH_RRF
#################################################
# CONFIGURATION
#################################################
# Set up logging and environment variables for
# connecting to Neptune database
#################################################
# Configure logging
logging.basicConfig(
    level=INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)
logger = logging.getLogger(__name__)

# Load connection settings from a local .env file, if one is present.
load_dotenv()

# Neptune and OpenSearch connection parameters
# NEPTUNE_HOST must use the neptune-db://<endpoint> or neptune-graph://<graph-id>
# scheme expected by NeptuneDriver; NEPTUNE_PORT defaults to 8182.
neptune_uri = os.environ.get('NEPTUNE_HOST')
neptune_port = int(os.environ.get('NEPTUNE_PORT', 8182))
aoss_host = os.environ.get('AOSS_HOST')

# Fail fast with a clear message when required configuration is missing.
if not neptune_uri:
    raise ValueError('NEPTUNE_HOST must be set')

if not aoss_host:
    raise ValueError('AOSS_HOST must be set')
async def main():
    """Run the Graphiti-on-Neptune quickstart end to end.

    Steps: connect, reset indices and data, ingest text and JSON episodes,
    then demonstrate edge search, center-node reranked search, and a
    node-search recipe. The connection is always closed at the end.

    WARNING: this deletes the AOSS indices and ALL graph data on startup —
    only run it against a disposable demo environment.
    """
    #################################################
    # INITIALIZATION
    #################################################
    # Connect to Neptune and set up Graphiti indices
    # This is required before using other Graphiti
    # functionality
    #################################################

    # Initialize Graphiti with Neptune connection
    driver = NeptuneDriver(host=neptune_uri, aoss_host=aoss_host, port=neptune_port)
    graphiti = Graphiti(graph_driver=driver)

    try:
        # Initialize the graph database with graphiti's indices. This only needs to be done once.
        # NOTE: _delete_all_data is a private driver helper; acceptable in a demo script.
        await driver.delete_aoss_indices()
        await driver._delete_all_data()
        await graphiti.build_indices_and_constraints()

        #################################################
        # ADDING EPISODES
        #################################################
        # Episodes are the primary units of information
        # in Graphiti. They can be text or structured JSON
        # and are automatically processed to extract entities
        # and relationships.
        #################################################

        # Example: Add Episodes
        # Episodes list containing both text and JSON episodes
        episodes = [
            {
                'content': 'Kamala Harris is the Attorney General of California. She was previously '
                'the district attorney for San Francisco.',
                'type': EpisodeType.text,
                'description': 'podcast transcript',
            },
            {
                'content': 'As AG, Harris was in office from January 3, 2011 – January 3, 2017',
                'type': EpisodeType.text,
                'description': 'podcast transcript',
            },
            {
                'content': {
                    'name': 'Gavin Newsom',
                    'position': 'Governor',
                    'state': 'California',
                    'previous_role': 'Lieutenant Governor',
                    'previous_location': 'San Francisco',
                },
                'type': EpisodeType.json,
                'description': 'podcast metadata',
            },
            {
                'content': {
                    'name': 'Gavin Newsom',
                    'position': 'Governor',
                    'term_start': 'January 7, 2019',
                    'term_end': 'Present',
                },
                'type': EpisodeType.json,
                'description': 'podcast metadata',
            },
        ]

        # Add episodes to the graph
        # JSON episode bodies are serialized to a string before ingestion.
        for i, episode in enumerate(episodes):
            await graphiti.add_episode(
                name=f'Freakonomics Radio {i}',
                episode_body=episode['content']
                if isinstance(episode['content'], str)
                else json.dumps(episode['content']),
                source=episode['type'],
                source_description=episode['description'],
                reference_time=datetime.now(timezone.utc),
            )
            print(f'Added episode: Freakonomics Radio {i} ({episode["type"].value})')

        await graphiti.build_communities()

        #################################################
        # BASIC SEARCH
        #################################################
        # The simplest way to retrieve relationships (edges)
        # from Graphiti is using the search method, which
        # performs a hybrid search combining semantic
        # similarity and BM25 text retrieval.
        #################################################

        # Perform a hybrid search combining semantic similarity and BM25 retrieval
        print("\nSearching for: 'Who was the California Attorney General?'")
        results = await graphiti.search('Who was the California Attorney General?')

        # Print search results
        print('\nSearch Results:')
        for result in results:
            print(f'UUID: {result.uuid}')
            print(f'Fact: {result.fact}')
            if hasattr(result, 'valid_at') and result.valid_at:
                print(f'Valid from: {result.valid_at}')
            if hasattr(result, 'invalid_at') and result.invalid_at:
                print(f'Valid until: {result.invalid_at}')
            print('---')

        #################################################
        # CENTER NODE SEARCH
        #################################################
        # For more contextually relevant results, you can
        # use a center node to rerank search results based
        # on their graph distance to a specific node
        #################################################

        # Use the top search result's UUID as the center node for reranking
        if results and len(results) > 0:
            # Get the source node UUID from the top result
            center_node_uuid = results[0].source_node_uuid

            print('\nReranking search results based on graph distance:')
            print(f'Using center node UUID: {center_node_uuid}')

            reranked_results = await graphiti.search(
                'Who was the California Attorney General?', center_node_uuid=center_node_uuid
            )

            # Print reranked search results
            print('\nReranked Search Results:')
            for result in reranked_results:
                print(f'UUID: {result.uuid}')
                print(f'Fact: {result.fact}')
                if hasattr(result, 'valid_at') and result.valid_at:
                    print(f'Valid from: {result.valid_at}')
                if hasattr(result, 'invalid_at') and result.invalid_at:
                    print(f'Valid until: {result.invalid_at}')
                print('---')
        else:
            print('No results found in the initial search to use as center node.')

        #################################################
        # NODE SEARCH USING SEARCH RECIPES
        #################################################
        # Graphiti provides predefined search recipes
        # optimized for different search scenarios.
        # Here we use NODE_HYBRID_SEARCH_RRF for retrieving
        # nodes directly instead of edges.
        #################################################

        # Example: Perform a node search using _search method with standard recipes
        print(
            '\nPerforming node search using _search method with standard recipe NODE_HYBRID_SEARCH_RRF:'
        )

        # Use a predefined search configuration recipe and modify its limit
        # (deep copy so the shared module-level recipe is not mutated).
        node_search_config = NODE_HYBRID_SEARCH_RRF.model_copy(deep=True)
        node_search_config.limit = 5  # Limit to 5 results

        # Execute the node search
        node_search_results = await graphiti._search(
            query='California Governor',
            config=node_search_config,
        )

        # Print node search results
        print('\nNode Search Results:')
        for node in node_search_results.nodes:
            print(f'Node UUID: {node.uuid}')
            print(f'Node Name: {node.name}')
            node_summary = node.summary[:100] + '...' if len(node.summary) > 100 else node.summary
            print(f'Content Summary: {node_summary}')
            print(f'Node Labels: {", ".join(node.labels)}')
            print(f'Created At: {node.created_at}')
            if hasattr(node, 'attributes') and node.attributes:
                print('Attributes:')
                for key, value in node.attributes.items():
                    print(f'  {key}: {value}')
            print('---')

    finally:
        #################################################
        # CLEANUP
        #################################################
        # Always close the connection to Neptune when
        # finished to properly release resources
        #################################################

        # Close the connection
        await graphiti.close()
        print('\nConnection closed')
# Script entry point: run the async quickstart inside a fresh event loop.
if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "getzep/graphiti",
"file_path": "examples/quickstart/quickstart_neptune.py",
"license": "Apache License 2.0",
"lines": 213,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/neptune_driver.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncio
import datetime
import logging
from collections.abc import Coroutine
from typing import Any
import boto3
from langchain_aws.graphs import NeptuneAnalyticsGraph, NeptuneGraph
from opensearchpy import OpenSearch, Urllib3AWSV4SignerAuth, Urllib3HttpConnection, helpers
from graphiti_core.driver.driver import GraphDriver, GraphDriverSession, GraphProvider
from graphiti_core.driver.neptune.operations.community_edge_ops import (
NeptuneCommunityEdgeOperations,
)
from graphiti_core.driver.neptune.operations.community_node_ops import (
NeptuneCommunityNodeOperations,
)
from graphiti_core.driver.neptune.operations.entity_edge_ops import NeptuneEntityEdgeOperations
from graphiti_core.driver.neptune.operations.entity_node_ops import NeptuneEntityNodeOperations
from graphiti_core.driver.neptune.operations.episode_node_ops import NeptuneEpisodeNodeOperations
from graphiti_core.driver.neptune.operations.episodic_edge_ops import NeptuneEpisodicEdgeOperations
from graphiti_core.driver.neptune.operations.graph_ops import NeptuneGraphMaintenanceOperations
from graphiti_core.driver.neptune.operations.has_episode_edge_ops import (
NeptuneHasEpisodeEdgeOperations,
)
from graphiti_core.driver.neptune.operations.next_episode_edge_ops import (
NeptuneNextEpisodeEdgeOperations,
)
from graphiti_core.driver.neptune.operations.saga_node_ops import NeptuneSagaNodeOperations
from graphiti_core.driver.neptune.operations.search_ops import NeptuneSearchOperations
from graphiti_core.driver.operations.community_edge_ops import CommunityEdgeOperations
from graphiti_core.driver.operations.community_node_ops import CommunityNodeOperations
from graphiti_core.driver.operations.entity_edge_ops import EntityEdgeOperations
from graphiti_core.driver.operations.entity_node_ops import EntityNodeOperations
from graphiti_core.driver.operations.episode_node_ops import EpisodeNodeOperations
from graphiti_core.driver.operations.episodic_edge_ops import EpisodicEdgeOperations
from graphiti_core.driver.operations.graph_ops import GraphMaintenanceOperations
from graphiti_core.driver.operations.has_episode_edge_ops import HasEpisodeEdgeOperations
from graphiti_core.driver.operations.next_episode_edge_ops import NextEpisodeEdgeOperations
from graphiti_core.driver.operations.saga_node_ops import SagaNodeOperations
from graphiti_core.driver.operations.search_ops import SearchOperations
logger = logging.getLogger(__name__)
# Default number of hits returned by an AOSS fulltext query.
DEFAULT_SIZE = 10

# Definitions of the OpenSearch Serverless (AOSS) indices backing Graphiti's
# fulltext search on Neptune. Each entry carries:
#   'index_name': the AOSS index name,
#   'body':       the field mapping used when the index is created,
#   'query':      a multi_match query template whose 'query' string is filled
#                 in at search time (see NeptuneDriver.run_aoss_query).
aoss_indices = [
    {
        'index_name': 'node_name_and_summary',
        'body': {
            'mappings': {
                'properties': {
                    'uuid': {'type': 'keyword'},
                    'name': {'type': 'text'},
                    'summary': {'type': 'text'},
                    'group_id': {'type': 'text'},
                }
            }
        },
        'query': {
            'query': {'multi_match': {'query': '', 'fields': ['name', 'summary', 'group_id']}},
            'size': DEFAULT_SIZE,
        },
    },
    {
        'index_name': 'community_name',
        'body': {
            'mappings': {
                'properties': {
                    'uuid': {'type': 'keyword'},
                    'name': {'type': 'text'},
                    'group_id': {'type': 'text'},
                }
            }
        },
        'query': {
            'query': {'multi_match': {'query': '', 'fields': ['name', 'group_id']}},
            'size': DEFAULT_SIZE,
        },
    },
    {
        'index_name': 'episode_content',
        'body': {
            'mappings': {
                'properties': {
                    'uuid': {'type': 'keyword'},
                    'content': {'type': 'text'},
                    'source': {'type': 'text'},
                    'source_description': {'type': 'text'},
                    'group_id': {'type': 'text'},
                }
            }
        },
        'query': {
            'query': {
                'multi_match': {
                    'query': '',
                    'fields': ['content', 'source', 'source_description', 'group_id'],
                }
            },
            'size': DEFAULT_SIZE,
        },
    },
    {
        'index_name': 'edge_name_and_fact',
        'body': {
            'mappings': {
                'properties': {
                    'uuid': {'type': 'keyword'},
                    'name': {'type': 'text'},
                    'fact': {'type': 'text'},
                    'group_id': {'type': 'text'},
                }
            }
        },
        'query': {
            'query': {'multi_match': {'query': '', 'fields': ['name', 'fact', 'group_id']}},
            'size': DEFAULT_SIZE,
        },
    },
]
class NeptuneDriver(GraphDriver):
    """GraphDriver backed by Amazon Neptune (Database or Analytics) for graph
    storage and Amazon OpenSearch Serverless (AOSS) for fulltext indexing.
    """

    provider: GraphProvider = GraphProvider.NEPTUNE

    def __init__(self, host: str, aoss_host: str, port: int = 8182, aoss_port: int = 443):
        """This initializes a NeptuneDriver for use with Neptune as a backend

        Args:
            host (str): The Neptune Database or Neptune Analytics host
            aoss_host (str): The OpenSearch host value
            port (int, optional): The Neptune Database port, ignored for Neptune Analytics. Defaults to 8182.
            aoss_port (int, optional): The OpenSearch port. Defaults to 443.
        """
        if not host:
            raise ValueError('You must provide an endpoint to create a NeptuneDriver')

        if host.startswith('neptune-db://'):
            # This is a Neptune Database Cluster
            endpoint = host.replace('neptune-db://', '')
            self.client = NeptuneGraph(endpoint, port)
            logger.debug('Creating Neptune Database session for %s', host)
        elif host.startswith('neptune-graph://'):
            # This is a Neptune Analytics Graph
            graphId = host.replace('neptune-graph://', '')
            self.client = NeptuneAnalyticsGraph(graphId)
            logger.debug('Creating Neptune Graph session for %s', host)
        else:
            raise ValueError(
                'You must provide an endpoint to create a NeptuneDriver as either neptune-db://<endpoint> or neptune-graph://<graphid>'
            )

        if not aoss_host:
            raise ValueError('You must provide an AOSS endpoint to create an OpenSearch driver.')

        # AOSS requests are signed with the ambient AWS credentials.
        session = boto3.Session()
        self.aoss_client = OpenSearch(
            hosts=[{'host': aoss_host, 'port': aoss_port}],
            http_auth=Urllib3AWSV4SignerAuth(
                session.get_credentials(), session.region_name, 'aoss'
            ),
            use_ssl=True,
            verify_certs=True,
            connection_class=Urllib3HttpConnection,
            pool_maxsize=20,
        )

        # Instantiate Neptune operations
        self._entity_node_ops = NeptuneEntityNodeOperations()
        self._episode_node_ops = NeptuneEpisodeNodeOperations()
        self._community_node_ops = NeptuneCommunityNodeOperations(driver=self)
        self._saga_node_ops = NeptuneSagaNodeOperations()
        self._entity_edge_ops = NeptuneEntityEdgeOperations()
        self._episodic_edge_ops = NeptuneEpisodicEdgeOperations()
        self._community_edge_ops = NeptuneCommunityEdgeOperations()
        self._has_episode_edge_ops = NeptuneHasEpisodeEdgeOperations()
        self._next_episode_edge_ops = NeptuneNextEpisodeEdgeOperations()
        self._search_ops = NeptuneSearchOperations(driver=self)
        self._graph_ops = NeptuneGraphMaintenanceOperations(driver=self)

    # --- Operations properties ---
    @property
    def entity_node_ops(self) -> EntityNodeOperations:
        return self._entity_node_ops

    @property
    def episode_node_ops(self) -> EpisodeNodeOperations:
        return self._episode_node_ops

    @property
    def community_node_ops(self) -> CommunityNodeOperations:
        return self._community_node_ops

    @property
    def saga_node_ops(self) -> SagaNodeOperations:
        return self._saga_node_ops

    @property
    def entity_edge_ops(self) -> EntityEdgeOperations:
        return self._entity_edge_ops

    @property
    def episodic_edge_ops(self) -> EpisodicEdgeOperations:
        return self._episodic_edge_ops

    @property
    def community_edge_ops(self) -> CommunityEdgeOperations:
        return self._community_edge_ops

    @property
    def has_episode_edge_ops(self) -> HasEpisodeEdgeOperations:
        return self._has_episode_edge_ops

    @property
    def next_episode_edge_ops(self) -> NextEpisodeEdgeOperations:
        return self._next_episode_edge_ops

    @property
    def search_ops(self) -> SearchOperations:
        return self._search_ops

    @property
    def graph_ops(self) -> GraphMaintenanceOperations:
        return self._graph_ops

    def _sanitize_parameters(self, query, params: dict):
        """Rewrite a query (or list of queries) so datetime parameters are
        openCypher-compatible: datetime values in `params` are converted to ISO
        strings (mutating `params` in place) and the corresponding `$param`
        placeholders in the query text are wrapped in `datetime(...)`.
        """
        if isinstance(query, list):
            queries = []
            for q in query:
                queries.append(self._sanitize_parameters(q, params))
            return queries
        else:
            for k, v in params.items():
                if isinstance(v, datetime.datetime):
                    params[k] = v.isoformat()
                elif isinstance(v, list):
                    # Handle lists that might contain datetime objects
                    for i, item in enumerate(v):
                        if isinstance(item, datetime.datetime):
                            v[i] = item.isoformat()
                            query = str(query).replace(f'${k}', f'datetime(${k})')
                        if isinstance(item, dict):
                            query = self._sanitize_parameters(query, v[i])
                    # If the list contains datetime objects, we need to wrap each element with datetime()
                    if any(isinstance(item, str) and 'T' in item for item in v):
                        # Create a new list expression with datetime() wrapped around each element
                        datetime_list = (
                            '['
                            + ', '.join(
                                f'datetime("{item}")'
                                if isinstance(item, str) and 'T' in item
                                else repr(item)
                                for item in v
                            )
                            + ']'
                        )
                        query = str(query).replace(f'${k}', datetime_list)
                elif isinstance(v, dict):
                    query = self._sanitize_parameters(query, v)
            return query

    async def execute_query(
        self, cypher_query_, **kwargs: Any
    ) -> tuple[list[dict[str, Any]], None, None]:
        """Execute one query (kwargs as parameters) or a batch of
        (query, params) pairs.

        NOTE: for a batch, the statements run in order and only the FINAL
        statement's rows are returned.
        """
        params = dict(kwargs)

        if isinstance(cypher_query_, list):
            result: list[dict[str, Any]] = []
            for q in cypher_query_:
                result, _, _ = self._run_query(q[0], q[1])
            return result, None, None
        else:
            return self._run_query(cypher_query_, params)

    def _run_query(self, cypher_query_, params):
        """Sanitize datetime parameters, run the query synchronously on the
        Neptune client, and log the full query/params on failure.
        """
        cypher_query_ = str(self._sanitize_parameters(cypher_query_, params))
        try:
            result = self.client.query(cypher_query_, params=params)
        except Exception as e:
            logger.error('Query: %s', cypher_query_)
            logger.error('Parameters: %s', params)
            logger.error('Error executing query: %s', e)
            raise e

        return result, None, None

    def session(self, database: str | None = None) -> GraphDriverSession:
        """Return a lightweight session wrapper; `database` is ignored for Neptune."""
        return NeptuneDriverSession(driver=self)

    async def close(self) -> None:
        """Close the underlying Neptune client connection."""
        return self.client.client.close()

    async def _delete_all_data(self) -> Any:
        """Delete every node and relationship in the graph. Destructive."""
        return await self.execute_query('MATCH (n) DETACH DELETE n')

    def delete_all_indexes(self) -> Coroutine[Any, Any, Any]:
        """Return a coroutine that deletes all AOSS fulltext indices."""
        return self.delete_all_indexes_impl()

    async def delete_all_indexes_impl(self) -> Any:
        # Await the deletion so a single `await driver.delete_all_indexes()`
        # actually performs it. (Previously the unawaited coroutine from
        # delete_aoss_indices() was returned, so the indices were never
        # deleted and Python emitted a "coroutine was never awaited" warning.)
        return await self.delete_aoss_indices()

    async def create_aoss_indices(self):
        """Create any missing AOSS indices defined in `aoss_indices`."""
        for index in aoss_indices:
            index_name = index['index_name']
            client = self.aoss_client
            if not client.indices.exists(index=index_name):
                client.indices.create(index=index_name, body=index['body'])
        # Sleep for 1 minute to let the index creation complete
        await asyncio.sleep(60)

    async def delete_aoss_indices(self):
        """Delete every AOSS index defined in `aoss_indices` that exists."""
        for index in aoss_indices:
            index_name = index['index_name']
            client = self.aoss_client
            if client.indices.exists(index=index_name):
                client.indices.delete(index=index_name)

    async def build_indices_and_constraints(self, delete_existing: bool = False):
        # Neptune uses OpenSearch (AOSS) for indexing
        if delete_existing:
            await self.delete_aoss_indices()

        await self.create_aoss_indices()

    def run_aoss_query(self, name: str, query_text: str, limit: int = 10) -> dict[str, Any]:
        """Run a fulltext multi_match query against the named AOSS index.

        Returns the raw OpenSearch response, or {} when `name` matches no
        configured index.
        """
        for index in aoss_indices:
            if name.lower() == index['index_name']:
                # Build the request body fresh instead of mutating the shared
                # module-level template, and honor `limit` (previously the
                # template's DEFAULT_SIZE was always sent, so `limit` was
                # silently ignored).
                fields = index['query']['query']['multi_match']['fields']
                body = {
                    'size': limit,
                    'query': {'multi_match': {'query': query_text, 'fields': fields}},
                }
                resp = self.aoss_client.search(body=body, index=index['index_name'])
                return resp
        return {}

    def save_to_aoss(self, name: str, data: list[dict]) -> int:
        """Bulk-index `data` into the named AOSS index, keeping only fields
        declared in the index mapping. Returns the number of successful docs,
        or 0 when `name` matches no configured index.
        """
        for index in aoss_indices:
            if name.lower() == index['index_name']:
                to_index = []
                for d in data:
                    # Each document is keyed by the item's uuid.
                    item = {'_index': name, '_id': d['uuid']}
                    for p in index['body']['mappings']['properties']:
                        if p in d:
                            item[p] = d[p]
                    to_index.append(item)
                success, failed = helpers.bulk(self.aoss_client, to_index, stats_only=True)
                return success

        return 0
class NeptuneDriverSession(GraphDriverSession):
    """Thin session wrapper over NeptuneDriver.

    Neptune has no per-session connection or transaction lifecycle here, so
    the context-manager and close hooks are deliberate no-ops.
    """

    provider = GraphProvider.NEPTUNE

    def __init__(self, driver: NeptuneDriver):  # type: ignore[reportUnknownArgumentType]
        self.driver = driver

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        # Nothing to release; present only to satisfy the async-context protocol.
        return None

    async def close(self):
        # No per-session resources are held, so closing is a no-op.
        return None

    async def execute_write(self, func, *args, **kwargs):
        # There is no real transaction object: invoke the callback directly,
        # handing this session in as the "transaction".
        return await func(self, *args, **kwargs)

    async def run(self, query: str | list, **kwargs: Any) -> Any:
        # Single statement: delegate straight to the driver.
        if not isinstance(query, list):
            return await self.driver.execute_query(str(query), **kwargs)
        # Batch: execute sequentially, surfacing the final statement's result.
        outcome = None
        for statement in query:
            outcome = await self.driver.execute_query(statement, **kwargs)
        return outcome
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/neptune_driver.py",
"license": "Apache License 2.0",
"lines": 343,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:tests/test_edge_int.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import sys
from datetime import datetime
import numpy as np
import pytest
from graphiti_core.edges import CommunityEdge, EntityEdge, EpisodicEdge
from graphiti_core.nodes import CommunityNode, EntityNode, EpisodeType, EpisodicNode
from tests.helpers_test import get_edge_count, get_node_count, group_id
# Enable the pytest-asyncio plugin so the `async def` tests below can run.
pytest_plugins = ('pytest_asyncio',)
def setup_logging():
    """Configure the root logger to emit INFO-level records to stdout.

    Returns:
        The configured root logger.
    """
    root = logging.getLogger()
    root.setLevel(logging.INFO)

    # Send formatted INFO records to stdout.
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(logging.INFO)
    stream_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    )
    root.addHandler(stream_handler)
    return root
@pytest.mark.asyncio
async def test_episodic_edge(graph_driver, mock_embedder):
    """End-to-end CRUD test for EpisodicEdge.

    Exercises save, retrieval by uuid / uuids / group_ids, episode lookup via
    an entity node, and both single and bulk deletion, verifying graph
    node/edge counts at every step.
    """
    now = datetime.now()

    # Create episodic node
    episode_node = EpisodicNode(
        name='test_episode',
        labels=[],
        created_at=now,
        valid_at=now,
        source=EpisodeType.message,
        source_description='conversation message',
        content='Alice likes Bob',
        entity_edges=[],
        group_id=group_id,
    )
    node_count = await get_node_count(graph_driver, [episode_node.uuid])
    assert node_count == 0
    await episode_node.save(graph_driver)
    node_count = await get_node_count(graph_driver, [episode_node.uuid])
    assert node_count == 1

    # Create entity node
    alice_node = EntityNode(
        name='Alice',
        labels=[],
        created_at=now,
        summary='Alice summary',
        group_id=group_id,
    )
    await alice_node.generate_name_embedding(mock_embedder)
    node_count = await get_node_count(graph_driver, [alice_node.uuid])
    assert node_count == 0
    await alice_node.save(graph_driver)
    node_count = await get_node_count(graph_driver, [alice_node.uuid])
    assert node_count == 1

    # Create episodic to entity edge
    episodic_edge = EpisodicEdge(
        source_node_uuid=episode_node.uuid,
        target_node_uuid=alice_node.uuid,
        created_at=now,
        group_id=group_id,
    )
    edge_count = await get_edge_count(graph_driver, [episodic_edge.uuid])
    assert edge_count == 0
    await episodic_edge.save(graph_driver)
    edge_count = await get_edge_count(graph_driver, [episodic_edge.uuid])
    assert edge_count == 1

    # Get edge by uuid
    retrieved = await EpisodicEdge.get_by_uuid(graph_driver, episodic_edge.uuid)
    assert retrieved.uuid == episodic_edge.uuid
    assert retrieved.source_node_uuid == episode_node.uuid
    assert retrieved.target_node_uuid == alice_node.uuid
    assert retrieved.created_at == now
    assert retrieved.group_id == group_id

    # Get edge by uuids
    retrieved = await EpisodicEdge.get_by_uuids(graph_driver, [episodic_edge.uuid])
    assert len(retrieved) == 1
    assert retrieved[0].uuid == episodic_edge.uuid
    assert retrieved[0].source_node_uuid == episode_node.uuid
    assert retrieved[0].target_node_uuid == alice_node.uuid
    assert retrieved[0].created_at == now
    assert retrieved[0].group_id == group_id

    # Get edge by group ids
    retrieved = await EpisodicEdge.get_by_group_ids(graph_driver, [group_id], limit=2)
    assert len(retrieved) == 1
    assert retrieved[0].uuid == episodic_edge.uuid
    assert retrieved[0].source_node_uuid == episode_node.uuid
    assert retrieved[0].target_node_uuid == alice_node.uuid
    assert retrieved[0].created_at == now
    assert retrieved[0].group_id == group_id

    # Get episodic node by entity node uuid
    retrieved = await EpisodicNode.get_by_entity_node_uuid(graph_driver, alice_node.uuid)
    assert len(retrieved) == 1
    assert retrieved[0].uuid == episode_node.uuid
    assert retrieved[0].name == 'test_episode'
    assert retrieved[0].created_at == now
    assert retrieved[0].group_id == group_id

    # Delete edge by uuid
    await episodic_edge.delete(graph_driver)
    edge_count = await get_edge_count(graph_driver, [episodic_edge.uuid])
    assert edge_count == 0

    # Delete edge by uuids (re-save first so there is something to delete)
    await episodic_edge.save(graph_driver)
    await episodic_edge.delete_by_uuids(graph_driver, [episodic_edge.uuid])
    edge_count = await get_edge_count(graph_driver, [episodic_edge.uuid])
    assert edge_count == 0

    # Cleanup nodes
    await episode_node.delete(graph_driver)
    node_count = await get_node_count(graph_driver, [episode_node.uuid])
    assert node_count == 0
    await alice_node.delete(graph_driver)
    node_count = await get_node_count(graph_driver, [alice_node.uuid])
    assert node_count == 0

    await graph_driver.close()
@pytest.mark.asyncio
async def test_entity_edge(graph_driver, mock_embedder):
    """End-to-end CRUD test for EntityEdge.

    Covers save, retrieval by uuid / uuids / group_ids / node uuid, the fact
    embedding round-trip, and that deleting an endpoint node (by uuid, uuids,
    or group id) cascades to the edge.
    """
    now = datetime.now()

    # Create entity node
    alice_node = EntityNode(
        name='Alice',
        labels=[],
        created_at=now,
        summary='Alice summary',
        group_id=group_id,
    )
    await alice_node.generate_name_embedding(mock_embedder)
    node_count = await get_node_count(graph_driver, [alice_node.uuid])
    assert node_count == 0
    await alice_node.save(graph_driver)
    node_count = await get_node_count(graph_driver, [alice_node.uuid])
    assert node_count == 1

    # Create entity node
    bob_node = EntityNode(
        name='Bob', labels=[], created_at=now, summary='Bob summary', group_id=group_id
    )
    await bob_node.generate_name_embedding(mock_embedder)
    node_count = await get_node_count(graph_driver, [bob_node.uuid])
    assert node_count == 0
    await bob_node.save(graph_driver)
    node_count = await get_node_count(graph_driver, [bob_node.uuid])
    assert node_count == 1

    # Create entity to entity edge
    entity_edge = EntityEdge(
        source_node_uuid=alice_node.uuid,
        target_node_uuid=bob_node.uuid,
        created_at=now,
        name='likes',
        fact='Alice likes Bob',
        episodes=[],
        expired_at=now,
        valid_at=now,
        invalid_at=now,
        group_id=group_id,
    )
    edge_embedding = await entity_edge.generate_embedding(mock_embedder)
    edge_count = await get_edge_count(graph_driver, [entity_edge.uuid])
    assert edge_count == 0
    await entity_edge.save(graph_driver)
    edge_count = await get_edge_count(graph_driver, [entity_edge.uuid])
    assert edge_count == 1

    # Get edge by uuid
    retrieved = await EntityEdge.get_by_uuid(graph_driver, entity_edge.uuid)
    assert retrieved.uuid == entity_edge.uuid
    assert retrieved.source_node_uuid == alice_node.uuid
    assert retrieved.target_node_uuid == bob_node.uuid
    assert retrieved.created_at == now
    assert retrieved.group_id == group_id

    # Get edge by uuids
    retrieved = await EntityEdge.get_by_uuids(graph_driver, [entity_edge.uuid])
    assert len(retrieved) == 1
    assert retrieved[0].uuid == entity_edge.uuid
    assert retrieved[0].source_node_uuid == alice_node.uuid
    assert retrieved[0].target_node_uuid == bob_node.uuid
    assert retrieved[0].created_at == now
    assert retrieved[0].group_id == group_id

    # Get edge by group ids
    retrieved = await EntityEdge.get_by_group_ids(graph_driver, [group_id], limit=2)
    assert len(retrieved) == 1
    assert retrieved[0].uuid == entity_edge.uuid
    assert retrieved[0].source_node_uuid == alice_node.uuid
    assert retrieved[0].target_node_uuid == bob_node.uuid
    assert retrieved[0].created_at == now
    assert retrieved[0].group_id == group_id

    # Get edge by node uuid
    retrieved = await EntityEdge.get_by_node_uuid(graph_driver, alice_node.uuid)
    assert len(retrieved) == 1
    assert retrieved[0].uuid == entity_edge.uuid
    assert retrieved[0].source_node_uuid == alice_node.uuid
    assert retrieved[0].target_node_uuid == bob_node.uuid
    assert retrieved[0].created_at == now
    assert retrieved[0].group_id == group_id

    # Get fact embedding: loading from the store should return what was saved
    await entity_edge.load_fact_embedding(graph_driver)
    assert np.allclose(entity_edge.fact_embedding, edge_embedding)

    # Delete edge by uuid
    await entity_edge.delete(graph_driver)
    edge_count = await get_edge_count(graph_driver, [entity_edge.uuid])
    assert edge_count == 0

    # Delete edge by uuids
    await entity_edge.save(graph_driver)
    await entity_edge.delete_by_uuids(graph_driver, [entity_edge.uuid])
    edge_count = await get_edge_count(graph_driver, [entity_edge.uuid])
    assert edge_count == 0

    # Deleting node should delete the edge
    await entity_edge.save(graph_driver)
    await alice_node.delete(graph_driver)
    node_count = await get_node_count(graph_driver, [alice_node.uuid])
    assert node_count == 0
    edge_count = await get_edge_count(graph_driver, [entity_edge.uuid])
    assert edge_count == 0

    # Deleting node by uuids should delete the edge
    await alice_node.save(graph_driver)
    await entity_edge.save(graph_driver)
    await alice_node.delete_by_uuids(graph_driver, [alice_node.uuid])
    node_count = await get_node_count(graph_driver, [alice_node.uuid])
    assert node_count == 0
    edge_count = await get_edge_count(graph_driver, [entity_edge.uuid])
    assert edge_count == 0

    # Deleting node by group id should delete the edge
    await alice_node.save(graph_driver)
    await entity_edge.save(graph_driver)
    await alice_node.delete_by_group_id(graph_driver, alice_node.group_id)
    node_count = await get_node_count(graph_driver, [alice_node.uuid])
    assert node_count == 0
    edge_count = await get_edge_count(graph_driver, [entity_edge.uuid])
    assert edge_count == 0

    # Cleanup nodes
    await alice_node.delete(graph_driver)
    node_count = await get_node_count(graph_driver, [alice_node.uuid])
    assert node_count == 0
    await bob_node.delete(graph_driver)
    node_count = await get_node_count(graph_driver, [bob_node.uuid])
    assert node_count == 0

    await graph_driver.close()
@pytest.mark.asyncio
async def test_community_edge(graph_driver, mock_embedder):
    """End-to-end CRUD test for CommunityEdge.

    Creates two community nodes and an entity node, links the communities
    with an edge, then verifies retrieval by uuid / uuids / group_ids and
    both deletion paths.
    """
    now = datetime.now()

    # Create community node
    community_node_1 = CommunityNode(
        name='test_community_1',
        group_id=group_id,
        summary='Community A summary',
    )
    await community_node_1.generate_name_embedding(mock_embedder)
    node_count = await get_node_count(graph_driver, [community_node_1.uuid])
    assert node_count == 0
    await community_node_1.save(graph_driver)
    node_count = await get_node_count(graph_driver, [community_node_1.uuid])
    assert node_count == 1

    # Create community node
    community_node_2 = CommunityNode(
        name='test_community_2',
        group_id=group_id,
        summary='Community B summary',
    )
    await community_node_2.generate_name_embedding(mock_embedder)
    node_count = await get_node_count(graph_driver, [community_node_2.uuid])
    assert node_count == 0
    await community_node_2.save(graph_driver)
    node_count = await get_node_count(graph_driver, [community_node_2.uuid])
    assert node_count == 1

    # Create entity node
    alice_node = EntityNode(
        name='Alice', labels=[], created_at=now, summary='Alice summary', group_id=group_id
    )
    await alice_node.generate_name_embedding(mock_embedder)
    node_count = await get_node_count(graph_driver, [alice_node.uuid])
    assert node_count == 0
    await alice_node.save(graph_driver)
    node_count = await get_node_count(graph_driver, [alice_node.uuid])
    assert node_count == 1

    # Create community to community edge
    community_edge = CommunityEdge(
        source_node_uuid=community_node_1.uuid,
        target_node_uuid=community_node_2.uuid,
        created_at=now,
        group_id=group_id,
    )
    edge_count = await get_edge_count(graph_driver, [community_edge.uuid])
    assert edge_count == 0
    await community_edge.save(graph_driver)
    edge_count = await get_edge_count(graph_driver, [community_edge.uuid])
    assert edge_count == 1

    # Get edge by uuid
    retrieved = await CommunityEdge.get_by_uuid(graph_driver, community_edge.uuid)
    assert retrieved.uuid == community_edge.uuid
    assert retrieved.source_node_uuid == community_node_1.uuid
    assert retrieved.target_node_uuid == community_node_2.uuid
    assert retrieved.created_at == now
    assert retrieved.group_id == group_id

    # Get edge by uuids
    retrieved = await CommunityEdge.get_by_uuids(graph_driver, [community_edge.uuid])
    assert len(retrieved) == 1
    assert retrieved[0].uuid == community_edge.uuid
    assert retrieved[0].source_node_uuid == community_node_1.uuid
    assert retrieved[0].target_node_uuid == community_node_2.uuid
    assert retrieved[0].created_at == now
    assert retrieved[0].group_id == group_id

    # Get edge by group ids
    retrieved = await CommunityEdge.get_by_group_ids(graph_driver, [group_id], limit=1)
    assert len(retrieved) == 1
    assert retrieved[0].uuid == community_edge.uuid
    assert retrieved[0].source_node_uuid == community_node_1.uuid
    assert retrieved[0].target_node_uuid == community_node_2.uuid
    assert retrieved[0].created_at == now
    assert retrieved[0].group_id == group_id

    # Delete edge by uuid
    await community_edge.delete(graph_driver)
    edge_count = await get_edge_count(graph_driver, [community_edge.uuid])
    assert edge_count == 0

    # Delete edge by uuids
    await community_edge.save(graph_driver)
    await community_edge.delete_by_uuids(graph_driver, [community_edge.uuid])
    edge_count = await get_edge_count(graph_driver, [community_edge.uuid])
    assert edge_count == 0

    # Cleanup nodes
    await alice_node.delete(graph_driver)
    node_count = await get_node_count(graph_driver, [alice_node.uuid])
    assert node_count == 0
    await community_node_1.delete(graph_driver)
    node_count = await get_node_count(graph_driver, [community_node_1.uuid])
    assert node_count == 0
    await community_node_2.delete(graph_driver)
    node_count = await get_node_count(graph_driver, [community_node_2.uuid])
    assert node_count == 0

    await graph_driver.close()
| {
"repo_id": "getzep/graphiti",
"file_path": "tests/test_edge_int.py",
"license": "Apache License 2.0",
"lines": 340,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:graphiti_core/cross_encoder/gemini_reranker_client.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import re
from typing import TYPE_CHECKING
from ..helpers import semaphore_gather
from ..llm_client import LLMConfig, RateLimitError
from .client import CrossEncoderClient
if TYPE_CHECKING:
from google import genai
from google.genai import types
else:
try:
from google import genai
from google.genai import types
except ImportError:
raise ImportError(
'google-genai is required for GeminiRerankerClient. '
'Install it with: pip install graphiti-core[google-genai]'
) from None
logger = logging.getLogger(__name__)

# Default Gemini model used when the LLMConfig does not name one.
DEFAULT_MODEL = 'gemini-2.5-flash-lite'
class GeminiRerankerClient(CrossEncoderClient):
    """Cross-encoder reranker backed by the Google Gemini API.

    The Gemini Developer API does not expose logprobs, so instead of a
    logit-based rerank this client asks the model for a direct 0-100
    relevance score per passage (one API call each) and normalizes the
    scores to [0, 1].
    """

    def __init__(
        self,
        config: LLMConfig | None = None,
        client: 'genai.Client | None' = None,
    ):
        """Create a Gemini reranker.

        Args:
            config: LLM configuration (API key, model, base URL, etc.).
                A default LLMConfig is used when omitted.
            client: Optional pre-built genai.Client; when not supplied one is
                constructed from the config's API key.
        """
        self.config = config if config is not None else LLMConfig()
        self.client = client if client is not None else genai.Client(api_key=self.config.api_key)

    def _scoring_request(self, query: str, passage: str) -> list['types.Content']:
        """Build the single-turn content list asking Gemini to score one passage."""
        prompt = f"""Rate how well this passage answers or relates to the query. Use a scale from 0 to 100.
Query: {query}
Passage: {passage}
Provide only a number between 0 and 100 (no explanation, just the number):"""
        return [
            types.Content(
                role='user',
                parts=[types.Part.from_text(text=prompt)],
            ),
        ]

    def _score_of(self, response) -> float:
        """Extract a normalized [0, 1] relevance score from one Gemini response.

        Falls back to 0.0 (with a warning) whenever a numeric score cannot be
        recovered from the response text.
        """
        try:
            if not (hasattr(response, 'text') and response.text):
                logger.warning('Empty response from Gemini for passage scoring')
                return 0.0
            score_text = response.text.strip()
            # The model may wrap the number in extra words; grab the first
            # 1-3 digit integer it produced.
            score_match = re.search(r'\b(\d{1,3})\b', score_text)
            if score_match is None:
                logger.warning(f'Could not extract numeric score from response: {score_text}')
                return 0.0
            # Normalize 0-100 to [0, 1] and clamp out-of-range values.
            return max(0.0, min(1.0, float(score_match.group(1)) / 100.0))
        except (ValueError, AttributeError) as e:
            logger.warning(f'Error parsing score from Gemini response: {e}')
            return 0.0

    async def rank(self, query: str, passages: list[str]) -> list[tuple[str, float]]:
        """Rank `passages` by relevance to `query`, highest score first.

        Each passage is scored individually (O(n) concurrent API calls) on a
        0-100 scale and normalized to [0, 1].

        Raises:
            RateLimitError: when the API reports rate-limit/quota exhaustion.
        """
        if len(passages) <= 1:
            # Nothing to compare; a lone passage gets full score by convention.
            return [(passage, 1.0) for passage in passages]

        requests = [self._scoring_request(query, passage) for passage in passages]
        generation_config = types.GenerateContentConfig(
            system_instruction='You are an expert at rating passage relevance. Respond with only a number from 0-100.',
            temperature=0.0,
            max_output_tokens=3,
        )
        try:
            responses = await semaphore_gather(
                *(
                    self.client.aio.models.generate_content(
                        model=self.config.model or DEFAULT_MODEL,
                        contents=contents,  # type: ignore
                        config=generation_config,
                    )
                    for contents in requests
                )
            )

            ranked = [
                (passage, self._score_of(response))
                for passage, response in zip(passages, responses, strict=True)
            ]
            # Highest relevance first; sort is stable for equal scores.
            ranked.sort(key=lambda pair: pair[1], reverse=True)
            return ranked
        except Exception as e:
            # Classify rate-limit style failures from the Gemini error text.
            lowered = str(e).lower()
            if (
                'rate limit' in lowered
                or 'quota' in lowered
                or 'resource_exhausted' in lowered
                or '429' in str(e)
            ):
                raise RateLimitError from e
            logger.error(f'Error in generating LLM response: {e}')
            raise
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/cross_encoder/gemini_reranker_client.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:tests/cross_encoder/test_gemini_reranker_client.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Running tests: pytest -xvs tests/cross_encoder/test_gemini_reranker_client.py
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from graphiti_core.cross_encoder.gemini_reranker_client import GeminiRerankerClient
from graphiti_core.llm_client import LLMConfig, RateLimitError
@pytest.fixture
def mock_gemini_client():
    """Yield a patched genai.Client whose async generate_content can be scripted."""
    with patch('google.genai.Client') as patched_client:
        instance = patched_client.return_value
        instance.aio = MagicMock()
        instance.aio.models = MagicMock()
        instance.aio.models.generate_content = AsyncMock()
        yield instance
@pytest.fixture
def gemini_reranker_client(mock_gemini_client):
    """Build a GeminiRerankerClient wired to the mocked genai client."""
    reranker = GeminiRerankerClient(
        config=LLMConfig(api_key='test_api_key', model='test-model')
    )
    # Swap in the mock so no real network client is ever used.
    reranker.client = mock_gemini_client
    return reranker
def create_mock_response(score_text: str) -> MagicMock:
    """Build a stub Gemini response whose `.text` is `score_text`."""
    response = MagicMock()
    response.text = score_text
    return response
class TestGeminiRerankerClientInitialization:
    """Tests for GeminiRerankerClient initialization."""

    def test_init_with_config(self):
        """Test initialization with a config object."""
        config = LLMConfig(api_key='test_api_key', model='test-model')
        client = GeminiRerankerClient(config=config)

        # The exact config instance passed in must be retained.
        assert client.config == config

    @patch('google.genai.Client')
    def test_init_without_config(self, mock_client):
        """Test initialization without a config uses defaults."""
        # genai.Client is patched so no real API client is constructed.
        client = GeminiRerankerClient()

        assert client.config is not None

    def test_init_with_custom_client(self):
        """Test initialization with a custom client."""
        mock_client = MagicMock()
        client = GeminiRerankerClient(client=mock_client)

        # A caller-supplied client must be used as-is.
        assert client.client == mock_client
class TestGeminiRerankerClientRanking:
    """Tests for GeminiRerankerClient rank method.

    Each test scripts the mocked generate_content side effects and checks
    scoring, normalization, ordering, and error classification.
    """

    @pytest.mark.asyncio
    async def test_rank_basic_functionality(self, gemini_reranker_client, mock_gemini_client):
        """Test basic ranking functionality."""
        # Setup mock responses with different scores
        mock_responses = [
            create_mock_response('85'),  # High relevance
            create_mock_response('45'),  # Medium relevance
            create_mock_response('20'),  # Low relevance
        ]
        mock_gemini_client.aio.models.generate_content.side_effect = mock_responses

        # Test data
        query = 'What is the capital of France?'
        passages = [
            'Paris is the capital and most populous city of France.',
            'London is the capital city of England and the United Kingdom.',
            'Berlin is the capital and largest city of Germany.',
        ]

        # Call method
        result = await gemini_reranker_client.rank(query, passages)

        # Assertions
        assert len(result) == 3
        assert all(isinstance(item, tuple) for item in result)
        assert all(
            isinstance(passage, str) and isinstance(score, float) for passage, score in result
        )

        # Check scores are normalized to [0, 1] and sorted in descending order
        scores = [score for _, score in result]
        assert all(0.0 <= score <= 1.0 for score in scores)
        assert scores == sorted(scores, reverse=True)

        # Check that the highest scoring passage is first
        assert result[0][1] == 0.85  # 85/100
        assert result[1][1] == 0.45  # 45/100
        assert result[2][1] == 0.20  # 20/100

    @pytest.mark.asyncio
    async def test_rank_empty_passages(self, gemini_reranker_client):
        """Test ranking with empty passages list."""
        query = 'Test query'
        passages = []

        result = await gemini_reranker_client.rank(query, passages)

        assert result == []

    @pytest.mark.asyncio
    async def test_rank_single_passage(self, gemini_reranker_client, mock_gemini_client):
        """Test ranking with a single passage."""
        # Setup mock response
        mock_gemini_client.aio.models.generate_content.return_value = create_mock_response('75')

        query = 'Test query'
        passages = ['Single test passage']

        result = await gemini_reranker_client.rank(query, passages)

        assert len(result) == 1
        assert result[0][0] == 'Single test passage'
        assert result[0][1] == 1.0  # Single passage gets full score

    @pytest.mark.asyncio
    async def test_rank_score_extraction_with_regex(
        self, gemini_reranker_client, mock_gemini_client
    ):
        """Test score extraction from various response formats."""
        # Setup mock responses with different formats
        mock_responses = [
            create_mock_response('Score: 90'),  # Contains text before number
            create_mock_response('The relevance is 65 out of 100'),  # Contains text around number
            create_mock_response('8'),  # Just the number
        ]
        mock_gemini_client.aio.models.generate_content.side_effect = mock_responses

        query = 'Test query'
        passages = ['Passage 1', 'Passage 2', 'Passage 3']

        result = await gemini_reranker_client.rank(query, passages)

        # Check that scores were extracted correctly and normalized
        scores = [score for _, score in result]
        assert 0.90 in scores  # 90/100
        assert 0.65 in scores  # 65/100
        assert 0.08 in scores  # 8/100

    @pytest.mark.asyncio
    async def test_rank_invalid_score_handling(self, gemini_reranker_client, mock_gemini_client):
        """Test handling of invalid or non-numeric scores."""
        # Setup mock responses with invalid scores
        mock_responses = [
            create_mock_response('Not a number'),  # Invalid response
            create_mock_response(''),  # Empty response
            create_mock_response('95'),  # Valid response
        ]
        mock_gemini_client.aio.models.generate_content.side_effect = mock_responses

        query = 'Test query'
        passages = ['Passage 1', 'Passage 2', 'Passage 3']

        result = await gemini_reranker_client.rank(query, passages)

        # Check that invalid scores are handled gracefully (assigned 0.0)
        scores = [score for _, score in result]
        assert 0.95 in scores  # Valid score
        assert scores.count(0.0) == 2  # Two invalid scores assigned 0.0

    @pytest.mark.asyncio
    async def test_rank_score_clamping(self, gemini_reranker_client, mock_gemini_client):
        """Test that scores are properly clamped to [0, 1] range."""
        # Setup mock responses with extreme scores
        # Note: regex only matches 1-3 digits, so negative numbers won't match
        mock_responses = [
            create_mock_response('999'),  # Above 100 but within regex range
            create_mock_response('invalid'),  # Invalid response becomes 0.0
            create_mock_response('50'),  # Normal score
        ]
        mock_gemini_client.aio.models.generate_content.side_effect = mock_responses

        query = 'Test query'
        passages = ['Passage 1', 'Passage 2', 'Passage 3']

        result = await gemini_reranker_client.rank(query, passages)

        # Check that scores are normalized and clamped
        scores = [score for _, score in result]
        assert all(0.0 <= score <= 1.0 for score in scores)
        # 999 should be clamped to 1.0 (999/100 = 9.99, clamped to 1.0)
        assert 1.0 in scores
        # Invalid response should be 0.0
        assert 0.0 in scores
        # Normal score should be normalized (50/100 = 0.5)
        assert 0.5 in scores

    @pytest.mark.asyncio
    async def test_rank_rate_limit_error(self, gemini_reranker_client, mock_gemini_client):
        """Test handling of rate limit errors."""
        # Setup mock to raise rate limit error
        mock_gemini_client.aio.models.generate_content.side_effect = Exception(
            'Rate limit exceeded'
        )

        query = 'Test query'
        passages = ['Passage 1', 'Passage 2']

        with pytest.raises(RateLimitError):
            await gemini_reranker_client.rank(query, passages)

    @pytest.mark.asyncio
    async def test_rank_quota_error(self, gemini_reranker_client, mock_gemini_client):
        """Test handling of quota errors."""
        # Setup mock to raise quota error
        mock_gemini_client.aio.models.generate_content.side_effect = Exception('Quota exceeded')

        query = 'Test query'
        passages = ['Passage 1', 'Passage 2']

        with pytest.raises(RateLimitError):
            await gemini_reranker_client.rank(query, passages)

    @pytest.mark.asyncio
    async def test_rank_resource_exhausted_error(self, gemini_reranker_client, mock_gemini_client):
        """Test handling of resource exhausted errors."""
        # Setup mock to raise resource exhausted error
        mock_gemini_client.aio.models.generate_content.side_effect = Exception('resource_exhausted')

        query = 'Test query'
        passages = ['Passage 1', 'Passage 2']

        with pytest.raises(RateLimitError):
            await gemini_reranker_client.rank(query, passages)

    @pytest.mark.asyncio
    async def test_rank_429_error(self, gemini_reranker_client, mock_gemini_client):
        """Test handling of HTTP 429 errors."""
        # Setup mock to raise 429 error
        mock_gemini_client.aio.models.generate_content.side_effect = Exception(
            'HTTP 429 Too Many Requests'
        )

        query = 'Test query'
        passages = ['Passage 1', 'Passage 2']

        with pytest.raises(RateLimitError):
            await gemini_reranker_client.rank(query, passages)

    @pytest.mark.asyncio
    async def test_rank_generic_error(self, gemini_reranker_client, mock_gemini_client):
        """Test handling of generic errors."""
        # Setup mock to raise generic error
        mock_gemini_client.aio.models.generate_content.side_effect = Exception('Generic error')

        query = 'Test query'
        passages = ['Passage 1', 'Passage 2']

        # Non-rate-limit errors must propagate unchanged.
        with pytest.raises(Exception) as exc_info:
            await gemini_reranker_client.rank(query, passages)

        assert 'Generic error' in str(exc_info.value)

    @pytest.mark.asyncio
    async def test_rank_concurrent_requests(self, gemini_reranker_client, mock_gemini_client):
        """Test that multiple passages are scored concurrently."""
        # Setup mock responses
        mock_responses = [
            create_mock_response('80'),
            create_mock_response('60'),
            create_mock_response('40'),
        ]
        mock_gemini_client.aio.models.generate_content.side_effect = mock_responses

        query = 'Test query'
        passages = ['Passage 1', 'Passage 2', 'Passage 3']

        await gemini_reranker_client.rank(query, passages)

        # Verify that generate_content was called for each passage
        assert mock_gemini_client.aio.models.generate_content.call_count == 3

        # Verify that all calls were made with correct parameters
        calls = mock_gemini_client.aio.models.generate_content.call_args_list
        for call in calls:
            args, kwargs = call
            assert kwargs['model'] == gemini_reranker_client.config.model
            assert kwargs['config'].temperature == 0.0
            assert kwargs['config'].max_output_tokens == 3

    @pytest.mark.asyncio
    async def test_rank_response_parsing_error(self, gemini_reranker_client, mock_gemini_client):
        """Test handling of response parsing errors."""
        # Setup mock responses that will trigger ValueError during parsing
        mock_responses = [
            create_mock_response('not a number at all'),  # Will fail regex match
            create_mock_response('also invalid text'),  # Will fail regex match
        ]
        mock_gemini_client.aio.models.generate_content.side_effect = mock_responses

        query = 'Test query'
        # Use multiple passages to avoid the single passage special case
        passages = ['Passage 1', 'Passage 2']

        result = await gemini_reranker_client.rank(query, passages)

        # Should handle the error gracefully and assign 0.0 score to both
        assert len(result) == 2
        assert all(score == 0.0 for _, score in result)

    @pytest.mark.asyncio
    async def test_rank_empty_response_text(self, gemini_reranker_client, mock_gemini_client):
        """Test handling of empty response text."""
        # Setup mock response with empty text
        mock_response = MagicMock()
        mock_response.text = ''  # Empty string instead of None
        mock_gemini_client.aio.models.generate_content.return_value = mock_response

        query = 'Test query'
        # Use multiple passages to avoid the single passage special case
        passages = ['Passage 1', 'Passage 2']

        result = await gemini_reranker_client.rank(query, passages)

        # Should handle empty text gracefully and assign 0.0 score to both
        assert len(result) == 2
        assert all(score == 0.0 for _, score in result)
# Allow running this file directly: python test_gemini_reranker_client.py
if __name__ == '__main__':
    pytest.main(['-v', 'test_gemini_reranker_client.py'])
| {
"repo_id": "getzep/graphiti",
"file_path": "tests/cross_encoder/test_gemini_reranker_client.py",
"license": "Apache License 2.0",
"lines": 274,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:tests/llm_client/test_gemini_client.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Running tests: pytest -xvs tests/llm_client/test_gemini_client.py
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from pydantic import BaseModel
from graphiti_core.llm_client.config import LLMConfig, ModelSize
from graphiti_core.llm_client.errors import RateLimitError
from graphiti_core.llm_client.gemini_client import DEFAULT_MODEL, DEFAULT_SMALL_MODEL, GeminiClient
from graphiti_core.prompts.models import Message
# Test model for response testing
class ResponseModel(BaseModel):
    """Pydantic model used as the structured-output target in these tests."""

    # Required string field: exercises the mandatory-field parsing path.
    test_field: str
    # Optional int with a default: JSON missing this key still validates.
    optional_field: int = 0
@pytest.fixture
def mock_gemini_client():
    """Yield a patched google.genai client whose generate_content is an AsyncMock."""
    with patch('google.genai.Client') as client_cls:
        # The instance the production code would construct.
        fake_client = client_cls.return_value
        # Rebuild the async surface: client.aio.models.generate_content(...)
        fake_client.aio = MagicMock()
        fake_client.aio.models = MagicMock()
        fake_client.aio.models.generate_content = AsyncMock()
        yield fake_client
@pytest.fixture
def gemini_client(mock_gemini_client):
    """Build a GeminiClient wired to the mocked genai client (no real API calls)."""
    llm_config = LLMConfig(api_key='test_api_key', model='test-model', temperature=0.5, max_tokens=1000)
    instance = GeminiClient(config=llm_config, cache=False)
    # Force the mock in, even if the constructor created its own client.
    instance.client = mock_gemini_client
    return instance
class TestGeminiClientInitialization:
    """Constructor behavior: config propagation, model defaults, thinking config."""

    @patch('google.genai.Client')
    def test_init_with_config(self, mock_client):
        """A supplied LLMConfig is stored and its fields are mirrored on the client."""
        cfg = LLMConfig(
            api_key='test_api_key', model='test-model', temperature=0.5, max_tokens=1000
        )
        built = GeminiClient(config=cfg, cache=False, max_tokens=1000)

        assert built.config == cfg
        assert built.model == 'test-model'
        assert built.temperature == 0.5
        assert built.max_tokens == 1000

    @patch('google.genai.Client')
    def test_init_with_default_model(self, mock_client):
        """Passing DEFAULT_MODEL through the config is preserved verbatim."""
        built = GeminiClient(
            config=LLMConfig(api_key='test_api_key', model=DEFAULT_MODEL), cache=False
        )
        assert built.model == DEFAULT_MODEL

    @patch('google.genai.Client')
    def test_init_without_config(self, mock_client):
        """With no config at all, a default config is created but model stays unset."""
        built = GeminiClient(cache=False)
        assert built.config is not None
        # When no config.model is set, it will be None, not DEFAULT_MODEL
        assert built.model is None

    @patch('google.genai.Client')
    def test_init_with_thinking_config(self, mock_client):
        """An explicit thinking_config object is stored untouched."""
        with patch('google.genai.types.ThinkingConfig') as thinking_cls:
            cfg = thinking_cls.return_value
            built = GeminiClient(thinking_config=cfg)
            assert built.thinking_config == cfg
class TestGeminiClientGenerateResponse:
    """Tests for GeminiClient generate_response method.

    All tests use the `gemini_client`/`mock_gemini_client` fixtures: the genai
    SDK is fully mocked, so assertions inspect the kwargs passed to
    `aio.models.generate_content` and the parsing of its mocked responses.
    """

    @pytest.mark.asyncio
    async def test_generate_response_simple_text(self, gemini_client, mock_gemini_client):
        """Test successful response generation with simple text."""
        # Setup mock response
        mock_response = MagicMock()
        mock_response.text = 'Test response text'
        mock_response.candidates = []
        mock_response.prompt_feedback = None
        mock_gemini_client.aio.models.generate_content.return_value = mock_response
        # Call method
        messages = [Message(role='user', content='Test message')]
        result = await gemini_client.generate_response(messages)
        # Assertions: plain text is wrapped in a {'content': ...} dict
        assert isinstance(result, dict)
        assert result['content'] == 'Test response text'
        mock_gemini_client.aio.models.generate_content.assert_called_once()

    @pytest.mark.asyncio
    async def test_generate_response_with_structured_output(
        self, gemini_client, mock_gemini_client
    ):
        """Test response generation with structured output."""
        # Setup mock response: JSON matching ResponseModel's schema
        mock_response = MagicMock()
        mock_response.text = '{"test_field": "test_value", "optional_field": 42}'
        mock_response.candidates = []
        mock_response.prompt_feedback = None
        mock_gemini_client.aio.models.generate_content.return_value = mock_response
        # Call method
        messages = [
            Message(role='system', content='System message'),
            Message(role='user', content='User message'),
        ]
        result = await gemini_client.generate_response(
            messages=messages, response_model=ResponseModel
        )
        # Assertions: parsed fields are surfaced directly in the result dict
        assert isinstance(result, dict)
        assert result['test_field'] == 'test_value'
        assert result['optional_field'] == 42
        mock_gemini_client.aio.models.generate_content.assert_called_once()

    @pytest.mark.asyncio
    async def test_generate_response_with_system_message(self, gemini_client, mock_gemini_client):
        """Test response generation with system message handling."""
        # Setup mock response
        mock_response = MagicMock()
        mock_response.text = 'Response with system context'
        mock_response.candidates = []
        mock_response.prompt_feedback = None
        mock_gemini_client.aio.models.generate_content.return_value = mock_response
        # Call method
        messages = [
            Message(role='system', content='System message'),
            Message(role='user', content='User message'),
        ]
        await gemini_client.generate_response(messages)
        # Verify system message is routed into config.system_instruction,
        # not into the regular message contents.
        call_args = mock_gemini_client.aio.models.generate_content.call_args
        config = call_args[1]['config']
        assert 'System message' in config.system_instruction

    @pytest.mark.asyncio
    async def test_get_model_for_size(self, gemini_client):
        """Test model selection based on size."""
        # Test small model
        small_model = gemini_client._get_model_for_size(ModelSize.small)
        assert small_model == DEFAULT_SMALL_MODEL
        # Test medium/large model: falls back to the configured model
        medium_model = gemini_client._get_model_for_size(ModelSize.medium)
        assert medium_model == gemini_client.model

    @pytest.mark.asyncio
    async def test_rate_limit_error_handling(self, gemini_client, mock_gemini_client):
        """Test handling of rate limit errors."""
        # Setup mock to raise rate limit error (detected by message text)
        mock_gemini_client.aio.models.generate_content.side_effect = Exception(
            'Rate limit exceeded'
        )
        # Call method and check exception
        messages = [Message(role='user', content='Test message')]
        with pytest.raises(RateLimitError):
            await gemini_client.generate_response(messages)

    @pytest.mark.asyncio
    async def test_quota_error_handling(self, gemini_client, mock_gemini_client):
        """Test handling of quota errors."""
        # Setup mock to raise quota error ("quota" keyword maps to RateLimitError)
        mock_gemini_client.aio.models.generate_content.side_effect = Exception(
            'Quota exceeded for requests'
        )
        # Call method and check exception
        messages = [Message(role='user', content='Test message')]
        with pytest.raises(RateLimitError):
            await gemini_client.generate_response(messages)

    @pytest.mark.asyncio
    async def test_resource_exhausted_error_handling(self, gemini_client, mock_gemini_client):
        """Test handling of resource exhausted errors."""
        # Setup mock to raise resource exhausted error (gRPC-style code string)
        mock_gemini_client.aio.models.generate_content.side_effect = Exception(
            'resource_exhausted: Request limit exceeded'
        )
        # Call method and check exception
        messages = [Message(role='user', content='Test message')]
        with pytest.raises(RateLimitError):
            await gemini_client.generate_response(messages)

    @pytest.mark.asyncio
    async def test_safety_block_handling(self, gemini_client, mock_gemini_client):
        """Test handling of safety blocks."""
        # Setup mock response with safety block on the candidate
        mock_candidate = MagicMock()
        mock_candidate.finish_reason = 'SAFETY'
        mock_candidate.safety_ratings = [
            MagicMock(blocked=True, category='HARM_CATEGORY_HARASSMENT', probability='HIGH')
        ]
        mock_response = MagicMock()
        mock_response.candidates = [mock_candidate]
        mock_response.prompt_feedback = None
        mock_response.text = ''
        mock_gemini_client.aio.models.generate_content.return_value = mock_response
        # Call method and check exception
        messages = [Message(role='user', content='Test message')]
        with pytest.raises(Exception, match='Content blocked by safety filters'):
            await gemini_client.generate_response(messages)

    @pytest.mark.asyncio
    async def test_prompt_block_handling(self, gemini_client, mock_gemini_client):
        """Test handling of prompt blocks."""
        # Setup mock response with a block reason on prompt_feedback
        # (the prompt itself was rejected, distinct from a candidate block).
        mock_prompt_feedback = MagicMock()
        mock_prompt_feedback.block_reason = 'BLOCKED_REASON_OTHER'
        mock_response = MagicMock()
        mock_response.candidates = []
        mock_response.prompt_feedback = mock_prompt_feedback
        mock_response.text = ''
        mock_gemini_client.aio.models.generate_content.return_value = mock_response
        # Call method and check exception
        messages = [Message(role='user', content='Test message')]
        with pytest.raises(Exception, match='Content blocked by safety filters'):
            await gemini_client.generate_response(messages)

    @pytest.mark.asyncio
    async def test_structured_output_parsing_error(self, gemini_client, mock_gemini_client):
        """Test handling of structured output parsing errors."""
        # Setup mock response with invalid JSON that will exhaust retries
        mock_response = MagicMock()
        mock_response.text = 'Invalid JSON that cannot be parsed'
        mock_response.candidates = []
        mock_response.prompt_feedback = None
        mock_gemini_client.aio.models.generate_content.return_value = mock_response
        # Call method and check exception - should exhaust retries
        messages = [Message(role='user', content='Test message')]
        with pytest.raises(Exception):  # noqa: B017
            await gemini_client.generate_response(messages, response_model=ResponseModel)
        # Should have called generate_content MAX_RETRIES times (2 attempts total)
        assert mock_gemini_client.aio.models.generate_content.call_count == GeminiClient.MAX_RETRIES

    @pytest.mark.asyncio
    async def test_retry_logic_with_safety_block(self, gemini_client, mock_gemini_client):
        """Test that safety blocks are not retried."""
        # Setup mock response with safety block
        mock_candidate = MagicMock()
        mock_candidate.finish_reason = 'SAFETY'
        mock_candidate.safety_ratings = [
            MagicMock(blocked=True, category='HARM_CATEGORY_HARASSMENT', probability='HIGH')
        ]
        mock_response = MagicMock()
        mock_response.candidates = [mock_candidate]
        mock_response.prompt_feedback = None
        mock_response.text = ''
        mock_gemini_client.aio.models.generate_content.return_value = mock_response
        # Call method and check that it doesn't retry
        messages = [Message(role='user', content='Test message')]
        with pytest.raises(Exception, match='Content blocked by safety filters'):
            await gemini_client.generate_response(messages)
        # Should only be called once (no retries for safety blocks)
        assert mock_gemini_client.aio.models.generate_content.call_count == 1

    @pytest.mark.asyncio
    async def test_retry_logic_with_validation_error(self, gemini_client, mock_gemini_client):
        """Test retry behavior on validation error."""
        # First call returns invalid JSON, second call returns valid data
        mock_response1 = MagicMock()
        mock_response1.text = 'Invalid JSON that cannot be parsed'
        mock_response1.candidates = []
        mock_response1.prompt_feedback = None
        mock_response2 = MagicMock()
        mock_response2.text = '{"test_field": "correct_value"}'
        mock_response2.candidates = []
        mock_response2.prompt_feedback = None
        # side_effect list: successive calls pop successive responses
        mock_gemini_client.aio.models.generate_content.side_effect = [
            mock_response1,
            mock_response2,
        ]
        # Call method
        messages = [Message(role='user', content='Test message')]
        result = await gemini_client.generate_response(messages, response_model=ResponseModel)
        # Should have called generate_content twice due to retry
        assert mock_gemini_client.aio.models.generate_content.call_count == 2
        assert result['test_field'] == 'correct_value'

    @pytest.mark.asyncio
    async def test_max_retries_exceeded(self, gemini_client, mock_gemini_client):
        """Test behavior when max retries are exceeded."""
        # Setup mock to always return invalid JSON
        mock_response = MagicMock()
        mock_response.text = 'Invalid JSON that cannot be parsed'
        mock_response.candidates = []
        mock_response.prompt_feedback = None
        mock_gemini_client.aio.models.generate_content.return_value = mock_response
        # Call method and check exception
        messages = [Message(role='user', content='Test message')]
        with pytest.raises(Exception):  # noqa: B017
            await gemini_client.generate_response(messages, response_model=ResponseModel)
        # Should have called generate_content MAX_RETRIES times (2 attempts total)
        assert mock_gemini_client.aio.models.generate_content.call_count == GeminiClient.MAX_RETRIES

    @pytest.mark.asyncio
    async def test_empty_response_handling(self, gemini_client, mock_gemini_client):
        """Test handling of empty responses."""
        # Setup mock response with no text
        mock_response = MagicMock()
        mock_response.text = ''
        mock_response.candidates = []
        mock_response.prompt_feedback = None
        mock_gemini_client.aio.models.generate_content.return_value = mock_response
        # Call method with structured output and check exception
        messages = [Message(role='user', content='Test message')]
        with pytest.raises(Exception):  # noqa: B017
            await gemini_client.generate_response(messages, response_model=ResponseModel)
        # Should have exhausted retries due to empty response (2 attempts total)
        assert mock_gemini_client.aio.models.generate_content.call_count == GeminiClient.MAX_RETRIES

    @pytest.mark.asyncio
    async def test_custom_max_tokens(self, gemini_client, mock_gemini_client):
        """Test that explicit max_tokens parameter takes precedence over all other values."""
        # Setup mock response
        mock_response = MagicMock()
        mock_response.text = 'Test response'
        mock_response.candidates = []
        mock_response.prompt_feedback = None
        mock_gemini_client.aio.models.generate_content.return_value = mock_response
        # Call method with custom max tokens (should take precedence)
        messages = [Message(role='user', content='Test message')]
        await gemini_client.generate_response(messages, max_tokens=500)
        # Verify explicit max_tokens parameter takes precedence
        call_args = mock_gemini_client.aio.models.generate_content.call_args
        config = call_args[1]['config']
        # Explicit parameter should override everything else
        assert config.max_output_tokens == 500

    @pytest.mark.asyncio
    async def test_max_tokens_precedence_fallback(self, mock_gemini_client):
        """Test max_tokens precedence when no explicit parameter is provided."""
        # Setup mock response
        mock_response = MagicMock()
        mock_response.text = 'Test response'
        mock_response.candidates = []
        mock_response.prompt_feedback = None
        mock_gemini_client.aio.models.generate_content.return_value = mock_response
        # Test case 1: No explicit max_tokens, has instance max_tokens
        config = LLMConfig(
            api_key='test_api_key', model='test-model', temperature=0.5, max_tokens=1000
        )
        client = GeminiClient(
            config=config, cache=False, max_tokens=2000, client=mock_gemini_client
        )
        messages = [Message(role='user', content='Test message')]
        await client.generate_response(messages)
        call_args = mock_gemini_client.aio.models.generate_content.call_args
        config = call_args[1]['config']
        # Instance max_tokens should be used
        assert config.max_output_tokens == 2000
        # Test case 2: No explicit max_tokens, no instance max_tokens, uses model mapping
        config = LLMConfig(api_key='test_api_key', model='gemini-2.5-flash', temperature=0.5)
        client = GeminiClient(config=config, cache=False, client=mock_gemini_client)
        messages = [Message(role='user', content='Test message')]
        await client.generate_response(messages)
        call_args = mock_gemini_client.aio.models.generate_content.call_args
        config = call_args[1]['config']
        # Model mapping should be used
        assert config.max_output_tokens == 65536

    @pytest.mark.asyncio
    async def test_model_size_selection(self, gemini_client, mock_gemini_client):
        """Test that the correct model is selected based on model size."""
        # Setup mock response
        mock_response = MagicMock()
        mock_response.text = 'Test response'
        mock_response.candidates = []
        mock_response.prompt_feedback = None
        mock_gemini_client.aio.models.generate_content.return_value = mock_response
        # Call method with small model size
        messages = [Message(role='user', content='Test message')]
        await gemini_client.generate_response(messages, model_size=ModelSize.small)
        # Verify correct model is used
        call_args = mock_gemini_client.aio.models.generate_content.call_args
        assert call_args[1]['model'] == DEFAULT_SMALL_MODEL

    @pytest.mark.asyncio
    async def test_gemini_model_max_tokens_mapping(self, mock_gemini_client):
        """Test that different Gemini models use their correct max tokens."""
        # Setup mock response
        mock_response = MagicMock()
        mock_response.text = 'Test response'
        mock_response.candidates = []
        mock_response.prompt_feedback = None
        mock_gemini_client.aio.models.generate_content.return_value = mock_response
        # Test data: (model_name, expected_max_tokens)
        test_cases = [
            ('gemini-2.5-flash', 65536),
            ('gemini-2.5-pro', 65536),
            ('gemini-2.5-flash-lite', 64000),
            ('gemini-2.0-flash', 8192),
            ('gemini-1.5-pro', 8192),
            ('gemini-1.5-flash', 8192),
            ('unknown-model', 8192),  # Fallback case
        ]
        for model_name, expected_max_tokens in test_cases:
            # Create client with specific model, no explicit max_tokens to test mapping
            config = LLMConfig(api_key='test_api_key', model=model_name, temperature=0.5)
            client = GeminiClient(config=config, cache=False, client=mock_gemini_client)
            # Call method without explicit max_tokens to test model mapping fallback
            messages = [Message(role='user', content='Test message')]
            await client.generate_response(messages)
            # Verify correct max tokens is used from model mapping
            call_args = mock_gemini_client.aio.models.generate_content.call_args
            config = call_args[1]['config']
            assert config.max_output_tokens == expected_max_tokens, (
                f'Model {model_name} should use {expected_max_tokens} tokens'
            )
# Allow running this test module directly (outside the pytest CLI).
if __name__ == '__main__':
    pytest.main(['-v', 'test_gemini_client.py'])
| {
"repo_id": "getzep/graphiti",
"file_path": "tests/llm_client/test_gemini_client.py",
"license": "Apache License 2.0",
"lines": 396,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:tests/driver/test_falkordb_driver.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
from datetime import datetime, timezone
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from graphiti_core.driver.driver import GraphProvider
# FalkorDB is an optional dependency: import the driver if available and
# record the result so the @unittest.skipIf guards below can skip cleanly.
try:
    from graphiti_core.driver.falkordb_driver import FalkorDriver, FalkorDriverSession

    HAS_FALKORDB = True
except ImportError:
    # Keep the name defined so module-level references don't NameError.
    FalkorDriver = None
    HAS_FALKORDB = False
class TestFalkorDriver:
    """Comprehensive test suite for FalkorDB driver.

    The FalkorDB client is replaced with a MagicMock in setup_method, so every
    test inspects calls to the mock rather than talking to a real database.
    """

    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    def setup_method(self):
        """Set up test fixtures."""
        self.mock_client = MagicMock()
        # Patch the FalkorDB class so constructing the driver makes no connection.
        with patch('graphiti_core.driver.falkordb_driver.FalkorDB'):
            self.driver = FalkorDriver()
        self.driver.client = self.mock_client

    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    def test_init_with_connection_params(self):
        """Test initialization with connection parameters."""
        with patch('graphiti_core.driver.falkordb_driver.FalkorDB') as mock_falkor_db:
            driver = FalkorDriver(
                host='test-host', port='1234', username='test-user', password='test-pass'
            )
            assert driver.provider == GraphProvider.FALKORDB
            # Connection params must be forwarded verbatim to FalkorDB.
            mock_falkor_db.assert_called_once_with(
                host='test-host', port='1234', username='test-user', password='test-pass'
            )

    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    def test_init_with_falkor_db_instance(self):
        """Test initialization with a FalkorDB instance."""
        with patch('graphiti_core.driver.falkordb_driver.FalkorDB') as mock_falkor_db_class:
            mock_falkor_db = MagicMock()
            driver = FalkorDriver(falkor_db=mock_falkor_db)
            assert driver.provider == GraphProvider.FALKORDB
            # The supplied instance is used as-is; no new client is built.
            assert driver.client is mock_falkor_db
            mock_falkor_db_class.assert_not_called()

    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    def test_provider(self):
        """Test driver provider identification."""
        assert self.driver.provider == GraphProvider.FALKORDB

    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    def test_get_graph_with_name(self):
        """Test _get_graph with specific graph name."""
        mock_graph = MagicMock()
        self.mock_client.select_graph.return_value = mock_graph
        result = self.driver._get_graph('test_graph')
        self.mock_client.select_graph.assert_called_once_with('test_graph')
        assert result is mock_graph

    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    def test_get_graph_with_none_defaults_to_default_database(self):
        """Test _get_graph with None defaults to default_db."""
        mock_graph = MagicMock()
        self.mock_client.select_graph.return_value = mock_graph
        result = self.driver._get_graph(None)
        self.mock_client.select_graph.assert_called_once_with('default_db')
        assert result is mock_graph

    @pytest.mark.asyncio
    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    async def test_execute_query_success(self):
        """Test successful query execution."""
        mock_graph = MagicMock()
        mock_result = MagicMock()
        # header entries are (type, name) pairs; only the name is surfaced.
        mock_result.header = [('col1', 'column1'), ('col2', 'column2')]
        mock_result.result_set = [['row1col1', 'row1col2']]
        mock_graph.query = AsyncMock(return_value=mock_result)
        self.mock_client.select_graph.return_value = mock_graph
        result = await self.driver.execute_query('MATCH (n) RETURN n', param1='value1')
        mock_graph.query.assert_called_once_with('MATCH (n) RETURN n', {'param1': 'value1'})
        # Rows come back as dicts keyed by column name, Neo4j-style.
        result_set, header, summary = result
        assert result_set == [{'column1': 'row1col1', 'column2': 'row1col2'}]
        assert header == ['column1', 'column2']
        assert summary is None

    @pytest.mark.asyncio
    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    async def test_execute_query_handles_index_already_exists_error(self):
        """Test handling of 'already indexed' error."""
        mock_graph = MagicMock()
        mock_graph.query = AsyncMock(side_effect=Exception('Index already indexed'))
        self.mock_client.select_graph.return_value = mock_graph
        with patch('graphiti_core.driver.falkordb_driver.logger') as mock_logger:
            # Duplicate-index errors are logged at info level, not raised.
            result = await self.driver.execute_query('CREATE INDEX ...')
            mock_logger.info.assert_called_once()
            assert result is None

    @pytest.mark.asyncio
    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    async def test_execute_query_propagates_other_exceptions(self):
        """Test that other exceptions are properly propagated."""
        mock_graph = MagicMock()
        mock_graph.query = AsyncMock(side_effect=Exception('Other error'))
        self.mock_client.select_graph.return_value = mock_graph
        with patch('graphiti_core.driver.falkordb_driver.logger') as mock_logger:
            with pytest.raises(Exception, match='Other error'):
                await self.driver.execute_query('INVALID QUERY')
            mock_logger.error.assert_called_once()

    @pytest.mark.asyncio
    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    async def test_execute_query_converts_datetime_parameters(self):
        """Test that datetime objects in kwargs are converted to ISO strings."""
        mock_graph = MagicMock()
        mock_result = MagicMock()
        mock_result.header = []
        mock_result.result_set = []
        mock_graph.query = AsyncMock(return_value=mock_result)
        self.mock_client.select_graph.return_value = mock_graph
        test_datetime = datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
        await self.driver.execute_query(
            'CREATE (n:Node) SET n.created_at = $created_at', created_at=test_datetime
        )
        # FalkorDB cannot store datetime objects, so the driver serializes them.
        call_args = mock_graph.query.call_args[0]
        assert call_args[1]['created_at'] == test_datetime.isoformat()

    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    def test_session_creation(self):
        """Test session creation with specific database."""
        mock_graph = MagicMock()
        self.mock_client.select_graph.return_value = mock_graph
        session = self.driver.session()
        assert isinstance(session, FalkorDriverSession)
        assert session.graph is mock_graph

    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    def test_session_creation_with_none_uses_default_database(self):
        """Test session creation with None uses default database."""
        mock_graph = MagicMock()
        self.mock_client.select_graph.return_value = mock_graph
        session = self.driver.session()
        assert isinstance(session, FalkorDriverSession)

    @pytest.mark.asyncio
    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    async def test_close_calls_connection_close(self):
        """Test driver close method calls connection close."""
        mock_connection = MagicMock()
        mock_connection.close = AsyncMock()
        self.mock_client.connection = mock_connection
        # Ensure hasattr checks work correctly
        del self.mock_client.aclose  # Remove aclose if it exists
        with patch('builtins.hasattr') as mock_hasattr:
            # hasattr(self.client, 'aclose') returns False
            # hasattr(self.client.connection, 'aclose') returns False
            # hasattr(self.client.connection, 'close') returns True
            mock_hasattr.side_effect = lambda obj, attr: (
                attr == 'close' and obj is mock_connection
            )
            await self.driver.close()
            mock_connection.close.assert_called_once()

    @pytest.mark.asyncio
    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    async def test_delete_all_indexes(self):
        """Test delete_all_indexes method."""
        with patch.object(self.driver, 'execute_query', new_callable=AsyncMock) as mock_execute:
            # Return None to simulate no indexes found
            mock_execute.return_value = None
            await self.driver.delete_all_indexes()
            mock_execute.assert_called_once_with('CALL db.indexes()')
class TestFalkorDriverSession:
    """Test FalkorDB driver session functionality.

    Each test wraps a fresh MagicMock graph in a FalkorDriverSession and
    verifies how queries and parameters are forwarded to graph.query.
    """

    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    def setup_method(self):
        """Set up test fixtures."""
        self.mock_graph = MagicMock()
        self.session = FalkorDriverSession(self.mock_graph)

    @pytest.mark.asyncio
    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    async def test_session_async_context_manager(self):
        """Test session can be used as async context manager."""
        # __aenter__ should hand back the session itself.
        async with self.session as s:
            assert s is self.session

    @pytest.mark.asyncio
    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    async def test_close_method(self):
        """Test session close method doesn't raise exceptions."""
        await self.session.close()  # Should not raise

    @pytest.mark.asyncio
    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    async def test_execute_write_passes_session_and_args(self):
        """Test execute_write method passes session and arguments correctly."""

        # The transaction function receives the session plus the caller's args.
        async def test_func(session, *args, **kwargs):
            assert session is self.session
            assert args == ('arg1', 'arg2')
            assert kwargs == {'key': 'value'}
            return 'result'

        result = await self.session.execute_write(test_func, 'arg1', 'arg2', key='value')
        assert result == 'result'

    @pytest.mark.asyncio
    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    async def test_run_single_query_with_parameters(self):
        """Test running a single query with parameters."""
        self.mock_graph.query = AsyncMock()
        await self.session.run('MATCH (n) RETURN n', param1='value1', param2='value2')
        self.mock_graph.query.assert_called_once_with(
            'MATCH (n) RETURN n', {'param1': 'value1', 'param2': 'value2'}
        )

    @pytest.mark.asyncio
    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    async def test_run_multiple_queries_as_list(self):
        """Test running multiple queries passed as list."""
        self.mock_graph.query = AsyncMock()
        # A list of (query, params) tuples is executed one by one, in order.
        queries = [
            ('MATCH (n) RETURN n', {'param1': 'value1'}),
            ('CREATE (n:Node)', {'param2': 'value2'}),
        ]
        await self.session.run(queries)
        assert self.mock_graph.query.call_count == 2
        calls = self.mock_graph.query.call_args_list
        assert calls[0][0] == ('MATCH (n) RETURN n', {'param1': 'value1'})
        assert calls[1][0] == ('CREATE (n:Node)', {'param2': 'value2'})

    @pytest.mark.asyncio
    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    async def test_run_converts_datetime_objects_to_iso_strings(self):
        """Test that datetime objects are converted to ISO strings."""
        self.mock_graph.query = AsyncMock()
        test_datetime = datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
        await self.session.run(
            'CREATE (n:Node) SET n.created_at = $created_at', created_at=test_datetime
        )
        self.mock_graph.query.assert_called_once()
        call_args = self.mock_graph.query.call_args[0]
        assert call_args[1]['created_at'] == test_datetime.isoformat()
class TestDatetimeConversion:
    """Unit tests for the convert_datetimes_to_strings helper."""

    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    def test_convert_datetime_dict(self):
        """Datetimes are serialized recursively through nested dicts."""
        from graphiti_core.driver.falkordb_driver import convert_datetimes_to_strings

        dt = datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
        payload = {
            'string_val': 'test',
            'datetime_val': dt,
            'nested_dict': {'nested_datetime': dt, 'nested_string': 'nested_test'},
        }

        converted = convert_datetimes_to_strings(payload)

        # Non-datetime leaves pass through; datetimes become ISO strings.
        assert converted['string_val'] == 'test'
        assert converted['datetime_val'] == dt.isoformat()
        assert converted['nested_dict']['nested_datetime'] == dt.isoformat()
        assert converted['nested_dict']['nested_string'] == 'nested_test'

    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    def test_convert_datetime_list_and_tuple(self):
        """Datetimes are serialized inside lists and tuples, preserving container type."""
        from graphiti_core.driver.falkordb_driver import convert_datetimes_to_strings

        dt = datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc)

        # Lists, including nested lists.
        converted_list = convert_datetimes_to_strings(['test', dt, ['nested', dt]])
        assert converted_list[0] == 'test'
        assert converted_list[1] == dt.isoformat()
        assert converted_list[2][1] == dt.isoformat()

        # Tuples stay tuples after conversion.
        converted_tuple = convert_datetimes_to_strings(('test', dt))
        assert isinstance(converted_tuple, tuple)
        assert converted_tuple[0] == 'test'
        assert converted_tuple[1] == dt.isoformat()

    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    def test_convert_single_datetime(self):
        """A bare datetime value converts straight to its ISO string."""
        from graphiti_core.driver.falkordb_driver import convert_datetimes_to_strings

        dt = datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
        assert convert_datetimes_to_strings(dt) == dt.isoformat()

    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    def test_convert_other_types_unchanged(self):
        """Scalars that are not datetimes come back unmodified."""
        from graphiti_core.driver.falkordb_driver import convert_datetimes_to_strings

        assert convert_datetimes_to_strings('string') == 'string'
        assert convert_datetimes_to_strings(123) == 123
        assert convert_datetimes_to_strings(None) is None
        assert convert_datetimes_to_strings(True) is True
# Simple integration test
class TestFalkorDriverIntegration:
    """Smoke test against a live FalkorDB instance, skipped when unavailable."""

    @pytest.mark.asyncio
    @unittest.skipIf(not HAS_FALKORDB, 'FalkorDB is not installed')
    async def test_basic_integration_with_real_falkordb(self):
        """Round-trip a trivial query through a real FalkorDB server."""
        pytest.importorskip('falkordb')

        # Connection details come from the environment, defaulting to localhost.
        host = os.getenv('FALKORDB_HOST', 'localhost')
        port = os.getenv('FALKORDB_PORT', '6379')

        try:
            driver = FalkorDriver(host=host, port=port)
            outcome = await driver.execute_query('RETURN 1 as test')
            assert outcome is not None
            rows, columns, summary = outcome
            assert columns == ['test']
            assert rows == [{'test': 1}]
            await driver.close()
        except Exception as e:
            # No reachable server is not a failure for this suite.
            pytest.skip(f'FalkorDB not available for integration test: {e}')
| {
"repo_id": "getzep/graphiti",
"file_path": "tests/driver/test_falkordb_driver.py",
"license": "Apache License 2.0",
"lines": 303,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:graphiti_core/telemetry/telemetry.py | """
Telemetry client for Graphiti.
Collects anonymous usage statistics to help improve the product.
"""
import contextlib
import os
import platform
import sys
import uuid
from pathlib import Path
from typing import Any
# PostHog configuration
# Note: This is a public API key intended for client-side use and safe to commit
# PostHog public keys are designed to be exposed in client applications
POSTHOG_API_KEY = 'phc_UG6EcfDbuXz92neb3rMlQFDY0csxgMqRcIPWESqnSmo'
POSTHOG_HOST = 'https://us.i.posthog.com'

# Environment variable to control telemetry (read by is_telemetry_enabled)
TELEMETRY_ENV_VAR = 'GRAPHITI_TELEMETRY_ENABLED'

# Cache directory for anonymous ID; the file holds a single plain-text UUID
CACHE_DIR = Path.home() / '.cache' / 'graphiti'
ANON_ID_FILE = CACHE_DIR / 'telemetry_anon_id'
def is_telemetry_enabled() -> bool:
    """Return True when anonymous telemetry collection is allowed.

    Telemetry is force-disabled inside pytest runs; otherwise the
    environment variable named by TELEMETRY_ENV_VAR decides, defaulting
    to enabled.
    """
    # Never emit telemetry events from test runs.
    if 'pytest' in sys.modules:
        return False
    # Any common "truthy" spelling of the env var enables telemetry.
    flag = os.environ.get(TELEMETRY_ENV_VAR, 'true').lower()
    return flag in {'true', '1', 'yes', 'on'}
def get_anonymous_id() -> str:
    """Get or create the anonymous installation ID used for telemetry.

    A random UUID4 is persisted under CACHE_DIR so repeat runs report a
    stable identity. All failures degrade gracefully: an unreadable or
    unwritable cache yields a fresh (possibly unpersisted) ID, and any
    unexpected error yields the sentinel 'UNKNOWN'.

    Returns:
        The stored or newly generated ID string, or 'UNKNOWN' on error.
    """
    try:
        # Create cache directory if it doesn't exist
        CACHE_DIR.mkdir(parents=True, exist_ok=True)

        # Reuse a previously persisted ID when present and non-empty.
        # Bug fix: an existing-but-empty (or whitespace-only) file used to be
        # returned as '' forever; treat it as missing and regenerate instead.
        if ANON_ID_FILE.exists():
            with contextlib.suppress(Exception):
                stored = ANON_ID_FILE.read_text().strip()
                if stored:
                    return stored

        # Generate new ID
        anon_id = str(uuid.uuid4())

        # Persisting is best-effort; a read-only cache must not break callers.
        with contextlib.suppress(Exception):
            ANON_ID_FILE.write_text(anon_id)

        return anon_id
    except Exception:
        # Telemetry must never disrupt the host application.
        return 'UNKNOWN'
def get_graphiti_version() -> str:
    """Return the installed graphiti-core package version, or 'unknown'."""
    try:
        # Resolve the version from installed package metadata.
        from importlib.metadata import version

        return version('graphiti-core')
    except Exception:
        # Package not installed (or metadata unavailable).
        return 'unknown'
def initialize_posthog():
    """Configure and return the posthog module, or None when unavailable.

    A None return (missing dependency or any setup error) silently
    disables telemetry.
    """
    try:
        import posthog
    except Exception:
        # PostHog not installed (or import failed) — disable telemetry.
        return None
    try:
        posthog.api_key = POSTHOG_API_KEY
        posthog.host = POSTHOG_HOST
    except Exception:
        # Any configuration error also silently disables telemetry.
        return None
    return posthog
def capture_event(event_name: str, properties: dict[str, Any] | None = None) -> None:
    """Fire-and-forget a telemetry event; never raises.

    No-op when telemetry is disabled or the PostHog client is unavailable.
    """
    if not is_telemetry_enabled():
        return

    try:
        client = initialize_posthog()
        if client is None:
            return

        distinct_id = get_anonymous_id()

        # Base properties are always attached; caller-supplied ones may
        # override them since they are merged last.
        event_properties = {
            '$process_person_profile': False,
            'graphiti_version': get_graphiti_version(),
            'architecture': platform.machine(),
            **(properties or {}),
        }

        client.capture(distinct_id=distinct_id, event=event_name, properties=event_properties)
    except Exception:
        # Silently swallow all telemetry errors so the main application
        # is never disrupted.
        pass
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/telemetry/telemetry.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:tests/test_entity_exclusion_int.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime, timezone
import pytest
from pydantic import BaseModel, Field
from graphiti_core.graphiti import Graphiti
from graphiti_core.helpers import validate_excluded_entity_types
from tests.helpers_test import drivers, get_driver
pytestmark = pytest.mark.integration
pytest_plugins = ('pytest_asyncio',)
# Test entity type definitions
class Person(BaseModel):
    """A human person mentioned in the conversation."""

    # All fields are optional: extraction may only surface a subset of them.
    first_name: str | None = Field(None, description='First name of the person')
    last_name: str | None = Field(None, description='Last name of the person')
    occupation: str | None = Field(None, description='Job or profession of the person')
class Organization(BaseModel):
    """A company, institution, or organized group."""

    # Optional descriptors; extraction fills in whatever the text supports.
    organization_type: str | None = Field(
        None, description='Type of organization (company, NGO, etc.)'
    )
    industry: str | None = Field(
        None, description='Industry or sector the organization operates in'
    )
class Location(BaseModel):
    """A geographic location, place, or address."""

    # Optional descriptors; extraction fills in whatever the text supports.
    location_type: str | None = Field(
        None, description='Type of location (city, country, building, etc.)'
    )
    coordinates: str | None = Field(None, description='Geographic coordinates if available')
@pytest.mark.asyncio
@pytest.mark.parametrize(
    'driver',
    drivers,
)
async def test_exclude_default_entity_type(driver):
    """Test excluding the default 'Entity' type while keeping custom types.

    Ingests one episode with 'Entity' excluded, then verifies every
    persisted node carries a specific custom label (Person/Organization)
    in addition to the structural Entity label.
    """
    graphiti = Graphiti(graph_driver=get_driver(driver))

    try:
        await graphiti.build_indices_and_constraints()

        # Define entity types but exclude the default 'Entity' type
        entity_types = {
            'Person': Person,
            'Organization': Organization,
        }

        # Add an episode that would normally create both Entity and custom type entities
        episode_content = (
            'John Smith works at Acme Corporation in New York. The weather is nice today.'
        )

        result = await graphiti.add_episode(
            name='Business Meeting',
            episode_body=episode_content,
            source_description='Meeting notes',
            reference_time=datetime.now(timezone.utc),
            entity_types=entity_types,
            excluded_entity_types=['Entity'],  # Exclude default type
            group_id='test_exclude_default',
        )

        # Verify that nodes were created (custom types should still work)
        assert result is not None

        # Search for nodes to verify only custom types were created
        search_results = await graphiti.search_(
            query='John Smith Acme Corporation', group_ids=['test_exclude_default']
        )

        # Check that entities were created but with specific types, not default 'Entity'
        found_nodes = search_results.nodes
        for node in found_nodes:
            assert 'Entity' in node.labels  # All nodes should have Entity label
            # But they should also have specific type labels
            assert any(label in ['Person', 'Organization'] for label in node.labels), (
                f'Node {node.name} should have a specific type label, got: {node.labels}'
            )

        # Clean up
        await _cleanup_test_nodes(graphiti, 'test_exclude_default')
    finally:
        await graphiti.close()
@pytest.mark.asyncio
@pytest.mark.parametrize(
    'driver',
    drivers,
)
async def test_exclude_specific_custom_types(driver):
    """Test excluding specific custom entity types while keeping others.

    Excludes Organization and Location, then asserts no persisted node
    carries either excluded label while at least one Person survives.
    """
    graphiti = Graphiti(graph_driver=get_driver(driver))

    try:
        await graphiti.build_indices_and_constraints()

        # Define multiple entity types
        entity_types = {
            'Person': Person,
            'Organization': Organization,
            'Location': Location,
        }

        # Add an episode with content that would create all types
        episode_content = (
            'Sarah Johnson from Google visited the San Francisco office to discuss the new project.'
        )

        result = await graphiti.add_episode(
            name='Office Visit',
            episode_body=episode_content,
            source_description='Visit report',
            reference_time=datetime.now(timezone.utc),
            entity_types=entity_types,
            excluded_entity_types=['Organization', 'Location'],  # Exclude these types
            group_id='test_exclude_custom',
        )

        assert result is not None

        # Search for nodes to verify only Person and Entity types were created
        search_results = await graphiti.search_(
            query='Sarah Johnson Google San Francisco', group_ids=['test_exclude_custom']
        )

        found_nodes = search_results.nodes
        # Should have Person and Entity type nodes, but no Organization or Location
        for node in found_nodes:
            assert 'Entity' in node.labels
            # Should not have excluded types
            assert 'Organization' not in node.labels, (
                f'Found excluded Organization in node: {node.name}'
            )
            assert 'Location' not in node.labels, f'Found excluded Location in node: {node.name}'

        # Should find at least one Person entity (Sarah Johnson)
        person_nodes = [n for n in found_nodes if 'Person' in n.labels]
        assert len(person_nodes) > 0, 'Should have found at least one Person entity'

        # Clean up
        await _cleanup_test_nodes(graphiti, 'test_exclude_custom')
    finally:
        await graphiti.close()
@pytest.mark.asyncio
@pytest.mark.parametrize(
    'driver',
    drivers,
)
async def test_exclude_all_types(driver):
    """Test excluding all entity types (edge case).

    With every type (including the default 'Entity') excluded, the episode
    should produce zero persisted entities for its group.
    """
    graphiti = Graphiti(graph_driver=get_driver(driver))

    try:
        await graphiti.build_indices_and_constraints()

        entity_types = {
            'Person': Person,
            'Organization': Organization,
        }

        # Exclude all types
        result = await graphiti.add_episode(
            name='No Entities',
            episode_body='This text mentions John and Microsoft but no entities should be created.',
            source_description='Test content',
            reference_time=datetime.now(timezone.utc),
            entity_types=entity_types,
            excluded_entity_types=['Entity', 'Person', 'Organization'],  # Exclude everything
            group_id='test_exclude_all',
        )

        assert result is not None

        # Search for nodes - should find very few or none from this episode
        search_results = await graphiti.search_(
            query='John Microsoft', group_ids=['test_exclude_all']
        )

        # There should be minimal to no entities created
        found_nodes = search_results.nodes
        assert len(found_nodes) == 0, (
            f'Expected no entities, but found: {[n.name for n in found_nodes]}'
        )

        # Clean up
        await _cleanup_test_nodes(graphiti, 'test_exclude_all')
    finally:
        await graphiti.close()
@pytest.mark.asyncio
@pytest.mark.parametrize(
    'driver',
    drivers,
)
async def test_exclude_no_types(driver):
    """Test normal behavior when no types are excluded (baseline test).

    With excluded_entity_types=None, both Person and Organization entities
    should be persisted for the episode's group.
    """
    graphiti = Graphiti(graph_driver=get_driver(driver))

    try:
        await graphiti.build_indices_and_constraints()

        entity_types = {
            'Person': Person,
            'Organization': Organization,
        }

        # Don't exclude any types
        result = await graphiti.add_episode(
            name='Normal Behavior',
            episode_body='Alice Smith works at TechCorp.',
            source_description='Normal test',
            reference_time=datetime.now(timezone.utc),
            entity_types=entity_types,
            excluded_entity_types=None,  # No exclusions
            group_id='test_exclude_none',
        )

        assert result is not None

        # Search for nodes - should find entities of all types
        search_results = await graphiti.search_(
            query='Alice Smith TechCorp', group_ids=['test_exclude_none']
        )

        found_nodes = search_results.nodes
        assert len(found_nodes) > 0, 'Should have found some entities'

        # Should have both Person and Organization entities
        person_nodes = [n for n in found_nodes if 'Person' in n.labels]
        org_nodes = [n for n in found_nodes if 'Organization' in n.labels]
        assert len(person_nodes) > 0, 'Should have found Person entities'
        assert len(org_nodes) > 0, 'Should have found Organization entities'

        # Clean up
        await _cleanup_test_nodes(graphiti, 'test_exclude_none')
    finally:
        await graphiti.close()
def test_validation_valid_excluded_types():
    """Validation accepts exclusions that name known entity types (or none)."""
    entity_types = {
        'Person': Person,
        'Organization': Organization,
    }

    # Every one of these exclusion lists is legal, including None and empty.
    valid_exclusions = [
        ['Entity'],
        ['Person'],
        ['Entity', 'Person'],
        None,
        [],
    ]
    for excluded in valid_exclusions:
        assert validate_excluded_entity_types(excluded, entity_types) is True
def test_validation_invalid_excluded_types():
    """Validation rejects exclusion lists that name unknown entity types."""
    entity_types = {
        'Person': Person,
        'Organization': Organization,
    }

    # Both a fully unknown type and a mixed known/unknown list must raise.
    for excluded in (['InvalidType'], ['Person', 'NonExistentType']):
        with pytest.raises(ValueError, match='Invalid excluded entity types'):
            validate_excluded_entity_types(excluded, entity_types)
@pytest.mark.asyncio
@pytest.mark.parametrize(
    'driver',
    drivers,
)
async def test_excluded_types_parameter_validation_in_add_episode(driver):
    """Test that add_episode validates excluded_entity_types parameter.

    The ValueError must surface before any episode processing happens.
    """
    graphiti = Graphiti(graph_driver=get_driver(driver))

    try:
        entity_types = {
            'Person': Person,
        }

        # Should raise ValueError for invalid excluded type
        with pytest.raises(ValueError, match='Invalid excluded entity types'):
            await graphiti.add_episode(
                name='Invalid Test',
                episode_body='Test content',
                source_description='Test',
                reference_time=datetime.now(timezone.utc),
                entity_types=entity_types,
                excluded_entity_types=['NonExistentType'],
                group_id='test_validation',
            )
    finally:
        await graphiti.close()
async def _cleanup_test_nodes(graphiti: Graphiti, group_id: str):
    """Best-effort removal of every node created under *group_id*."""
    try:
        # A wildcard search retrieves all nodes scoped to this test group.
        results = await graphiti.search_(query='*', group_ids=[group_id])
        for node in results.nodes:
            await node.delete(graphiti.driver)
    except Exception as e:
        # Cleanup is best-effort: log the problem but never fail the test.
        print(f'Warning: Failed to clean up test nodes for group {group_id}: {e}')
| {
"repo_id": "getzep/graphiti",
"file_path": "tests/test_entity_exclusion_int.py",
"license": "Apache License 2.0",
"lines": 273,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
getzep/graphiti:graphiti_core/llm_client/openai_base_client.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import logging
import typing
from abc import abstractmethod
from typing import Any, ClassVar
import openai
from openai.types.chat import ChatCompletionMessageParam
from pydantic import BaseModel
from ..prompts.models import Message
from .client import LLMClient, get_extraction_language_instruction
from .config import DEFAULT_MAX_TOKENS, LLMConfig, ModelSize
from .errors import RateLimitError, RefusalError
logger = logging.getLogger(__name__)
DEFAULT_MODEL = 'gpt-4.1-mini'
DEFAULT_SMALL_MODEL = 'gpt-4.1-nano'
DEFAULT_REASONING = 'minimal'
DEFAULT_VERBOSITY = 'low'
class BaseOpenAIClient(LLMClient):
    """
    Base client class for OpenAI-compatible APIs (OpenAI and Azure OpenAI).

    This class contains shared logic for both OpenAI and Azure OpenAI clients,
    reducing code duplication while allowing for implementation-specific differences.
    """

    # Class-level constants
    # Application-level retries performed in generate_response on top of any
    # retrying the underlying OpenAI SDK does itself.
    MAX_RETRIES: ClassVar[int] = 2

    def __init__(
        self,
        config: LLMConfig | None = None,
        cache: bool = False,
        max_tokens: int = DEFAULT_MAX_TOKENS,
        reasoning: str | None = DEFAULT_REASONING,
        verbosity: str | None = DEFAULT_VERBOSITY,
    ):
        """Initialize shared client state.

        Args:
            config: LLM configuration; a default LLMConfig is used when None.
            cache: Unsupported here; passing True raises NotImplementedError.
            max_tokens: Default completion token budget.
            reasoning: Hint forwarded to structured completions.
            verbosity: Hint forwarded to structured completions.
        """
        if cache:
            raise NotImplementedError('Caching is not implemented for OpenAI-based clients')

        if config is None:
            config = LLMConfig()

        super().__init__(config, cache)
        self.max_tokens = max_tokens
        self.reasoning = reasoning
        self.verbosity = verbosity

    @abstractmethod
    async def _create_completion(
        self,
        model: str,
        messages: list[ChatCompletionMessageParam],
        temperature: float | None,
        max_tokens: int,
        response_model: type[BaseModel] | None = None,
    ) -> Any:
        """Create a completion using the specific client implementation."""
        pass

    @abstractmethod
    async def _create_structured_completion(
        self,
        model: str,
        messages: list[ChatCompletionMessageParam],
        temperature: float | None,
        max_tokens: int,
        response_model: type[BaseModel],
        reasoning: str | None,
        verbosity: str | None,
    ) -> Any:
        """Create a structured completion using the specific client implementation."""
        pass

    def _convert_messages_to_openai_format(
        self, messages: list[Message]
    ) -> list[ChatCompletionMessageParam]:
        """Convert internal Message format to OpenAI ChatCompletionMessageParam format.

        NOTE: mutates each input Message in place (content is cleaned), and
        messages with roles other than 'user'/'system' are silently dropped.
        """
        openai_messages: list[ChatCompletionMessageParam] = []
        for m in messages:
            m.content = self._clean_input(m.content)
            if m.role == 'user':
                openai_messages.append({'role': 'user', 'content': m.content})
            elif m.role == 'system':
                openai_messages.append({'role': 'system', 'content': m.content})
        return openai_messages

    def _get_model_for_size(self, model_size: ModelSize) -> str:
        """Get the appropriate model name based on the requested size.

        Falls back to the module defaults when the instance has no model set.
        """
        if model_size == ModelSize.small:
            return self.small_model or DEFAULT_SMALL_MODEL
        else:
            return self.model or DEFAULT_MODEL

    def _handle_structured_response(self, response: Any) -> tuple[dict[str, Any], int, int]:
        """Handle structured response parsing and validation.

        Raises:
            RefusalError: when the model refused to answer.
            Exception: when the response contains no parsable output.

        Returns:
            tuple: (parsed_response, input_tokens, output_tokens)
        """
        # NOTE(review): `output_text` suggests the Responses API response
        # shape (vs. chat completions in _handle_json_response) — confirm.
        response_object = response.output_text

        # Extract token usage
        input_tokens = 0
        output_tokens = 0
        if hasattr(response, 'usage') and response.usage:
            input_tokens = getattr(response.usage, 'input_tokens', 0) or 0
            output_tokens = getattr(response.usage, 'output_tokens', 0) or 0

        if response_object:
            return json.loads(response_object), input_tokens, output_tokens
        elif hasattr(response, 'refusal') and response.refusal:
            raise RefusalError(response.refusal)
        else:
            raise Exception(f'Invalid response from LLM: {response}')

    def _handle_json_response(self, response: Any) -> tuple[dict[str, Any], int, int]:
        """Handle JSON response parsing.

        Returns:
            tuple: (parsed_response, input_tokens, output_tokens)
        """
        # Empty content is treated as an empty JSON object rather than an error.
        result = response.choices[0].message.content or '{}'

        # Extract token usage (chat-completions field names)
        input_tokens = 0
        output_tokens = 0
        if hasattr(response, 'usage') and response.usage:
            input_tokens = getattr(response.usage, 'prompt_tokens', 0) or 0
            output_tokens = getattr(response.usage, 'completion_tokens', 0) or 0

        return json.loads(result), input_tokens, output_tokens

    async def _generate_response(
        self,
        messages: list[Message],
        response_model: type[BaseModel] | None = None,
        max_tokens: int = DEFAULT_MAX_TOKENS,
        model_size: ModelSize = ModelSize.medium,
    ) -> tuple[dict[str, Any], int, int]:
        """Generate a response using the appropriate client implementation.

        Dispatches to the structured or plain completion path depending on
        whether a response_model is supplied.

        Returns:
            tuple: (response_dict, input_tokens, output_tokens)
        """
        openai_messages = self._convert_messages_to_openai_format(messages)
        model = self._get_model_for_size(model_size)

        try:
            if response_model:
                response = await self._create_structured_completion(
                    model=model,
                    messages=openai_messages,
                    temperature=self.temperature,
                    max_tokens=max_tokens or self.max_tokens,
                    response_model=response_model,
                    reasoning=self.reasoning,
                    verbosity=self.verbosity,
                )
                return self._handle_structured_response(response)
            else:
                response = await self._create_completion(
                    model=model,
                    messages=openai_messages,
                    temperature=self.temperature,
                    max_tokens=max_tokens or self.max_tokens,
                )
                return self._handle_json_response(response)

        except openai.LengthFinishReasonError as e:
            raise Exception(f'Output length exceeded max tokens {self.max_tokens}: {e}') from e
        except openai.RateLimitError as e:
            raise RateLimitError from e
        except openai.AuthenticationError as e:
            logger.error(
                f'OpenAI Authentication Error: {e}. Please verify your API key is correct.'
            )
            raise
        except Exception as e:
            # Provide more context for connection errors
            error_msg = str(e)
            if 'Connection error' in error_msg or 'connection' in error_msg.lower():
                logger.error(
                    f'Connection error communicating with OpenAI API. Please check your network connection and API key. Error: {e}'
                )
            else:
                logger.error(f'Error in generating LLM response: {e}')
            raise

    async def generate_response(
        self,
        messages: list[Message],
        response_model: type[BaseModel] | None = None,
        max_tokens: int | None = None,
        model_size: ModelSize = ModelSize.medium,
        group_id: str | None = None,
        prompt_name: str | None = None,
    ) -> dict[str, typing.Any]:
        """Generate a response with retry logic and error handling.

        On recoverable application errors the failed attempt's error text is
        appended to the conversation and the call is retried (up to
        MAX_RETRIES). Rate limits, refusals, and transport-level errors are
        not retried here.
        """
        if max_tokens is None:
            max_tokens = self.max_tokens

        # Add multilingual extraction instructions
        # NOTE: mutates the caller's first message in place.
        messages[0].content += get_extraction_language_instruction(group_id)

        # Wrap entire operation in tracing span
        with self.tracer.start_span('llm.generate') as span:
            attributes = {
                'llm.provider': 'openai',
                'model.size': model_size.value,
                'max_tokens': max_tokens,
            }
            if prompt_name:
                attributes['prompt.name'] = prompt_name
            span.add_attributes(attributes)

            retry_count = 0
            last_error = None
            total_input_tokens = 0
            total_output_tokens = 0

            while retry_count <= self.MAX_RETRIES:
                try:
                    response, input_tokens, output_tokens = await self._generate_response(
                        messages, response_model, max_tokens, model_size
                    )
                    total_input_tokens += input_tokens
                    total_output_tokens += output_tokens

                    # Record token usage
                    self.token_tracker.record(prompt_name, total_input_tokens, total_output_tokens)

                    return response
                except (RateLimitError, RefusalError):
                    # These errors should not trigger retries
                    # NOTE(review): on a first-attempt failure last_error is
                    # still None here, so the span status records 'None'
                    # instead of the triggering error — confirm intent.
                    span.set_status('error', str(last_error))
                    raise
                except (
                    openai.APITimeoutError,
                    openai.APIConnectionError,
                    openai.InternalServerError,
                ):
                    # Let OpenAI's client handle these retries
                    # NOTE(review): same last_error-is-None caveat as above.
                    span.set_status('error', str(last_error))
                    raise
                except Exception as e:
                    last_error = e

                    # Don't retry if we've hit the max retries
                    if retry_count >= self.MAX_RETRIES:
                        logger.error(f'Max retries ({self.MAX_RETRIES}) exceeded. Last error: {e}')
                        span.set_status('error', str(e))
                        span.record_exception(e)
                        raise

                    retry_count += 1

                    # Construct a detailed error message for the LLM
                    error_context = (
                        f'The previous response attempt was invalid. '
                        f'Error type: {e.__class__.__name__}. '
                        f'Error details: {str(e)}. '
                        f'Please try again with a valid response, ensuring the output matches '
                        f'the expected format and constraints.'
                    )

                    error_message = Message(role='user', content=error_context)
                    messages.append(error_message)
                    logger.warning(
                        f'Retrying after application error (attempt {retry_count}/{self.MAX_RETRIES}): {e}'
                    )

            # If we somehow get here, raise the last error
            span.set_status('error', str(last_error))
            raise last_error or Exception('Max retries exceeded with no specific error')
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/llm_client/openai_base_client.py",
"license": "Apache License 2.0",
"lines": 253,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:examples/quickstart/quickstart_falkordb.py | """
Copyright 2025, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncio
import json
import logging
import os
from datetime import datetime, timezone
from logging import INFO
from dotenv import load_dotenv
from graphiti_core import Graphiti
from graphiti_core.driver.falkordb_driver import FalkorDriver
from graphiti_core.nodes import EpisodeType
from graphiti_core.search.search_config_recipes import NODE_HYBRID_SEARCH_RRF
#################################################
# CONFIGURATION
#################################################
# Set up logging and environment variables for
# connecting to FalkorDB database
#################################################

# Configure logging
logging.basicConfig(
    level=INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)
logger = logging.getLogger(__name__)

load_dotenv()

# FalkorDB connection parameters
# Make sure FalkorDB (on-premises) is running — see https://docs.falkordb.com/
# By default, FalkorDB does not require a username or password,
# but you can set them via environment variables for added security.
#
# If you're using FalkorDB Cloud, set the environment variables accordingly.
# For on-premises use, you can leave them as None or set them to your preferred values.
#
# The default host and port are 'localhost' and '6379', respectively.
# You can override these values in your environment variables or directly in the code.
falkor_username = os.environ.get('FALKORDB_USERNAME', None)
falkor_password = os.environ.get('FALKORDB_PASSWORD', None)
falkor_host = os.environ.get('FALKORDB_HOST', 'localhost')
# NOTE(review): the port is kept as a string and passed through as-is to
# FalkorDriver — confirm the driver accepts string ports.
falkor_port = os.environ.get('FALKORDB_PORT', '6379')
async def main():
    """Run the FalkorDB quickstart: ingest sample episodes, then demonstrate
    edge search, center-node reranking, and recipe-based node search."""
    #################################################
    # INITIALIZATION
    #################################################
    # Connect to FalkorDB and set up Graphiti indices
    # This is required before using other Graphiti
    # functionality
    #################################################

    # Initialize Graphiti with FalkorDB connection
    # NOTE(review): the banner mentions index setup, but
    # build_indices_and_constraints() is never called in this function —
    # confirm whether FalkorDB needs it.
    falkor_driver = FalkorDriver(
        host=falkor_host, port=falkor_port, username=falkor_username, password=falkor_password
    )
    graphiti = Graphiti(graph_driver=falkor_driver)

    try:
        #################################################
        # ADDING EPISODES
        #################################################
        # Episodes are the primary units of information
        # in Graphiti. They can be text or structured JSON
        # and are automatically processed to extract entities
        # and relationships.
        #################################################

        # Example: Add Episodes
        # Episodes list containing both text and JSON episodes
        episodes = [
            {
                'content': 'Kamala Harris is the Attorney General of California. She was previously '
                'the district attorney for San Francisco.',
                'type': EpisodeType.text,
                'description': 'podcast transcript',
            },
            {
                'content': 'As AG, Harris was in office from January 3, 2011 – January 3, 2017',
                'type': EpisodeType.text,
                'description': 'podcast transcript',
            },
            {
                'content': {
                    'name': 'Gavin Newsom',
                    'position': 'Governor',
                    'state': 'California',
                    'previous_role': 'Lieutenant Governor',
                    'previous_location': 'San Francisco',
                },
                'type': EpisodeType.json,
                'description': 'podcast metadata',
            },
            {
                'content': {
                    'name': 'Gavin Newsom',
                    'position': 'Governor',
                    'term_start': 'January 7, 2019',
                    'term_end': 'Present',
                },
                'type': EpisodeType.json,
                'description': 'podcast metadata',
            },
        ]

        # Add episodes to the graph
        # JSON payloads are serialized to text; plain strings pass through.
        for i, episode in enumerate(episodes):
            await graphiti.add_episode(
                name=f'Freakonomics Radio {i}',
                episode_body=episode['content']
                if isinstance(episode['content'], str)
                else json.dumps(episode['content']),
                source=episode['type'],
                source_description=episode['description'],
                reference_time=datetime.now(timezone.utc),
            )
            print(f'Added episode: Freakonomics Radio {i} ({episode["type"].value})')

        #################################################
        # BASIC SEARCH
        #################################################
        # The simplest way to retrieve relationships (edges)
        # from Graphiti is using the search method, which
        # performs a hybrid search combining semantic
        # similarity and BM25 text retrieval.
        #################################################

        # Perform a hybrid search combining semantic similarity and BM25 retrieval
        print("\nSearching for: 'Who was the California Attorney General?'")
        results = await graphiti.search('Who was the California Attorney General?')

        # Print search results
        print('\nSearch Results:')
        for result in results:
            print(f'UUID: {result.uuid}')
            print(f'Fact: {result.fact}')
            if hasattr(result, 'valid_at') and result.valid_at:
                print(f'Valid from: {result.valid_at}')
            if hasattr(result, 'invalid_at') and result.invalid_at:
                print(f'Valid until: {result.invalid_at}')
            print('---')

        #################################################
        # CENTER NODE SEARCH
        #################################################
        # For more contextually relevant results, you can
        # use a center node to rerank search results based
        # on their graph distance to a specific node
        #################################################

        # Use the top search result's UUID as the center node for reranking
        if results and len(results) > 0:
            # Get the source node UUID from the top result
            center_node_uuid = results[0].source_node_uuid

            print('\nReranking search results based on graph distance:')
            print(f'Using center node UUID: {center_node_uuid}')

            reranked_results = await graphiti.search(
                'Who was the California Attorney General?', center_node_uuid=center_node_uuid
            )

            # Print reranked search results
            print('\nReranked Search Results:')
            for result in reranked_results:
                print(f'UUID: {result.uuid}')
                print(f'Fact: {result.fact}')
                if hasattr(result, 'valid_at') and result.valid_at:
                    print(f'Valid from: {result.valid_at}')
                if hasattr(result, 'invalid_at') and result.invalid_at:
                    print(f'Valid until: {result.invalid_at}')
                print('---')
        else:
            print('No results found in the initial search to use as center node.')

        #################################################
        # NODE SEARCH USING SEARCH RECIPES
        #################################################
        # Graphiti provides predefined search recipes
        # optimized for different search scenarios.
        # Here we use NODE_HYBRID_SEARCH_RRF for retrieving
        # nodes directly instead of edges.
        #################################################

        # Example: Perform a node search using _search method with standard recipes
        print(
            '\nPerforming node search using _search method with standard recipe NODE_HYBRID_SEARCH_RRF:'
        )

        # Use a predefined search configuration recipe and modify its limit
        # (deep copy so the shared module-level recipe is not mutated)
        node_search_config = NODE_HYBRID_SEARCH_RRF.model_copy(deep=True)
        node_search_config.limit = 5  # Limit to 5 results

        # Execute the node search
        node_search_results = await graphiti._search(
            query='California Governor',
            config=node_search_config,
        )

        # Print node search results
        print('\nNode Search Results:')
        for node in node_search_results.nodes:
            print(f'Node UUID: {node.uuid}')
            print(f'Node Name: {node.name}')
            node_summary = node.summary[:100] + '...' if len(node.summary) > 100 else node.summary
            print(f'Content Summary: {node_summary}')
            print(f'Node Labels: {", ".join(node.labels)}')
            print(f'Created At: {node.created_at}')
            if hasattr(node, 'attributes') and node.attributes:
                print('Attributes:')
                for key, value in node.attributes.items():
                    print(f'  {key}: {value}')
            print('---')

    finally:
        #################################################
        # CLEANUP
        #################################################
        # Always close the connection to FalkorDB when
        # finished to properly release resources
        #################################################

        # Close the connection
        await graphiti.close()
        print('\nConnection closed')
if __name__ == '__main__':
    # Script entry point: run the async quickstart to completion.
    asyncio.run(main())
| {
"repo_id": "getzep/graphiti",
"file_path": "examples/quickstart/quickstart_falkordb.py",
"license": "Apache License 2.0",
"lines": 216,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
getzep/graphiti:graphiti_core/driver/driver.py | """
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
import copy
import logging
import os
from abc import ABC, abstractmethod
from collections.abc import AsyncIterator, Coroutine
from contextlib import asynccontextmanager
from enum import Enum
from typing import TYPE_CHECKING, Any
from dotenv import load_dotenv
from graphiti_core.driver.graph_operations.graph_operations import GraphOperationsInterface
from graphiti_core.driver.query_executor import QueryExecutor, Transaction
from graphiti_core.driver.search_interface.search_interface import SearchInterface
if TYPE_CHECKING:
from graphiti_core.driver.operations.community_edge_ops import CommunityEdgeOperations
from graphiti_core.driver.operations.community_node_ops import CommunityNodeOperations
from graphiti_core.driver.operations.entity_edge_ops import EntityEdgeOperations
from graphiti_core.driver.operations.entity_node_ops import EntityNodeOperations
from graphiti_core.driver.operations.episode_node_ops import EpisodeNodeOperations
from graphiti_core.driver.operations.episodic_edge_ops import EpisodicEdgeOperations
from graphiti_core.driver.operations.graph_ops import GraphMaintenanceOperations
from graphiti_core.driver.operations.has_episode_edge_ops import HasEpisodeEdgeOperations
from graphiti_core.driver.operations.next_episode_edge_ops import NextEpisodeEdgeOperations
from graphiti_core.driver.operations.saga_node_ops import SagaNodeOperations
from graphiti_core.driver.operations.search_ops import SearchOperations
# Module-level logger for driver diagnostics.
logger = logging.getLogger(__name__)

# Default size, presumably a result-set limit — consumers are defined elsewhere; TODO confirm.
DEFAULT_SIZE = 10

# Load environment variables from a local .env file, if one is present.
load_dotenv()

# Fulltext index names; each can be overridden via the environment variable of the same name.
ENTITY_INDEX_NAME = os.environ.get('ENTITY_INDEX_NAME', 'entities')
EPISODE_INDEX_NAME = os.environ.get('EPISODE_INDEX_NAME', 'episodes')
COMMUNITY_INDEX_NAME = os.environ.get('COMMUNITY_INDEX_NAME', 'communities')
ENTITY_EDGE_INDEX_NAME = os.environ.get('ENTITY_EDGE_INDEX_NAME', 'entity_edges')
# Identifiers for the graph database backends this package can drive.
# Built with the Enum functional API; members and values are identical to
# the equivalent class-syntax definition.
GraphProvider = Enum(
    'GraphProvider',
    {
        'NEO4J': 'neo4j',
        'FALKORDB': 'falkordb',
        'KUZU': 'kuzu',
        'NEPTUNE': 'neptune',
    },
)
class GraphDriverSession(ABC):
    """Abstract async session against a graph database.

    Sessions are async context managers: entering returns the session itself,
    and concrete drivers decide what (if any) cleanup happens on exit.
    Subclasses must implement `__aexit__`, `run`, `close`, and `execute_write`.
    """

    # Backend identity; set by each concrete session implementation.
    provider: GraphProvider

    async def __aenter__(self):
        # Entering the context simply hands back the session.
        return self

    @abstractmethod
    async def __aexit__(self, exc_type, exc, tb):
        # No cleanup needed for Falkor, but method must exist
        # NOTE(review): the Falkor-specific remark above is inherited from the
        # original; other drivers may well need real cleanup here.
        pass

    @abstractmethod
    async def run(self, query: str, **kwargs: Any) -> Any:
        """Execute a single query with keyword parameters and return its result."""
        raise NotImplementedError()

    @abstractmethod
    async def close(self):
        """Release the session's underlying resources."""
        raise NotImplementedError()

    @abstractmethod
    async def execute_write(self, func, *args, **kwargs):
        """Run `func(*args, **kwargs)` in a write context — semantics are driver-specific."""
        raise NotImplementedError()
class GraphDriver(QueryExecutor, ABC):
    """Abstract base class for graph database drivers.

    A driver pairs a connection to one backend (see `GraphProvider`) with
    query execution, session/transaction management, and a set of optional
    per-entity operation interfaces that concrete drivers may override.
    """

    # Backend identity; set by each concrete driver.
    provider: GraphProvider
    fulltext_syntax: str = (
        ''  # Neo4j (default) syntax does not require a prefix for fulltext queries
    )
    # Name of the database (or graph) this driver targets.
    _database: str
    # Group id applied when callers do not supply one.
    default_group_id: str = ''
    # Legacy interfaces (kept for backwards compatibility during Phase 1)
    search_interface: SearchInterface | None = None
    graph_operations_interface: GraphOperationsInterface | None = None

    @abstractmethod
    def execute_query(self, cypher_query_: str, **kwargs: Any) -> Coroutine:
        """Execute a single query with keyword parameters; returns an awaitable result."""
        raise NotImplementedError()

    @abstractmethod
    def session(self, database: str | None = None) -> GraphDriverSession:
        """Open a session, optionally against a specific database."""
        raise NotImplementedError()

    @abstractmethod
    def close(self):
        """Close the underlying connection."""
        raise NotImplementedError()

    @abstractmethod
    def delete_all_indexes(self) -> Coroutine:
        """Drop all indexes managed by this driver; returns an awaitable."""
        raise NotImplementedError()

    def with_database(self, database: str) -> GraphDriver:
        """
        Returns a shallow copy of this driver with a different default database.
        Reuses the same connection (e.g. FalkorDB, Neo4j).
        """
        cloned = copy.copy(self)
        cloned._database = database
        return cloned

    @abstractmethod
    async def build_indices_and_constraints(self, delete_existing: bool = False):
        """Create the indexes/constraints the driver needs; optionally drop existing ones first."""
        raise NotImplementedError()

    def clone(self, database: str) -> GraphDriver:
        """Clone the driver with a different database or graph name."""
        # Base implementation ignores `database` and returns self unchanged;
        # drivers that support multiple graphs are expected to override this.
        return self

    def build_fulltext_query(
        self, query: str, group_ids: list[str] | None = None, max_query_length: int = 128
    ) -> str:
        """
        Specific fulltext query builder for database providers.
        Only implemented by providers that need custom fulltext query building.

        Raises:
            NotImplementedError: in the base class; providers without custom
                fulltext syntax are not expected to call this.
        """
        raise NotImplementedError(f'build_fulltext_query not implemented for {self.provider}')

    # --- New operations interfaces ---
    @asynccontextmanager
    async def transaction(self) -> AsyncIterator[Transaction]:
        """Return a transaction context manager.
        Usage::
            async with driver.transaction() as tx:
                await ops.save(driver, node, tx=tx)
        Drivers with real transaction support (e.g., Neo4j) commit on clean exit
        and roll back on exception. Drivers without native transactions return a
        thin wrapper where queries execute immediately.
        The base implementation provides a no-op wrapper using the session. Drivers
        should override this to provide real transaction semantics where supported.
        """
        session = self.session()
        try:
            yield _SessionTransaction(session)
        finally:
            # Always close the session, even if the caller's block raised.
            await session.close()

    # Optional operation interfaces. Each returns None by default, meaning the
    # base driver provides no specialized implementation for that entity type;
    # concrete drivers override the relevant properties.
    @property
    def entity_node_ops(self) -> EntityNodeOperations | None:
        """Entity-node operations, or None if not provided by this driver."""
        return None

    @property
    def episode_node_ops(self) -> EpisodeNodeOperations | None:
        """Episode-node operations, or None if not provided by this driver."""
        return None

    @property
    def community_node_ops(self) -> CommunityNodeOperations | None:
        """Community-node operations, or None if not provided by this driver."""
        return None

    @property
    def saga_node_ops(self) -> SagaNodeOperations | None:
        """Saga-node operations, or None if not provided by this driver."""
        return None

    @property
    def entity_edge_ops(self) -> EntityEdgeOperations | None:
        """Entity-edge operations, or None if not provided by this driver."""
        return None

    @property
    def episodic_edge_ops(self) -> EpisodicEdgeOperations | None:
        """Episodic-edge operations, or None if not provided by this driver."""
        return None

    @property
    def community_edge_ops(self) -> CommunityEdgeOperations | None:
        """Community-edge operations, or None if not provided by this driver."""
        return None

    @property
    def has_episode_edge_ops(self) -> HasEpisodeEdgeOperations | None:
        """HAS_EPISODE-edge operations, or None if not provided by this driver."""
        return None

    @property
    def next_episode_edge_ops(self) -> NextEpisodeEdgeOperations | None:
        """NEXT_EPISODE-edge operations, or None if not provided by this driver."""
        return None

    @property
    def search_ops(self) -> SearchOperations | None:
        """Search operations, or None if not provided by this driver."""
        return None

    @property
    def graph_ops(self) -> GraphMaintenanceOperations | None:
        """Graph maintenance operations, or None if not provided by this driver."""
        return None
class _SessionTransaction(Transaction):
    """Fallback `Transaction` backed by a plain session.

    There is no deferred commit or rollback: every `run` call is forwarded
    straight to the wrapped session and takes effect immediately.
    """

    def __init__(self, session: GraphDriverSession):
        # Keep a reference to the session all queries are delegated to.
        self._wrapped = session

    async def run(self, query: str, **kwargs: Any) -> Any:
        outcome = await self._wrapped.run(query, **kwargs)
        return outcome
| {
"repo_id": "getzep/graphiti",
"file_path": "graphiti_core/driver/driver.py",
"license": "Apache License 2.0",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.