sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
infiniflow/ragflow:test/testcases/test_http_api/test_dataset_management/test_dify_retrieval_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import inspect
import sys
from copy import deepcopy
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _DummyKB:
def __init__(self, tenant_id="tenant-1", embd_id="embd-1"):
self.tenant_id = tenant_id
self.embd_id = embd_id
class _DummyRetriever:
async def retrieval(self, *_args, **_kwargs):
return {
"chunks": [
{"doc_id": "doc-1", "content_with_weight": "chunk-content", "similarity": 0.8, "docnm_kwd": "doc-title", "vector": [0.1]}
]
}
def retrieval_by_children(self, chunks, _tenant_ids):
return chunks
def _run(coro):
return asyncio.run(coro)
def _load_dify_retrieval_module(monkeypatch):
    """Import ``api/apps/sdk/dify_retrieval.py`` in isolation, stubbing heavy deps.

    Installs fake ``common``/``deepdoc``/``xgboost`` modules into ``sys.modules``
    (via monkeypatch, so they are removed after the test) before executing the
    route module from its file path.

    NOTE(review): assumes this test file sits four directories below the repo
    root -- confirm if the test tree is ever moved.
    """
    repo_root = Path(__file__).resolve().parents[4]
    # Real "common" package, resolved from the repo checkout.
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    # Stub the deepdoc parser hierarchy so importing the route pulls no ML deps.
    deepdoc_pkg = ModuleType("deepdoc")
    deepdoc_parser_pkg = ModuleType("deepdoc.parser")
    deepdoc_parser_pkg.__path__ = []

    class _StubPdfParser:
        pass

    class _StubExcelParser:
        pass

    class _StubDocxParser:
        pass

    deepdoc_parser_pkg.PdfParser = _StubPdfParser
    deepdoc_parser_pkg.ExcelParser = _StubExcelParser
    deepdoc_parser_pkg.DocxParser = _StubDocxParser
    deepdoc_pkg.parser = deepdoc_parser_pkg
    monkeypatch.setitem(sys.modules, "deepdoc", deepdoc_pkg)
    monkeypatch.setitem(sys.modules, "deepdoc.parser", deepdoc_parser_pkg)
    deepdoc_excel_module = ModuleType("deepdoc.parser.excel_parser")
    deepdoc_excel_module.RAGFlowExcelParser = _StubExcelParser
    monkeypatch.setitem(sys.modules, "deepdoc.parser.excel_parser", deepdoc_excel_module)
    deepdoc_parser_utils = ModuleType("deepdoc.parser.utils")
    deepdoc_parser_utils.get_text = lambda *_args, **_kwargs: ""
    monkeypatch.setitem(sys.modules, "deepdoc.parser.utils", deepdoc_parser_utils)
    # presumably xgboost is imported transitively; an empty module suffices here.
    monkeypatch.setitem(sys.modules, "xgboost", ModuleType("xgboost"))
    module_name = "test_dify_retrieval_routes_unit_module"
    module_path = repo_root / "api" / "apps" / "sdk" / "dify_retrieval.py"
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    # Inject a no-op route manager BEFORE execution so the route decorators
    # registered at import time become inert passthroughs.
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, module_name, module)
    spec.loader.exec_module(module)
    return module
def _set_request_json(monkeypatch, module, payload):
    """Patch ``get_request_json`` so the route awaits a deep copy of *payload*."""
    def _fake_request_json():
        return _AwaitableValue(deepcopy(payload))

    monkeypatch.setattr(module, "get_request_json", _fake_request_json)
@pytest.mark.p2
def test_retrieval_success_with_metadata_and_kg(monkeypatch):
    """Happy path: a regular chunk and a knowledge-graph chunk are merged and ranked."""
    module = _load_dify_retrieval_module(monkeypatch)
    _set_request_json(
        monkeypatch,
        module,
        {
            "knowledge_id": "kb-1",
            "query": "hello",
            "use_kg": True,
            "retrieval_setting": {"score_threshold": 0.1, "top_k": 3},
            "metadata_condition": {"conditions": [{"name": "author", "comparison_operator": "is", "value": "alice"}], "logic": "and"},
        },
    )
    # jsonify becomes the identity so assertions can inspect the raw dict.
    monkeypatch.setattr(module, "jsonify", lambda payload: payload)
    monkeypatch.setattr(module.DocMetadataService, "get_meta_by_kbs", lambda _kb_ids: [{"doc_id": "doc-1"}])
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _DummyKB()))
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: object())
    monkeypatch.setattr(module, "convert_conditions", lambda cond: cond.get("conditions", []))
    # meta_filter returning [] means the metadata condition excludes no documents.
    monkeypatch.setattr(module, "meta_filter", lambda *_args, **_kwargs: [])
    retriever = _DummyRetriever()
    monkeypatch.setattr(module.settings, "retriever", retriever)

    class _DummyKgRetriever:
        # Single KG hit with a higher similarity than the regular chunk (0.8).
        async def retrieval(self, *_args, **_kwargs):
            return {
                "doc_id": "doc-2",
                "content_with_weight": "kg-content",
                "similarity": 0.9,
                "docnm_kwd": "kg-title",
            }

    monkeypatch.setattr(module.settings, "kg_retriever", _DummyKgRetriever())
    monkeypatch.setattr(
        module.DocumentService,
        "get_by_id",
        lambda doc_id: (True, SimpleNamespace(meta_fields={"origin": f"meta-{doc_id}"})),
    )
    monkeypatch.setattr(module, "label_question", lambda *_args, **_kwargs: [])
    # inspect.unwrap strips the auth decorator so the bare coroutine is called.
    res = _run(inspect.unwrap(module.retrieval)("tenant-1"))
    assert "records" in res, res
    assert len(res["records"]) == 2, res
    # The KG chunk (similarity 0.9) should outrank the regular chunk (0.8).
    top = res["records"][0]
    assert top["title"] == "kg-title", res
    assert top["metadata"]["doc_id"] == "doc-2", res
    assert "score" in top, res
@pytest.mark.p2
def test_retrieval_kb_not_found(monkeypatch):
    """An unknown knowledge_id maps to NOT_FOUND with an explanatory message."""
    module = _load_dify_retrieval_module(monkeypatch)
    _set_request_json(monkeypatch, module, {"knowledge_id": "kb-missing", "query": "hello"})
    monkeypatch.setattr(module.DocMetadataService, "get_meta_by_kbs", lambda _kb_ids: [])
    # (False, None) signals that the knowledgebase lookup failed.
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
    res = _run(inspect.unwrap(module.retrieval)("tenant-1"))
    assert res["code"] == module.RetCode.NOT_FOUND, res
    assert "Knowledgebase not found" in res["message"], res
@pytest.mark.p2
def test_retrieval_not_found_exception_mapping(monkeypatch):
    """A retriever error carrying the chunk-not-found marker maps to NOT_FOUND.

    NOTE(review): the route apparently matches on the "chunk_not_found_error"
    substring and rewrites the message to "No chunk found" -- confirm against
    the route implementation.
    """
    module = _load_dify_retrieval_module(monkeypatch)
    _set_request_json(monkeypatch, module, {"knowledge_id": "kb-1", "query": "hello"})
    monkeypatch.setattr(module.DocMetadataService, "get_meta_by_kbs", lambda _kb_ids: [])
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _DummyKB()))
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: object())
    monkeypatch.setattr(module, "label_question", lambda *_args, **_kwargs: [])

    class _BrokenRetriever:
        async def retrieval(self, *_args, **_kwargs):
            raise RuntimeError("chunk_not_found_error")

    monkeypatch.setattr(module.settings, "retriever", _BrokenRetriever())
    res = _run(inspect.unwrap(module.retrieval)("tenant-1"))
    assert res["code"] == module.RetCode.NOT_FOUND, res
    assert "No chunk found" in res["message"], res
@pytest.mark.p2
def test_retrieval_generic_exception_mapping(monkeypatch):
    """Any other retriever failure surfaces as SERVER_ERROR with the original text."""
    module = _load_dify_retrieval_module(monkeypatch)
    _set_request_json(monkeypatch, module, {"knowledge_id": "kb-1", "query": "hello"})
    monkeypatch.setattr(module.DocMetadataService, "get_meta_by_kbs", lambda _kb_ids: [])
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _DummyKB()))
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: object())
    monkeypatch.setattr(module, "label_question", lambda *_args, **_kwargs: [])

    class _BrokenRetriever:
        async def retrieval(self, *_args, **_kwargs):
            raise RuntimeError("boom")

    monkeypatch.setattr(module.settings, "retriever", _BrokenRetriever())
    res = _run(inspect.unwrap(module.retrieval)("tenant-1"))
    assert res["code"] == module.RetCode.SERVER_ERROR, res
    assert "boom" in res["message"], res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_dataset_management/test_dify_retrieval_routes_unit.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_file_app/test_file_routes.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import functools
import importlib.util
import sys
from enum import Enum
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
def _load_files_app(monkeypatch):
    """Import ``api/apps/sdk/files.py`` with every project dependency stubbed.

    Builds fake ``api.*`` and ``common.*`` modules directly in ``sys.modules``
    (via monkeypatch, so they are removed after the test) so the route module
    can be executed without the real services, database, or object storage.

    NOTE(review): assumes this test file sits four directories below the repo
    root -- confirm if the test tree is ever moved.
    """
    repo_root = Path(__file__).resolve().parents[4]
    # --- fake package skeleton: api, api.apps, api.apps.sdk -----------------
    api_pkg = ModuleType("api")
    api_pkg.__path__ = [str(repo_root / "api")]
    monkeypatch.setitem(sys.modules, "api", api_pkg)
    apps_pkg = ModuleType("api.apps")
    apps_pkg.__path__ = [str(repo_root / "api" / "apps")]
    monkeypatch.setitem(sys.modules, "api.apps", apps_pkg)
    api_pkg.apps = apps_pkg
    sdk_pkg = ModuleType("api.apps.sdk")
    sdk_pkg.__path__ = [str(repo_root / "api" / "apps" / "sdk")]
    monkeypatch.setitem(sys.modules, "api.apps.sdk", sdk_pkg)
    apps_pkg.sdk = sdk_pkg
    # --- api.db with a minimal FileType enum --------------------------------
    db_pkg = ModuleType("api.db")
    db_pkg.__path__ = []

    class _FileType(Enum):
        FOLDER = "folder"
        VIRTUAL = "virtual"
        DOC = "doc"
        VISUAL = "visual"

    db_pkg.FileType = _FileType
    monkeypatch.setitem(sys.modules, "api.db", db_pkg)
    api_pkg.db = db_pkg
    # --- service stubs: documents, file<->document links, KBs, files --------
    services_pkg = ModuleType("api.db.services")
    services_pkg.__path__ = []
    services_pkg.duplicate_name = lambda _query, **kwargs: kwargs.get("name", "")
    monkeypatch.setitem(sys.modules, "api.db.services", services_pkg)
    document_service_mod = ModuleType("api.db.services.document_service")

    class _StubDocumentService:
        @staticmethod
        def get_by_id(_doc_id):
            return True, SimpleNamespace(id=_doc_id)

        @staticmethod
        def get_tenant_id(_doc_id):
            return "tenant1"

        @staticmethod
        def remove_document(*_args, **_kwargs):
            return True

        @staticmethod
        def update_by_id(*_args, **_kwargs):
            return True

        @staticmethod
        def insert(_doc):
            return SimpleNamespace(id="doc1")

    document_service_mod.DocumentService = _StubDocumentService
    monkeypatch.setitem(sys.modules, "api.db.services.document_service", document_service_mod)
    services_pkg.document_service = document_service_mod
    file2document_service_mod = ModuleType("api.db.services.file2document_service")

    class _StubFile2DocumentService:
        @staticmethod
        def get_by_file_id(_file_id):
            return []

        @staticmethod
        def delete_by_file_id(*_args, **_kwargs):
            return None

        @staticmethod
        def get_storage_address(**_kwargs):
            return "bucket", "location"

        @staticmethod
        def insert(_data):
            return SimpleNamespace(to_json=lambda: {})

    file2document_service_mod.File2DocumentService = _StubFile2DocumentService
    monkeypatch.setitem(sys.modules, "api.db.services.file2document_service", file2document_service_mod)
    services_pkg.file2document_service = file2document_service_mod
    knowledgebase_service_mod = ModuleType("api.db.services.knowledgebase_service")

    class _StubKnowledgebaseService:
        @staticmethod
        def get_by_id(_kb_id):
            return False, None

    knowledgebase_service_mod.KnowledgebaseService = _StubKnowledgebaseService
    monkeypatch.setitem(sys.modules, "api.db.services.knowledgebase_service", knowledgebase_service_mod)
    services_pkg.knowledgebase_service = knowledgebase_service_mod
    file_service_mod = ModuleType("api.db.services.file_service")

    class _StubFileService:
        # Defaults are overridden per-test with monkeypatch.setattr.
        @staticmethod
        def get_root_folder(_tenant_id):
            return {"id": "root"}

        @staticmethod
        def get_by_id(_file_id):
            return True, SimpleNamespace(id=_file_id, parent_id="root", location="file", tenant_id="tenant1")

        @staticmethod
        def get_id_list_by_id(_pf_id, _file_obj_names, _idx, ids):
            return ids

        @staticmethod
        def create_folder(_file, parent_id, _file_obj_names, _len_id_list):
            return SimpleNamespace(id=parent_id)

        @staticmethod
        def query(**_kwargs):
            return []

        @staticmethod
        def insert(data):
            return SimpleNamespace(to_json=lambda: data)

        @staticmethod
        def is_parent_folder_exist(_pf_id):
            return True

        @staticmethod
        def get_by_pf_id(*_args, **_kwargs):
            return [], 0

        @staticmethod
        def get_parent_folder(_file_id):
            return SimpleNamespace(to_json=lambda: {"id": "root"})

        @staticmethod
        def get_all_parent_folders(_file_id):
            return []

        @staticmethod
        def get_all_innermost_file_ids(_file_id, _acc):
            return []

        @staticmethod
        def delete_folder_by_pf_id(*_args, **_kwargs):
            return None

        @staticmethod
        def delete(_file):
            return True

        @staticmethod
        def update_by_id(*_args, **_kwargs):
            return True

        @staticmethod
        def get_by_ids(_file_ids):
            return []

        @staticmethod
        def move_file(*_args, **_kwargs):
            return None

        @staticmethod
        def init_knowledgebase_docs(*_args, **_kwargs):
            return None

        @staticmethod
        def get_parser(_file_type, _file_name, parser_id):
            return parser_id

    file_service_mod.FileService = _StubFileService
    monkeypatch.setitem(sys.modules, "api.db.services.file_service", file_service_mod)
    services_pkg.file_service = file_service_mod
    # --- api.utils helpers: JSON envelopes and a transparent auth decorator --
    api_utils_mod = ModuleType("api.utils.api_utils")

    def get_json_result(data=None, message="", code=0):
        return {"code": code, "data": data, "message": message}

    async def get_request_json():
        return {}

    def server_error_response(err):
        # code 100 mirrors the project's generic server-error envelope.
        return {"code": 100, "data": None, "message": str(err)}

    def token_required(func):
        # Transparent wrapper; tests bypass it anyway via __wrapped__.
        @functools.wraps(func)
        async def _wrapper(*args, **kwargs):
            return await func(*args, **kwargs)

        return _wrapper

    api_utils_mod.get_json_result = get_json_result
    api_utils_mod.get_request_json = get_request_json
    api_utils_mod.server_error_response = server_error_response
    api_utils_mod.token_required = token_required
    monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)
    file_utils_mod = ModuleType("api.utils.file_utils")
    file_utils_mod.filename_type = lambda _filename: _FileType.DOC.value
    monkeypatch.setitem(sys.modules, "api.utils.file_utils", file_utils_mod)
    web_utils_mod = ModuleType("api.utils.web_utils")
    web_utils_mod.CONTENT_TYPE_MAP = {"txt": "text/plain", "json": "application/json"}
    web_utils_mod.apply_safe_file_response_headers = lambda response, *_args, **_kwargs: response
    monkeypatch.setitem(sys.modules, "api.utils.web_utils", web_utils_mod)
    # --- common package with an inert object-storage stub --------------------
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    common_pkg.settings = SimpleNamespace(
        STORAGE_IMPL=SimpleNamespace(
            obj_exist=lambda *_args, **_kwargs: False,
            put=lambda *_args, **_kwargs: None,
            get=lambda *_args, **_kwargs: b"",
            rm=lambda *_args, **_kwargs: None,
        )
    )
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    misc_utils_mod = ModuleType("common.misc_utils")
    misc_utils_mod.get_uuid = lambda: "uuid"

    async def thread_pool_exec(func, *args, **kwargs):
        # Run synchronously instead of on a thread pool for determinism.
        return func(*args, **kwargs)

    misc_utils_mod.thread_pool_exec = thread_pool_exec
    monkeypatch.setitem(sys.modules, "common.misc_utils", misc_utils_mod)
    constants_mod = ModuleType("common.constants")

    class _RetCode:
        SUCCESS = 0
        BAD_REQUEST = 400
        NOT_FOUND = 404
        CONFLICT = 409
        SERVER_ERROR = 500

    constants_mod.RetCode = _RetCode
    monkeypatch.setitem(sys.modules, "common.constants", constants_mod)
    # --- finally execute the route module itself ------------------------------
    module_path = repo_root / "api" / "apps" / "sdk" / "files.py"
    spec = importlib.util.spec_from_file_location("api.apps.sdk.files", module_path)
    module = importlib.util.module_from_spec(spec)
    # Inert route manager must be injected before exec so decorators are no-ops.
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, "api.apps.sdk.files", module)
    spec.loader.exec_module(module)
    return module
def _run(coro):
return asyncio.run(coro)
class _DummyFile:
def __init__(self, file_id, file_type, name="doc.txt", tenant_id="tenant1", parent_id="parent1", location=None):
self.id = file_id
self.type = file_type
self.name = name
self.location = location or name
self.size = 1
self.tenant_id = tenant_id
self.parent_id = parent_id
def to_json(self):
return {"id": self.id, "name": self.name, "type": self.type}
class _FalsyFile(_DummyFile):
    """A file record that evaluates as falsy, to drive "not found" branches."""

    def __bool__(self):
        return False
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _Args(dict):
def get(self, key, default=None, type=None):
value = super().get(key, default)
if value is None or type is None:
return value
try:
return type(value)
except (TypeError, ValueError):
return default
class _DummyRequest:
    """Request double: synchronous ``args``, awaitable ``form`` and ``files``."""

    def __init__(self, *, args=None, form=None, files=None):
        if files is None:
            files = _DummyFiles()
        self.args = _Args(args or {})
        self.form = _AwaitableValue(form or {})
        self.files = _AwaitableValue(files)
class _DummyUploadFile:
def __init__(self, filename, blob=b"file-bytes"):
self.filename = filename
self._blob = blob
def read(self):
return self._blob
class _DummyFiles(dict):
def __init__(self, file_objs=None):
super().__init__()
self._file_objs = file_objs or []
if file_objs is not None:
self["file"] = self._file_objs
def getlist(self, key):
if key == "file":
return list(self._file_objs)
return []
class _DummyResponse:
def __init__(self, data):
self.data = data
self.headers = {}
@pytest.mark.p2
class TestFileMoveUnit:
    """Unit tests for the ``move`` route (``__wrapped__`` bypasses token auth)."""

    def test_move_success_and_invalid_parent(self, monkeypatch):
        """Moving into an existing folder succeeds; a missing destination yields 404."""
        module = _load_files_app(monkeypatch)
        file_id = "file1"
        parent_id = "parent1"

        async def fake_request_json():
            return {"src_file_ids": [file_id], "dest_file_id": parent_id}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        monkeypatch.setattr(module.FileService, "get_by_ids", lambda _ids: [_DummyFile(file_id, module.FileType.DOC.value)])
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _pid: (True, _DummyFile(parent_id, module.FileType.FOLDER.value)))
        monkeypatch.setattr(module.FileService, "move_file", lambda *_args, **_kwargs: None)
        res = _run(module.move.__wrapped__("tenant1"))
        assert res["code"] == 0
        assert res["data"] is True
        # Destination lookup now fails -> "Parent Folder not found!".
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _pid: (False, None))
        res = _run(module.move.__wrapped__("tenant1"))
        assert res["code"] == 404
        assert res["message"] == "Parent Folder not found!"

    def test_move_missing_payload(self, monkeypatch):
        """An empty payload falls through to the server-error handler (code 100)."""
        module = _load_files_app(monkeypatch)

        async def fake_request_json():
            return {}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.move.__wrapped__("tenant1"))
        assert res["code"] == 100

    def test_move_missing_source_branch(self, monkeypatch):
        """A falsy source record triggers the "File or Folder not found!" branch."""
        module = _load_files_app(monkeypatch)

        async def fake_request_json():
            return {"src_file_ids": ["file1"], "dest_file_id": "parent1"}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        monkeypatch.setattr(module.FileService, "get_by_ids", lambda _ids: [_FalsyFile("file1", module.FileType.DOC.value)])
        res = _run(module.move.__wrapped__("tenant1"))
        assert res["code"] == 404
        assert res["message"] == "File or Folder not found!"
@pytest.mark.p2
class TestFileConvertUnit:
    """Unit tests for the ``convert`` route (file -> knowledgebase document)."""

    def test_convert_success_and_delete(self, monkeypatch):
        """An already-linked document is removed before the new one is inserted."""
        module = _load_files_app(monkeypatch)
        file_id = "file1"
        kb_id = "kb1"

        async def fake_request_json():
            return {"kb_ids": [kb_id], "file_ids": [file_id]}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        monkeypatch.setattr(module.FileService, "get_by_ids", lambda _ids: [_DummyFile(file_id, module.FileType.DOC.value)])

        class _Inform:
            # Pre-existing file->document link that convert must clean up.
            document_id = "doc1"

        monkeypatch.setattr(module.File2DocumentService, "get_by_file_id", lambda _id: [_Inform()])
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, _DummyFile("doc1", module.FileType.DOC.value)))
        monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: "tenant1")
        monkeypatch.setattr(module.DocumentService, "remove_document", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module.File2DocumentService, "delete_by_file_id", lambda *_args, **_kwargs: None)

        class _Kb:
            id = kb_id
            parser_id = "parser"
            parser_config = {}

        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _Kb()))
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _id: (True, _DummyFile(file_id, module.FileType.DOC.value)))

        class _Doc:
            def __init__(self, doc_id):
                self.id = doc_id

        monkeypatch.setattr(module.DocumentService, "insert", lambda _doc: _Doc("newdoc"))

        class _File2Doc:
            def to_json(self):
                return {"file_id": file_id, "document_id": "newdoc"}

        monkeypatch.setattr(module.File2DocumentService, "insert", lambda _data: _File2Doc())
        res = _run(module.convert.__wrapped__("tenant1"))
        assert res["code"] == 0
        assert res["data"]

    def test_convert_folder(self, monkeypatch):
        """Folders are expanded to their innermost files before conversion."""
        module = _load_files_app(monkeypatch)
        kb_id = "kb1"

        async def fake_request_json():
            return {"kb_ids": [kb_id], "file_ids": ["folder1"]}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        monkeypatch.setattr(module.FileService, "get_by_ids", lambda _ids: [_DummyFile("folder1", module.FileType.FOLDER.value, name="folder")])
        monkeypatch.setattr(module.FileService, "get_all_innermost_file_ids", lambda *_args, **_kwargs: ["inner1"])
        monkeypatch.setattr(module.File2DocumentService, "get_by_file_id", lambda _id: [])
        monkeypatch.setattr(module.File2DocumentService, "delete_by_file_id", lambda *_args, **_kwargs: None)

        class _Kb:
            id = kb_id
            parser_id = "parser"
            parser_config = {}

        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _Kb()))
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _id: (True, _DummyFile("inner1", module.FileType.DOC.value)))
        monkeypatch.setattr(module.DocumentService, "insert", lambda _doc: _DummyFile("doc1", module.FileType.DOC.value))
        monkeypatch.setattr(module.File2DocumentService, "insert", lambda _data: SimpleNamespace(to_json=lambda: {"file_id": "inner1"}))
        res = _run(module.convert.__wrapped__("tenant1"))
        assert res["code"] == 0
        assert res["data"]

    def test_convert_invalid_file_id(self, monkeypatch):
        """A falsy file record triggers the "File not found!" branch."""
        module = _load_files_app(monkeypatch)

        async def fake_request_json():
            return {"kb_ids": ["kb1"], "file_ids": ["missing"]}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        monkeypatch.setattr(module.FileService, "get_by_ids", lambda _ids: [_FalsyFile("missing", module.FileType.DOC.value)])
        res = _run(module.convert.__wrapped__("tenant1"))
        assert res["code"] == 404
        assert res["message"] == "File not found!"

    def test_convert_invalid_kb_id(self, monkeypatch):
        """A missing knowledgebase yields "Can't find this dataset!"."""
        module = _load_files_app(monkeypatch)

        async def fake_request_json():
            return {"kb_ids": ["missing"], "file_ids": ["file1"]}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        monkeypatch.setattr(module.FileService, "get_by_ids", lambda _ids: [_DummyFile("file1", module.FileType.DOC.value)])
        monkeypatch.setattr(module.File2DocumentService, "get_by_file_id", lambda _id: [])
        monkeypatch.setattr(module.File2DocumentService, "delete_by_file_id", lambda *_args, **_kwargs: None)
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
        res = _run(module.convert.__wrapped__("tenant1"))
        assert res["code"] == 404
        assert res["message"] == "Can't find this dataset!"

    def test_convert_file_missing_second_lookup(self, monkeypatch):
        """The per-file re-lookup failing yields "Can't find this file!"."""
        module = _load_files_app(monkeypatch)

        async def fake_request_json():
            return {"kb_ids": ["kb1"], "file_ids": ["file1"]}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        monkeypatch.setattr(module.FileService, "get_by_ids", lambda _ids: [_DummyFile("file1", module.FileType.DOC.value)])
        monkeypatch.setattr(module.File2DocumentService, "get_by_file_id", lambda _id: [])
        monkeypatch.setattr(module.File2DocumentService, "delete_by_file_id", lambda *_args, **_kwargs: None)

        class _Kb:
            id = "kb1"
            parser_id = "parser"
            parser_config = {}

        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _Kb()))
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _id: (False, None))
        res = _run(module.convert.__wrapped__("tenant1"))
        assert res["code"] == 404
        assert res["message"] == "Can't find this file!"

    def test_convert_missing_payload(self, monkeypatch):
        """Missing required keys propagate as KeyError (route does not guard them)."""
        module = _load_files_app(monkeypatch)

        async def fake_request_json():
            return {}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        with pytest.raises(KeyError):
            _run(module.convert.__wrapped__("tenant1"))
@pytest.mark.p2
class TestFileRouteBranchUnit:
def test_upload_branch_matrix(self, monkeypatch):
    """Walk every branch of ``upload``: validation, folder walk, success, error."""
    module = _load_files_app(monkeypatch)
    monkeypatch.setattr(module.FileService, "get_root_folder", lambda _tenant_id: {"id": "root"})
    # Missing file part.
    monkeypatch.setattr(module, "request", _DummyRequest(form={}, files=_DummyFiles()))
    res = _run(module.upload.__wrapped__("tenant1"))
    assert res["code"] == module.RetCode.BAD_REQUEST
    assert res["message"] == "No file part!"
    # Empty filename.
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(form={"parent_id": "pf1"}, files=_DummyFiles([_DummyUploadFile("")])),
    )
    res = _run(module.upload.__wrapped__("tenant1"))
    assert res["code"] == module.RetCode.BAD_REQUEST
    assert res["message"] == "No selected file!"
    # Parent folder missing.
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(form={"parent_id": "pf1"}, files=_DummyFiles([_DummyUploadFile("a.txt")])),
    )
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (False, None))
    res = _run(module.upload.__wrapped__("tenant1"))
    assert res["code"] == module.RetCode.NOT_FOUND
    assert res["message"] == "Can't find this folder!"
    # Missing folder in branch: file_len != len_id_list.
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(form={"parent_id": "pf1"}, files=_DummyFiles([_DummyUploadFile("dir/a.txt")])),
    )
    monkeypatch.setattr(module.FileService, "get_id_list_by_id", lambda *_args, **_kwargs: ["pf1", "missing-child"])

    def get_by_id_missing_child(file_id):
        # Only the intermediate folder id fails to resolve.
        if file_id == "missing-child":
            return False, None
        return True, SimpleNamespace(id="pf1")

    monkeypatch.setattr(module.FileService, "get_by_id", get_by_id_missing_child)
    res = _run(module.upload.__wrapped__("tenant1"))
    assert res["code"] == module.RetCode.NOT_FOUND
    assert res["message"] == "Folder not found!"
    # Missing folder in branch: file_len == len_id_list.
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(form={"parent_id": "pf1"}, files=_DummyFiles([_DummyUploadFile("b.txt")])),
    )
    monkeypatch.setattr(module.FileService, "get_id_list_by_id", lambda *_args, **_kwargs: ["pf1", "leaf"])
    pf1_calls = {"count": 0}

    def get_by_id_missing_parent_in_else(file_id):
        # First lookup of pf1 succeeds; the second (inside the else branch) fails.
        if file_id == "pf1":
            pf1_calls["count"] += 1
            if pf1_calls["count"] == 1:
                return True, SimpleNamespace(id="pf1")
            return False, None
        return True, SimpleNamespace(id=file_id)

    monkeypatch.setattr(module.FileService, "get_by_id", get_by_id_missing_parent_in_else)
    res = _run(module.upload.__wrapped__("tenant1"))
    assert res["code"] == module.RetCode.NOT_FOUND
    assert res["message"] == "Folder not found!"

    # Success path, with a recording storage double (first obj_exist True
    # exercises the de-duplication branch, later calls return False).
    class _Storage:
        def __init__(self):
            self.obj_calls = 0
            self.put_calls = []

        def obj_exist(self, _bucket, _location):
            self.obj_calls += 1
            return self.obj_calls == 1

        def put(self, bucket, location, blob):
            self.put_calls.append((bucket, location, blob))

    storage = _Storage()
    monkeypatch.setattr(module.settings, "STORAGE_IMPL", storage)
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(
            form={"parent_id": "pf1"},
            files=_DummyFiles([_DummyUploadFile("dir/a.txt", b"a"), _DummyUploadFile("b.txt", b"b")]),
        ),
    )

    def fake_get_by_id(file_id):
        if file_id == "mid-id":
            return True, SimpleNamespace(id="mid-id")
        return True, SimpleNamespace(id="pf1")

    def fake_get_id_list_by_id(_pf_id, file_obj_names, _idx, _ids):
        # Nested upload ("dir/a.txt") resolves through mid-id; flat one does not.
        if file_obj_names[-1] == "a.txt":
            return ["pf1", "mid-id"]
        return ["pf1", "leaf-id"]

    def fake_create_folder(_file, parent_id, _file_obj_names, _len_id_list):
        return SimpleNamespace(id=f"{parent_id}-folder")

    monkeypatch.setattr(module.FileService, "get_by_id", fake_get_by_id)
    monkeypatch.setattr(module.FileService, "get_id_list_by_id", fake_get_id_list_by_id)
    monkeypatch.setattr(module.FileService, "create_folder", fake_create_folder)
    monkeypatch.setattr(module, "filename_type", lambda _name: module.FileType.DOC.value)
    monkeypatch.setattr(module, "duplicate_name", lambda _query, **kwargs: kwargs["name"])
    monkeypatch.setattr(module, "get_uuid", lambda: "file-id")
    monkeypatch.setattr(module.FileService, "query", lambda **_kwargs: [])
    monkeypatch.setattr(module.FileService, "insert", lambda data: SimpleNamespace(to_json=lambda: {"id": data["id"]}))
    res = _run(module.upload.__wrapped__("tenant1"))
    assert res["code"] == module.RetCode.SUCCESS
    assert len(res["data"]) == 2
    assert storage.put_calls
    # Exception path.
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(form={"parent_id": "pf1"}, files=_DummyFiles([_DummyUploadFile("boom.txt")])),
    )
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (_ for _ in ()).throw(RuntimeError("upload boom")))
    monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})
    res = _run(module.upload.__wrapped__("tenant1"))
    assert res["code"] == 500
    assert "upload boom" in res["message"]
def test_create_branch_matrix(self, monkeypatch):
    """Cover ``create``: bad parent, duplicate name, folder type, type fallback, error."""
    module = _load_files_app(monkeypatch)
    # Mutable holder lets each phase swap the request payload in place.
    state = {"req": {"name": "file1"}}

    async def fake_request_json():
        return state["req"]

    monkeypatch.setattr(module, "get_request_json", fake_request_json)
    monkeypatch.setattr(module.FileService, "get_root_folder", lambda _tenant_id: {"id": "root"})
    monkeypatch.setattr(module.FileService, "is_parent_folder_exist", lambda _pf_id: False)
    res = _run(module.create.__wrapped__("tenant1"))
    assert res["code"] == module.RetCode.BAD_REQUEST
    assert "Parent Folder Doesn't Exist!" in res["message"]
    # Duplicate name under an existing parent.
    state["req"] = {"name": "dup", "parent_id": "pf1"}
    monkeypatch.setattr(module.FileService, "is_parent_folder_exist", lambda _pf_id: True)
    monkeypatch.setattr(module.FileService, "query", lambda **_kwargs: [object()])
    res = _run(module.create.__wrapped__("tenant1"))
    assert res["code"] == module.RetCode.CONFLICT
    assert "Duplicated folder name" in res["message"]
    inserted = {}

    def fake_insert(data):
        inserted["payload"] = data
        return SimpleNamespace(to_json=lambda: data)

    monkeypatch.setattr(module.FileService, "query", lambda **_kwargs: [])
    monkeypatch.setattr(module, "get_uuid", lambda: "uuid-folder")
    monkeypatch.setattr(module.FileService, "insert", fake_insert)
    # Explicit FOLDER type is preserved.
    state["req"] = {"name": "folder", "parent_id": "pf1", "type": module.FileType.FOLDER.value}
    res = _run(module.create.__wrapped__("tenant1"))
    assert res["code"] == module.RetCode.SUCCESS
    assert inserted["payload"]["type"] == module.FileType.FOLDER.value
    # Unknown types fall back to VIRTUAL.
    state["req"] = {"name": "virtual", "parent_id": "pf1", "type": "UNKNOWN"}
    res = _run(module.create.__wrapped__("tenant1"))
    assert res["code"] == module.RetCode.SUCCESS
    assert inserted["payload"]["type"] == module.FileType.VIRTUAL.value
    # Exception path.
    monkeypatch.setattr(module.FileService, "is_parent_folder_exist", lambda _pf_id: (_ for _ in ()).throw(RuntimeError("create boom")))
    monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})
    res = _run(module.create.__wrapped__("tenant1"))
    assert res["code"] == 500
    assert "create boom" in res["message"]
def test_list_files_branch_matrix(self, monkeypatch):
    """Cover ``list_files``: missing folder, missing parent, success, error."""
    module = _load_files_app(monkeypatch)
    calls = {"init": 0}
    monkeypatch.setattr(module.FileService, "get_root_folder", lambda _tenant_id: {"id": "root"})
    # Count init_knowledgebase_docs invocations to prove it runs before lookups.
    monkeypatch.setattr(
        module.FileService,
        "init_knowledgebase_docs",
        lambda _pf_id, _tenant_id: calls.__setitem__("init", calls["init"] + 1),
    )
    monkeypatch.setattr(module, "request", _DummyRequest(args={}))
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _pf_id: (False, None))
    res = _run(module.list_files.__wrapped__("tenant1"))
    assert res["code"] == module.RetCode.NOT_FOUND
    assert res["message"] == "Folder not found!"
    assert calls["init"] == 1
    # Folder resolves but its parent does not -> "File not found!".
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(args={"parent_id": "p1", "keywords": "k", "page": "2", "page_size": "10", "orderby": "name", "desc": "False"}),
    )
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _pf_id: (True, SimpleNamespace(id="p1")))
    monkeypatch.setattr(module.FileService, "get_by_pf_id", lambda *_args, **_kwargs: ([{"id": "f1"}], 1))
    monkeypatch.setattr(module.FileService, "get_parent_folder", lambda _pf_id: None)
    res = _run(module.list_files.__wrapped__("tenant1"))
    assert res["code"] == module.RetCode.NOT_FOUND
    assert res["message"] == "File not found!"
    # Success: total count plus serialized parent folder.
    monkeypatch.setattr(module.FileService, "get_parent_folder", lambda _pf_id: SimpleNamespace(to_json=lambda: {"id": "p0"}))
    res = _run(module.list_files.__wrapped__("tenant1"))
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["total"] == 1
    assert res["data"]["parent_folder"]["id"] == "p0"
    # Exception path.
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _pf_id: (_ for _ in ()).throw(RuntimeError("list boom")))
    monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})
    res = _run(module.list_files.__wrapped__("tenant1"))
    assert res["code"] == 500
    assert "list boom" in res["message"]
    def test_get_root_folder_branch_matrix(self, monkeypatch):
        """``get_root_folder`` returns the root on success and 500 on a raised error."""
        module = _load_files_app(monkeypatch)
        # Success branch: service yields the tenant's root folder dict.
        monkeypatch.setattr(module.FileService, "get_root_folder", lambda _tenant_id: {"id": "root"})
        res = _run(module.get_root_folder.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.SUCCESS
        assert res["data"]["root_folder"]["id"] == "root"
        # Error branch: service raises and server_error_response wraps the exception.
        monkeypatch.setattr(module.FileService, "get_root_folder", lambda _tenant_id: (_ for _ in ()).throw(RuntimeError("root boom")))
        monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})
        res = _run(module.get_root_folder.__wrapped__("tenant1"))
        assert res["code"] == 500
        assert "root boom" in res["message"]
    def test_get_parent_folder_branch_matrix(self, monkeypatch):
        """``get_parent_folder`` branches: missing file, success, raised error."""
        module = _load_files_app(monkeypatch)
        # Branch 1: file id does not resolve -> NOT_FOUND.
        monkeypatch.setattr(module, "request", _DummyRequest(args={"file_id": "missing"}))
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (False, None))
        res = _run(module.get_parent_folder.__wrapped__())
        assert res["code"] == module.RetCode.NOT_FOUND
        assert res["message"] == "Folder not found!"
        # Branch 2: file exists and its parent folder is serialized via to_json.
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (True, SimpleNamespace(id="f1")))
        monkeypatch.setattr(module.FileService, "get_parent_folder", lambda _file_id: SimpleNamespace(to_json=lambda: {"id": "p1"}))
        res = _run(module.get_parent_folder.__wrapped__())
        assert res["code"] == module.RetCode.SUCCESS
        assert res["data"]["parent_folder"]["id"] == "p1"
        # Branch 3: lookup raises -> wrapped by server_error_response.
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (_ for _ in ()).throw(RuntimeError("parent boom")))
        monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})
        res = _run(module.get_parent_folder.__wrapped__())
        assert res["code"] == 500
        assert "parent boom" in res["message"]
    def test_get_all_parent_folders_branch_matrix(self, monkeypatch):
        """``get_all_parent_folders``: missing file, serialized ancestor chain, raised error."""
        module = _load_files_app(monkeypatch)
        # Branch 1: file id cannot be resolved -> NOT_FOUND.
        monkeypatch.setattr(module, "request", _DummyRequest(args={"file_id": "missing"}))
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (False, None))
        res = _run(module.get_all_parent_folders.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.NOT_FOUND
        assert res["message"] == "Folder not found!"
        # Branch 2: every ancestor folder is serialized via to_json.
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (True, SimpleNamespace(id="f1")))
        monkeypatch.setattr(
            module.FileService,
            "get_all_parent_folders",
            lambda _file_id: [SimpleNamespace(to_json=lambda: {"id": "p1"})],
        )
        res = _run(module.get_all_parent_folders.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.SUCCESS
        assert res["data"]["parent_folders"] == [{"id": "p1"}]
        # Branch 3: lookup raises -> server_error_response.
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (_ for _ in ()).throw(RuntimeError("all parent boom")))
        monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})
        res = _run(module.get_all_parent_folders.__wrapped__("tenant1"))
        assert res["code"] == 500
        assert "all parent boom" in res["message"]
    def test_rm_branch_matrix(self, monkeypatch):
        """Exercise every ``rm`` outcome: missing targets, DB failures, success, exception.

        ``req_state`` is mutated between scenarios so the same stubbed
        ``get_request_json`` feeds each call a different request body.
        """
        module = _load_files_app(monkeypatch)
        req_state = {"file_ids": ["f1"]}
        async def fake_request_json():
            return req_state
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        monkeypatch.setattr(module.settings, "STORAGE_IMPL", SimpleNamespace(rm=lambda *_args, **_kwargs: None))
        # Scenario 1: the file id cannot be resolved at all.
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (False, None))
        res = _run(module.rm.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.NOT_FOUND
        assert res["message"] == "File or Folder not found!"
        # Scenario 2: the file exists but carries no tenant id.
        monkeypatch.setattr(
            module.FileService,
            "get_by_id",
            lambda _file_id: (True, _DummyFile(_file_id, module.FileType.DOC.value, tenant_id=None)),
        )
        res = _run(module.rm.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.NOT_FOUND
        assert res["message"] == "Tenant not found!"
        # Scenario 3: deleting a folder whose innermost file is missing.
        req_state["file_ids"] = ["folder1"]
        def folder_missing_inner(file_id):
            # The folder resolves, but the file inside it does not.
            if file_id == "folder1":
                return True, _DummyFile("folder1", module.FileType.FOLDER.value, parent_id="pf1")
            if file_id == "inner1":
                return False, None
            return False, None
        monkeypatch.setattr(module.FileService, "get_by_id", folder_missing_inner)
        monkeypatch.setattr(module.FileService, "get_all_innermost_file_ids", lambda _file_id, _acc: ["inner1"])
        res = _run(module.rm.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.NOT_FOUND
        assert res["message"] == "File not found!"
        # Scenario 4: file deletion fails at the DB layer.
        req_state["file_ids"] = ["doc1"]
        monkeypatch.setattr(
            module.FileService,
            "get_by_id",
            lambda _file_id: (True, _DummyFile("doc1", module.FileType.DOC.value, parent_id="pf1")),
        )
        monkeypatch.setattr(module.FileService, "delete", lambda _file: False)
        res = _run(module.rm.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.SERVER_ERROR
        assert "Database error (File removal)!" in res["message"]
        # Scenario 5: the linked document record cannot be found.
        class _Inform:
            document_id = "doc1"
        monkeypatch.setattr(module.FileService, "delete", lambda _file: True)
        monkeypatch.setattr(module.File2DocumentService, "get_by_file_id", lambda _file_id: [_Inform()])
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (False, None))
        res = _run(module.rm.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.NOT_FOUND
        assert res["message"] == "Document not found!"
        # Scenario 6: document exists but its tenant cannot be resolved.
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, SimpleNamespace(id=_doc_id)))
        monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: None)
        res = _run(module.rm.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.NOT_FOUND
        assert res["message"] == "Tenant not found!"
        # Scenario 7: document removal itself fails.
        monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: "tenant1")
        monkeypatch.setattr(module.DocumentService, "remove_document", lambda *_args, **_kwargs: False)
        res = _run(module.rm.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.SERVER_ERROR
        assert "Database error (Document removal)!" in res["message"]
        # Scenario 8: full success for a folder, counting cascade deletions.
        req_state["file_ids"] = ["folder-ok"]
        deleted = {"folder": 0, "link": 0}
        def folder_success(file_id):
            # Both the folder and its innermost file resolve.
            if file_id == "folder-ok":
                return True, _DummyFile("folder-ok", module.FileType.FOLDER.value, parent_id="pf1")
            if file_id == "inner-ok":
                return True, _DummyFile("inner-ok", module.FileType.DOC.value, parent_id="pf1", location="inner.bin")
            return False, None
        monkeypatch.setattr(module.FileService, "get_by_id", folder_success)
        monkeypatch.setattr(module.FileService, "get_all_innermost_file_ids", lambda _file_id, _acc: ["inner-ok"])
        monkeypatch.setattr(
            module.FileService,
            "delete_folder_by_pf_id",
            lambda _tenant_id, _file_id: deleted.__setitem__("folder", deleted["folder"] + 1),
        )
        monkeypatch.setattr(module.File2DocumentService, "get_by_file_id", lambda _file_id: [])
        monkeypatch.setattr(
            module.File2DocumentService,
            "delete_by_file_id",
            lambda _file_id: deleted.__setitem__("link", deleted["link"] + 1),
        )
        res = _run(module.rm.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.SUCCESS
        assert res["data"] is True
        assert deleted == {"folder": 1, "link": 1}
        # Scenario 9: unexpected exception flows through server_error_response.
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (_ for _ in ()).throw(RuntimeError("rm boom")))
        monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})
        req_state["file_ids"] = ["boom"]
        res = _run(module.rm.__wrapped__("tenant1"))
        assert res["code"] == 500
        assert "rm boom" in res["message"]
    def test_rename_branch_matrix(self, monkeypatch):
        """Cover ``rename``: missing file, extension change, duplicate, DB errors, success."""
        module = _load_files_app(monkeypatch)
        req_state = {"file_id": "f1", "name": "new.txt"}  # mutated per scenario
        async def fake_request_json():
            return req_state
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        # Scenario 1: target file does not exist.
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (False, None))
        res = _run(module.rename.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.NOT_FOUND
        assert res["message"] == "File not found!"
        # Scenario 2: renaming may not change the file extension.
        monkeypatch.setattr(
            module.FileService,
            "get_by_id",
            lambda _file_id: (True, _DummyFile("f1", module.FileType.DOC.value, name="origin.txt")),
        )
        req_state["name"] = "new.pdf"
        res = _run(module.rename.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.BAD_REQUEST
        assert "extension of file can't be changed" in res["message"]
        # Scenario 3: a file with the same name already exists in the folder.
        req_state["name"] = "new.txt"
        monkeypatch.setattr(module.FileService, "query", lambda **_kwargs: [SimpleNamespace(name="new.txt")])
        res = _run(module.rename.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.CONFLICT
        assert "Duplicated file name in the same folder." in res["message"]
        # Scenario 4: the file-table update fails.
        monkeypatch.setattr(module.FileService, "query", lambda **_kwargs: [])
        monkeypatch.setattr(module.FileService, "update_by_id", lambda *_args, **_kwargs: False)
        res = _run(module.rename.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.SERVER_ERROR
        assert "Database error (File rename)!" in res["message"]
        # Scenario 5: the linked document rename fails.
        monkeypatch.setattr(module.FileService, "update_by_id", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module.File2DocumentService, "get_by_file_id", lambda _file_id: [SimpleNamespace(document_id="doc1")])
        monkeypatch.setattr(module.DocumentService, "update_by_id", lambda *_args, **_kwargs: False)
        res = _run(module.rename.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.SERVER_ERROR
        assert "Database error (Document rename)!" in res["message"]
        # Scenario 6: no linked documents -> rename succeeds.
        monkeypatch.setattr(module.File2DocumentService, "get_by_file_id", lambda _file_id: [])
        res = _run(module.rename.__wrapped__("tenant1"))
        assert res["code"] == module.RetCode.SUCCESS
        assert res["data"] is True
        # Scenario 7: unexpected exception -> server_error_response.
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (_ for _ in ()).throw(RuntimeError("rename boom")))
        monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})
        res = _run(module.rename.__wrapped__("tenant1"))
        assert res["code"] == 500
        assert "rename boom" in res["message"]
    def test_get_file_branch_matrix(self, monkeypatch):
        """Cover ``get``: missing doc, storage fallback on a first miss, raised error."""
        module = _load_files_app(monkeypatch)
        # Scenario 1: the document id cannot be resolved.
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (False, None))
        res = _run(module.get.__wrapped__("tenant1", "missing"))
        assert res["code"] == module.RetCode.NOT_FOUND
        assert res["message"] == "Document not found!"
        class _Storage:
            # First get() returns None; the second returns the blob — presumably
            # the route retries with the File2Document storage address (TODO confirm).
            def __init__(self):
                self.calls = 0
            def get(self, _bucket, _location):
                self.calls += 1
                if self.calls == 1:
                    return None
                return b"blob-data"
        storage = _Storage()
        monkeypatch.setattr(module.settings, "STORAGE_IMPL", storage)
        monkeypatch.setattr(
            module.FileService,
            "get_by_id",
            lambda _file_id: (True, _DummyFile("f1", module.FileType.VISUAL.value, name="image.abc", parent_id="pf1", location="loc1")),
        )
        monkeypatch.setattr(module.File2DocumentService, "get_storage_address", lambda **_kwargs: ("pf2", "loc2"))
        async def fake_make_response(data):
            return _DummyResponse(data)
        monkeypatch.setattr(module, "make_response", fake_make_response)
        monkeypatch.setattr(
            module,
            "apply_safe_file_response_headers",
            lambda response, content_type, extension: response.headers.update(
                {"content_type": content_type, "extension": extension}
            ),
        )
        # Scenario 2: blob served; VISUAL type yields an image/* content type.
        res = _run(module.get.__wrapped__("tenant1", "f1"))
        assert isinstance(res, _DummyResponse)
        assert res.data == b"blob-data"
        assert res.headers["extension"] == "abc"
        assert res.headers["content_type"] == "image/abc"
        # Scenario 3: lookup raises -> server_error_response wraps it.
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (_ for _ in ()).throw(RuntimeError("get boom")))
        monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})
        res = _run(module.get.__wrapped__("tenant1", "f1"))
        assert res["code"] == 500
        assert "get boom" in res["message"]
    def test_download_attachment_branch_matrix(self, monkeypatch):
        """``download_attachment`` returns the blob with headers from the ``ext`` arg."""
        module = _load_files_app(monkeypatch)
        monkeypatch.setattr(module, "request", _DummyRequest(args={"ext": "abc"}))
        # The route offloads the storage read via thread_pool_exec; stub it out.
        async def fake_thread_pool_exec(_fn, _tenant_id, _attachment_id):
            return b"attachment"
        async def fake_make_response(data):
            return _DummyResponse(data)
        monkeypatch.setattr(module, "thread_pool_exec", fake_thread_pool_exec)
        monkeypatch.setattr(module, "make_response", fake_make_response)
        monkeypatch.setattr(
            module,
            "apply_safe_file_response_headers",
            lambda response, content_type, extension: response.headers.update(
                {"content_type": content_type, "extension": extension}
            ),
        )
        res = _run(module.download_attachment.__wrapped__("tenant1", "att1"))
        assert isinstance(res, _DummyResponse)
        assert res.data == b"attachment"
        assert res.headers["extension"] == "abc"
        assert res.headers["content_type"] == "application/abc"
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_file_app/test_file_routes.py",
"license": "Apache License 2.0",
"lines": 842,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_file_management_within_dataset/test_doc_sdk_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import sys
from pathlib import Path
from types import ModuleType, SimpleNamespace
import numpy as np
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _DummyFiles(dict):
def getlist(self, key):
return self.get(key, [])
class _DummyArgs(dict):
def getlist(self, key):
v = self.get(key, [])
if v is None:
return []
if isinstance(v, list):
return v
return [v]
class _DummyDoc:
def __init__(
self,
*,
doc_id="doc-1",
kb_id="kb-1",
name="doc.txt",
chunk_num=1,
token_num=2,
progress=0,
process_duration=0,
parser_id="naive",
doc_type=1,
status=True,
run=0,
):
self.id = doc_id
self.kb_id = kb_id
self.name = name
self.chunk_num = chunk_num
self.token_num = token_num
self.progress = progress
self.process_duration = process_duration
self.parser_id = parser_id
self.type = doc_type
self.status = status
self.run = run
def to_dict(self):
return {
"id": self.id,
"kb_id": self.kb_id,
"name": self.name,
"chunk_num": self.chunk_num,
"token_num": self.token_num,
"progress": self.progress,
"process_duration": self.process_duration,
"parser_id": self.parser_id,
"run": self.run,
"status": self.status,
}
class _ToggleBoolDocList:
def __init__(self, value):
self._calls = 0
self._value = value
def __getitem__(self, item):
return self._value
def __bool__(self):
self._calls += 1
return self._calls == 1
def _run(coro):
return asyncio.run(coro)
def _load_doc_module(monkeypatch):
    """Import ``api/apps/sdk/doc.py`` in isolation with heavy dependencies stubbed.

    Registers fake ``common``/``deepdoc`` packages (and an empty ``xgboost``)
    in ``sys.modules`` via monkeypatch so the module imports without the real
    parsers, then executes it with route registration neutralized through
    ``_DummyManager``.  Returns the freshly loaded module object.
    """
    repo_root = Path(__file__).resolve().parents[4]
    # Point a synthetic "common" package at the repo checkout so its submodules resolve.
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    # Build a fake "deepdoc" package tree carrying stub parser classes.
    deepdoc_pkg = ModuleType("deepdoc")
    deepdoc_parser_pkg = ModuleType("deepdoc.parser")
    deepdoc_parser_pkg.__path__ = []
    class _StubPdfParser:
        pass
    class _StubExcelParser:
        pass
    class _StubDocxParser:
        pass
    deepdoc_parser_pkg.PdfParser = _StubPdfParser
    deepdoc_parser_pkg.ExcelParser = _StubExcelParser
    deepdoc_parser_pkg.DocxParser = _StubDocxParser
    deepdoc_pkg.parser = deepdoc_parser_pkg
    monkeypatch.setitem(sys.modules, "deepdoc", deepdoc_pkg)
    monkeypatch.setitem(sys.modules, "deepdoc.parser", deepdoc_parser_pkg)
    deepdoc_excel_module = ModuleType("deepdoc.parser.excel_parser")
    deepdoc_excel_module.RAGFlowExcelParser = _StubExcelParser
    monkeypatch.setitem(sys.modules, "deepdoc.parser.excel_parser", deepdoc_excel_module)
    deepdoc_parser_utils = ModuleType("deepdoc.parser.utils")
    deepdoc_parser_utils.get_text = lambda *_args, **_kwargs: ""
    monkeypatch.setitem(sys.modules, "deepdoc.parser.utils", deepdoc_parser_utils)
    # Empty xgboost stub — presumably pulled in transitively by doc.py (TODO confirm).
    monkeypatch.setitem(sys.modules, "xgboost", ModuleType("xgboost"))
    module_path = repo_root / "api" / "apps" / "sdk" / "doc.py"
    spec = importlib.util.spec_from_file_location("test_doc_sdk_routes_unit", module_path)
    module = importlib.util.module_from_spec(spec)
    # Neutralize the blueprint so @manager.route(...) decorators become no-ops.
    module.manager = _DummyManager()
    spec.loader.exec_module(module)
    return module
def _patch_send_file(monkeypatch, module):
async def _fake_send_file(file_obj, **kwargs):
return {"file": file_obj, "filename": kwargs.get("attachment_filename")}
monkeypatch.setattr(module, "send_file", _fake_send_file)
def _patch_storage(monkeypatch, module, *, file_stream=b"abc"):
storage = SimpleNamespace(get=lambda *_args, **_kwargs: file_stream, rm=lambda *_args, **_kwargs: None)
monkeypatch.setattr(module.settings, "STORAGE_IMPL", storage)
def _patch_docstore(monkeypatch, module, **kwargs):
defaults = {
"delete": lambda *_args, **_kwargs: 0,
"update": lambda *_args, **_kwargs: None,
"get": lambda *_args, **_kwargs: {},
"insert": lambda *_args, **_kwargs: None,
"index_exist": lambda *_args, **_kwargs: False,
}
defaults.update(kwargs)
monkeypatch.setattr(module.settings, "docStoreConn", SimpleNamespace(**defaults))
@pytest.mark.p2
class TestDocRoutesUnit:
def test_chunk_positions_validation_error(self, monkeypatch):
module = _load_doc_module(monkeypatch)
with pytest.raises(ValueError) as exc_info:
module.Chunk(positions=[[1, 2, 3, 4]])
assert "length of 5" in str(exc_info.value)
    def test_upload_validation_and_upload_error(self, monkeypatch):
        """``upload`` rejects empty/over-long filenames and surfaces upload errors."""
        module = _load_doc_module(monkeypatch)
        class _FileObj:
            # Minimal stand-in for an uploaded file: only .filename is read here.
            def __init__(self, name):
                self.filename = name
        # Branch 1: empty filename -> "No file selected!".
        monkeypatch.setattr(module, "request", SimpleNamespace(form=_AwaitableValue({}), files=_AwaitableValue(_DummyFiles({"file": [_FileObj("")]}))))
        res = _run(module.upload.__wrapped__("ds-1", "tenant-1"))
        assert res["code"] == module.RetCode.ARGUMENT_ERROR
        assert res["message"] == "No file selected!"
        # Branch 2: filename exceeding FILE_NAME_LEN_LIMIT is rejected.
        long_name = "a" * (module.FILE_NAME_LEN_LIMIT + 1)
        monkeypatch.setattr(module, "request", SimpleNamespace(form=_AwaitableValue({}), files=_AwaitableValue(_DummyFiles({"file": [_FileObj(long_name)]}))))
        res = _run(module.upload.__wrapped__("ds-1", "tenant-1"))
        assert res["code"] == module.RetCode.ARGUMENT_ERROR
        assert "bytes or less" in res["message"]
        # Branch 3: FileService.upload_document reports a failure message.
        monkeypatch.setattr(module, "request", SimpleNamespace(form=_AwaitableValue({}), files=_AwaitableValue(_DummyFiles({"file": [_FileObj("ok.txt")]}))))
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _id: (True, SimpleNamespace()))
        monkeypatch.setattr(module.FileService, "upload_document", lambda *_args, **_kwargs: (["upload failed"], []))
        res = _run(module.upload.__wrapped__("ds-1", "tenant-1"))
        assert res["code"] == module.RetCode.SERVER_ERROR
        assert res["message"] == "upload failed"
    def test_update_doc_guards_and_error_paths(self, monkeypatch):
        """``update_doc`` guard clauses: ownership, read-only fields, metadata, rename."""
        module = _load_doc_module(monkeypatch)
        doc = _DummyDoc()
        # Guard 1: caller does not own the dataset.
        monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [])
        monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({}))
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert res["message"] == "You don't own the dataset."
        # Guard 2: the dataset record is missing.
        monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [1])
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _id: (False, None))
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert res["message"] == "Can't find this dataset!"
        # Guard 3: dataset exists but the document is not in it.
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _id: (True, SimpleNamespace(tenant_id="tenant-1")))
        monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [])
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert "doesn't own the document" in res["message"]
        # Guards 4-6: chunk_count / token_count / progress are read-only fields.
        monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [doc])
        monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"chunk_count": 100}))
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert "chunk_count" in res["message"]
        monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"token_count": 100}))
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert "token_count" in res["message"]
        monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"progress": 100}))
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert "progress" in res["message"]
        # Guard 7: meta_fields must be a dict; Guard 8: metadata update failure.
        monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"meta_fields": []}))
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert res["message"] == "meta_fields must be a dictionary"
        monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"meta_fields": {"k": "v"}}))
        monkeypatch.setattr(module.DocMetadataService, "update_document_metadata", lambda *_args, **_kwargs: False)
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert res["message"] == "Failed to update metadata"
        # Guard 9: a new name exceeding the filename length limit.
        monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"name": "a" * (module.FILE_NAME_LEN_LIMIT + 1)}))
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert res["code"] == module.RetCode.ARGUMENT_ERROR
        assert "bytes or less" in res["message"]
        # Guard 10: the rename fails at the DB layer.
        monkeypatch.setattr(module.DocumentService, "update_by_id", lambda *_args, **_kwargs: False)
        monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"name": "new.txt"}))
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert "Document rename" in res["message"]
    def test_update_doc_chunk_method_enabled_and_db_error(self, monkeypatch):
        """``update_doc`` chunk_method/enabled branches plus docStore and DB failures."""
        module = _load_doc_module(monkeypatch)
        # Visual documents cannot switch chunk method.
        visual_doc = _DummyDoc(parser_id="naive", doc_type=module.FileType.VISUAL)
        kb = SimpleNamespace(tenant_id="tenant-1")
        monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [1])
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _id: (True, kb))
        monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [visual_doc])
        monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"chunk_method": "naive"}))
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert res["message"] == "Not supported yet!"
        # chunk_method change: update_by_id failure surfaces as "Document not found!".
        doc = _DummyDoc(token_num=2, chunk_num=1, parser_id="naive")
        monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [doc])
        monkeypatch.setattr(module.DocumentService, "update_by_id", lambda *_args, **_kwargs: False)
        monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"chunk_method": "manual"}))
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert res["message"] == "Document not found!"
        # increment_chunk_num failure surfaces the same way.
        monkeypatch.setattr(module.DocumentService, "update_by_id", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module.DocumentService, "update_parser_config", lambda *_args, **_kwargs: None)
        monkeypatch.setattr(module.DocumentService, "increment_chunk_num", lambda *_args, **_kwargs: False)
        _patch_docstore(monkeypatch, module, delete=lambda *_args, **_kwargs: None)
        monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"chunk_method": "manual"}))
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert res["message"] == "Document not found!"
        # enabled toggle: update_by_id failure reports a document-update error.
        monkeypatch.setattr(module.DocumentService, "increment_chunk_num", lambda *_args, **_kwargs: True)
        doc_for_enabled = _DummyDoc(status=False)
        monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [doc_for_enabled])
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _id: (True, doc_for_enabled))
        monkeypatch.setattr(module.DocumentService, "update_by_id", lambda *_args, **_kwargs: False)
        _patch_docstore(monkeypatch, module, update=lambda *_args, **_kwargs: None, delete=lambda *_args, **_kwargs: None)
        monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"enabled": True}))
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert "Document update" in res["message"]
        # A docStore update that raises is wrapped by server_error_response.
        monkeypatch.setattr(module.DocumentService, "update_by_id", lambda *_args, **_kwargs: True)
        _patch_docstore(monkeypatch, module, update=lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("boom")), delete=lambda *_args, **_kwargs: None)
        monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert res["code"] == 500
        assert "boom" in res["message"]
        # get_by_id miss during the enabled branch.
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _id: (False, None))
        _patch_docstore(monkeypatch, module, update=lambda *_args, **_kwargs: None, delete=lambda *_args, **_kwargs: None)
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert res["message"] == "Dataset created failed"
        # cover token reset + docStore deletion branch
        doc_reset = _DummyDoc(token_num=3, chunk_num=2, parser_id="naive", run=0)
        monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [doc_reset])
        monkeypatch.setattr(module.DocumentService, "update_by_id", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module.DocumentService, "increment_chunk_num", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _id: (True, doc_reset))
        _patch_docstore(monkeypatch, module, delete=lambda *_args, **_kwargs: None, update=lambda *_args, **_kwargs: None)
        monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"chunk_method": "manual"}))
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert res["code"] == 0
        # module.OperationalError maps to a generic DB-failure message.
        def _raise_operational_error(_id):
            raise module.OperationalError("db down")
        monkeypatch.setattr(module.DocumentService, "get_by_id", _raise_operational_error)
        res = _run(module.update_doc.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert res["message"] == "Database operation failed"
    def test_download_and_download_doc_errors(self, monkeypatch):
        """Error branches of ``download`` and ``download_doc``, plus one success path."""
        module = _load_doc_module(monkeypatch)
        _patch_send_file(monkeypatch, module)
        _patch_storage(monkeypatch, module, file_stream=b"")
        # download: blank document id.
        res = _run(module.download.__wrapped__("tenant-1", "ds-1", ""))
        assert res["message"] == "Specify document_id please."
        # download: dataset not owned by the caller.
        monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [])
        res = _run(module.download.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert "do not own the dataset" in res["message"]
        # download: document not owned.
        monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [1])
        monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [])
        res = _run(module.download.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert "not own the document" in res["message"]
        # download: storage returns an empty stream.
        monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [_DummyDoc()])
        monkeypatch.setattr(module.File2DocumentService, "get_storage_address", lambda **_kwargs: ("b", "n"))
        res = _run(module.download.__wrapped__("tenant-1", "ds-1", "doc-1"))
        assert res["message"] == "This file is empty."
        # download_doc: malformed then invalid Authorization header.
        monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer"}))
        res = _run(module.download_doc("doc-1"))
        assert "Authorization is not valid" in res["message"]
        monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer token"}))
        monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [])
        res = _run(module.download_doc("doc-1"))
        assert "API key is invalid" in res["message"]
        # download_doc: blank id, unowned document, empty stream, then success.
        monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [SimpleNamespace()])
        res = _run(module.download_doc(""))
        assert res["message"] == "Specify document_id please."
        monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [])
        res = _run(module.download_doc("doc-1"))
        assert "not own the document" in res["message"]
        monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [_DummyDoc()])
        monkeypatch.setattr(module.File2DocumentService, "get_storage_address", lambda **_kwargs: ("b", "n"))
        _patch_storage(monkeypatch, module, file_stream=b"")
        res = _run(module.download_doc("doc-1"))
        assert res["message"] == "This file is empty."
        _patch_storage(monkeypatch, module, file_stream=b"abc")
        res = _run(module.download_doc("doc-1"))
        assert res["filename"] == "doc.txt"
    def test_list_docs_metadata_filters(self, monkeypatch):
        """``list_docs`` access control, metadata_condition validation, time filters."""
        module = _load_doc_module(monkeypatch)
        # Caller without access to the dataset is rejected.
        monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: False)
        monkeypatch.setattr(module, "request", SimpleNamespace(args=_DummyArgs()))
        res = module.list_docs.__wrapped__("ds-1", "tenant-1")
        assert "don't own the dataset" in res["message"]
        # metadata_condition must be parseable JSON...
        monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: True)
        monkeypatch.setattr(
            module,
            "request",
            SimpleNamespace(
                args=_DummyArgs(
                    {
                        "metadata_condition": "{bad json",
                    }
                )
            ),
        )
        res = module.list_docs.__wrapped__("ds-1", "tenant-1")
        assert res["message"] == "metadata_condition must be valid JSON."
        # ...and specifically a JSON object, not an array.
        monkeypatch.setattr(module, "request", SimpleNamespace(args=_DummyArgs({"metadata_condition": "[1]"})))
        res = module.list_docs.__wrapped__("ds-1", "tenant-1")
        assert res["message"] == "metadata_condition must be an object."
        # A valid condition that matches no documents yields an empty result set.
        monkeypatch.setattr(module.DocMetadataService, "get_flatted_meta_by_kbs", lambda _kbs: [{"doc_id": "x"}])
        monkeypatch.setattr(module, "meta_filter", lambda *_args, **_kwargs: [])
        monkeypatch.setattr(module, "convert_conditions", lambda cond: cond)
        monkeypatch.setattr(
            module,
            "request",
            SimpleNamespace(args=_DummyArgs({"metadata_condition": '{"conditions":[{"field":"x","op":"eq","value":"y"}]}'})),
        )
        res = module.list_docs.__wrapped__("ds-1", "tenant-1")
        assert res["code"] == module.RetCode.SUCCESS
        assert res["data"]["total"] == 0
        # create_time window [101, 200] excludes the doc created at t=100.
        monkeypatch.setattr(
            module.DocumentService,
            "get_list",
            lambda *_args, **_kwargs: ([{"id": "doc-1", "create_time": 100, "run": "0"}], 1),
        )
        monkeypatch.setattr(
            module,
            "request",
            SimpleNamespace(
                args=_DummyArgs(
                    {
                        "create_time_from": "101",
                        "create_time_to": "200",
                    }
                )
            ),
        )
        res = module.list_docs.__wrapped__("ds-1", "tenant-1")
        assert res["code"] == 0
        assert res["data"]["docs"] == []
def test_metadata_summary_and_batch_update(self, monkeypatch):
    """Cover metadata_summary (guard, happy path, error path) and every
    validation plus success branch of metadata_batch_update."""
    module = _load_doc_module(monkeypatch)
    monkeypatch.setattr(module, "convert_conditions", lambda cond: cond)
    # Both endpoints share the dataset-access guard.
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: False)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"selector": {}}))
    res = _run(module.metadata_batch_update.__wrapped__("ds-1", "tenant-1"))
    assert "don't own the dataset" in res["message"]
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: False)
    res = _run(module.metadata_summary.__wrapped__("ds-1", "tenant-1"))
    assert "don't own the dataset" in res["message"]
    # metadata_summary happy path returns the service's summary dict.
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: True)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"doc_ids": ["d1"]}))
    monkeypatch.setattr(module.DocMetadataService, "get_metadata_summary", lambda *_args, **_kwargs: {"k": 1})
    res = _run(module.metadata_summary.__wrapped__("ds-1", "tenant-1"))
    assert res["code"] == 0
    assert res["data"]["summary"] == {"k": 1}
    # Service exceptions are routed through server_error_response.
    monkeypatch.setattr(module.DocMetadataService, "get_metadata_summary", lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("x")))
    monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})
    res = _run(module.metadata_summary.__wrapped__("ds-1", "tenant-1"))
    assert res["code"] == 500
    # metadata_batch_update payload validation branches, one by one.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"selector": [1]}))
    res = _run(module.metadata_batch_update.__wrapped__("ds-1", "tenant-1"))
    assert res["message"] == "selector must be an object."
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"selector": {}, "updates": {"k": "v"}, "deletes": []}))
    res = _run(module.metadata_batch_update.__wrapped__("ds-1", "tenant-1"))
    assert res["message"] == "updates and deletes must be lists."
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"selector": {"metadata_condition": [1]}, "updates": [], "deletes": []}),
    )
    res = _run(module.metadata_batch_update.__wrapped__("ds-1", "tenant-1"))
    assert res["message"] == "metadata_condition must be an object."
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"selector": {"document_ids": "doc-1"}, "updates": [], "deletes": []}),
    )
    res = _run(module.metadata_batch_update.__wrapped__("ds-1", "tenant-1"))
    assert res["message"] == "document_ids must be a list."
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"selector": {}, "updates": [{"key": ""}], "deletes": []}),
    )
    res = _run(module.metadata_batch_update.__wrapped__("ds-1", "tenant-1"))
    assert "Each update requires key and value." in res["message"]
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"selector": {}, "updates": [], "deletes": [{"x": "y"}]}),
    )
    res = _run(module.metadata_batch_update.__wrapped__("ds-1", "tenant-1"))
    assert "Each delete requires key." in res["message"]
    # Selected document ids must belong to the dataset.
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue(
            {
                "selector": {"document_ids": ["bad"], "metadata_condition": {"conditions": []}},
                "updates": [{"key": "k", "value": "v"}],
                "deletes": [],
            }
        ),
    )
    monkeypatch.setattr(module.KnowledgebaseService, "list_documents_by_ids", lambda _ids: ["doc-1"])
    res = _run(module.metadata_batch_update.__wrapped__("ds-1", "tenant-1"))
    assert "do not belong to dataset" in res["message"]
    # Valid selector but meta_filter matches nothing -> zero updates.
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue(
            {
                "selector": {"document_ids": ["doc-1"], "metadata_condition": {"conditions": [{"f": "x"}]}},
                "updates": [{"key": "k", "value": "v"}],
                "deletes": [],
            }
        ),
    )
    monkeypatch.setattr(module, "meta_filter", lambda *_args, **_kwargs: [])
    monkeypatch.setattr(module.DocMetadataService, "get_flatted_meta_by_kbs", lambda _kbs: [])
    res = _run(module.metadata_batch_update.__wrapped__("ds-1", "tenant-1"))
    assert res["code"] == 0
    assert res["data"]["updated"] == 0
    assert res["data"]["matched_docs"] == 0
    # One matching doc -> one applied update.
    monkeypatch.setattr(module, "meta_filter", lambda *_args, **_kwargs: ["doc-1"])
    monkeypatch.setattr(module.DocMetadataService, "batch_update_metadata", lambda *_args, **_kwargs: 1)
    res = _run(module.metadata_batch_update.__wrapped__("ds-1", "tenant-1"))
    assert res["code"] == 0
    assert res["data"]["updated"] == 1
    assert res["data"]["matched_docs"] == 1
def test_delete_branches(self, monkeypatch):
    """Exercise document delete: access guard, missing tenant, failed
    removal, service exception, and duplicate-id-only input."""
    module = _load_doc_module(monkeypatch)
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: False)
    res = _run(module.delete.__wrapped__("tenant-1", "ds-1"))
    assert "don't own the dataset" in res["message"]
    # Set up a minimal happy-path scaffold, then break pieces one at a time.
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: True)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"ids": ["doc-1"]}))
    monkeypatch.setattr(module, "check_duplicate_ids", lambda ids, _kind: (ids, []))
    monkeypatch.setattr(module.FileService, "get_root_folder", lambda _tenant: {"id": "pf-1"})
    monkeypatch.setattr(module.FileService, "init_knowledgebase_docs", lambda *_args, **_kwargs: None)
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _id: (True, _DummyDoc()))
    monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _id: None)
    res = _run(module.delete.__wrapped__("tenant-1", "ds-1"))
    assert res["message"] == "Tenant not found!"
    # remove_document returning False surfaces a removal error.
    monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _id: "tenant-1")
    monkeypatch.setattr(module.File2DocumentService, "get_storage_address", lambda **_kwargs: ("b", "n"))
    monkeypatch.setattr(module.DocumentService, "remove_document", lambda *_args, **_kwargs: False)
    res = _run(module.delete.__wrapped__("tenant-1", "ds-1"))
    assert "Document removal" in res["message"]
    # An exception from get_by_id becomes a SERVER_ERROR response.
    def _raise_get_by_id(_id):
        raise RuntimeError("boom")
    monkeypatch.setattr(module.DocumentService, "get_by_id", _raise_get_by_id)
    res = _run(module.delete.__wrapped__("tenant-1", "ds-1"))
    assert res["code"] == module.RetCode.SERVER_ERROR
    assert "boom" in res["message"]
    # Only duplicate ids left after de-dup -> DATA_ERROR with the message.
    monkeypatch.setattr(module, "check_duplicate_ids", lambda _ids, _kind: ([], ["Duplicate document ids: doc-1"]))
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _id: (False, None))
    res = _run(module.delete.__wrapped__("tenant-1", "ds-1"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "Duplicate document ids" in res["message"]
def test_parse_branches(self, monkeypatch):
    """Exercise parse: access guard, unowned doc, already-running doc
    (update_by_id must not fire), success with duplicate warnings, and
    duplicate-id-only input."""
    module = _load_doc_module(monkeypatch)
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: False)
    res = _run(module.parse.__wrapped__("tenant-1", "ds-1"))
    assert "don't own the dataset" in res["message"]
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: True)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"document_ids": ["doc-1"]}))
    monkeypatch.setattr(module, "check_duplicate_ids", lambda ids, _kind: (ids, []))
    # _ToggleBoolDocList flips truthiness between checks to hit the
    # "don't own the document" branch.
    toggle_doc = _ToggleBoolDocList(_DummyDoc(progress=0))
    monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: toggle_doc)
    res = _run(module.parse.__wrapped__("tenant-1", "ds-1"))
    assert "don't own the document" in res["message"]
    # A RUNNING doc must be rejected without touching update_by_id.
    monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [_DummyDoc(run=module.TaskStatus.RUNNING.value)])
    monkeypatch.setattr(
        module.DocumentService,
        "update_by_id",
        lambda *_args, **_kwargs: (_ for _ in ()).throw(AssertionError("update_by_id must not be called for running docs")),
    )
    res = _run(module.parse.__wrapped__("tenant-1", "ds-1"))
    assert "currently being processed" in res["message"]
    # Happy path: one queued doc, duplicate warnings reported as errors.
    monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [_DummyDoc(progress=0)])
    monkeypatch.setattr(module.DocumentService, "update_by_id", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _id: (True, _DummyDoc()))
    monkeypatch.setattr(module.File2DocumentService, "get_storage_address", lambda **_kwargs: ("b", "n"))
    _patch_docstore(monkeypatch, module, delete=lambda *_args, **_kwargs: None)
    monkeypatch.setattr(module.TaskService, "filter_delete", lambda *_args, **_kwargs: None)
    monkeypatch.setattr(module, "queue_tasks", lambda *_args, **_kwargs: None)
    monkeypatch.setattr(module, "check_duplicate_ids", lambda ids, _kind: (ids, ["Duplicate document ids: doc-1"]))
    res = _run(module.parse.__wrapped__("tenant-1", "ds-1"))
    assert res["code"] == 0
    assert res["data"]["success_count"] == 1
    assert "Duplicate document ids" in res["data"]["errors"][0]
    # Only duplicates left -> DATA_ERROR.
    monkeypatch.setattr(module, "check_duplicate_ids", lambda _ids, _kind: ([], ["Duplicate document ids: doc-1"]))
    res = _run(module.parse.__wrapped__("tenant-1", "ds-1"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "Duplicate document ids" in res["message"]
def test_stop_parsing_branches(self, monkeypatch):
    """Exercise stop_parsing: access guard, missing document_ids, unowned
    doc, invalid (non-running) state, success with duplicate warnings,
    duplicate-only input, and plain success."""
    module = _load_doc_module(monkeypatch)
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: False)
    res = _run(module.stop_parsing.__wrapped__("tenant-1", "ds-1"))
    assert "don't own the dataset" in res["message"]
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: True)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({}))
    res = _run(module.stop_parsing.__wrapped__("tenant-1", "ds-1"))
    assert "`document_ids` is required" in res["message"]
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"document_ids": ["doc-1"]}))
    monkeypatch.setattr(module, "check_duplicate_ids", lambda ids, _kind: (ids, []))
    monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [])
    res = _run(module.stop_parsing.__wrapped__("tenant-1", "ds-1"))
    assert "don't own the document" in res["message"]
    # A DONE doc must short-circuit before cancel/update are reached; the
    # throwing lambdas assert those calls never happen.
    monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [_DummyDoc(run=module.TaskStatus.DONE.value)])
    monkeypatch.setattr(
        module,
        "cancel_all_task_of",
        lambda *_args, **_kwargs: (_ for _ in ()).throw(AssertionError("cancel_all_task_of must not be called for non-running docs")),
    )
    monkeypatch.setattr(
        module.DocumentService,
        "update_by_id",
        lambda *_args, **_kwargs: (_ for _ in ()).throw(AssertionError("update_by_id must not be called for non-running docs")),
    )
    res = _run(module.stop_parsing.__wrapped__("tenant-1", "ds-1"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert res["data"]["error_code"] == module.DOC_STOP_PARSING_INVALID_STATE_ERROR_CODE
    assert res["message"] == module.DOC_STOP_PARSING_INVALID_STATE_MESSAGE
    # RUNNING doc: cancel + update succeed, duplicates surface as warnings.
    monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [_DummyDoc(run=module.TaskStatus.RUNNING.value)])
    monkeypatch.setattr(module, "cancel_all_task_of", lambda *_args, **_kwargs: None)
    monkeypatch.setattr(module.DocumentService, "update_by_id", lambda *_args, **_kwargs: True)
    _patch_docstore(monkeypatch, module, delete=lambda *_args, **_kwargs: None)
    monkeypatch.setattr(module, "check_duplicate_ids", lambda ids, _kind: (ids, ["Duplicate document ids: doc-1"]))
    res = _run(module.stop_parsing.__wrapped__("tenant-1", "ds-1"))
    assert res["code"] == 0
    assert res["data"]["success_count"] == 1
    assert "Duplicate document ids" in res["data"]["errors"][0]
    # Only duplicates -> DATA_ERROR.
    monkeypatch.setattr(module, "check_duplicate_ids", lambda _ids, _kind: ([], ["Duplicate document ids: doc-1"]))
    res = _run(module.stop_parsing.__wrapped__("tenant-1", "ds-1"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "Duplicate document ids" in res["message"]
    # Clean success with no duplicate warnings.
    monkeypatch.setattr(module, "check_duplicate_ids", lambda ids, _kind: (ids, []))
    monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [_DummyDoc(run=module.TaskStatus.RUNNING.value)])
    res = _run(module.stop_parsing.__wrapped__("tenant-1", "ds-1"))
    assert res["code"] == 0
def test_list_chunks_branches(self, monkeypatch):
    """Exercise list_chunks: access guard, unowned doc, chunk lookups that
    return nothing or vector-only fields, and a well-formed chunk."""
    module = _load_doc_module(monkeypatch)
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: False)
    res = _run(module.list_chunks.__wrapped__("tenant-1", "ds-1", "doc-1"))
    assert "don't own the dataset" in res["message"]
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: True)
    monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [])
    res = _run(module.list_chunks.__wrapped__("tenant-1", "ds-1", "doc-1"))
    assert "don't own the document" in res["message"]
    # Docstore returns nothing for the requested chunk id.
    monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [_DummyDoc()])
    monkeypatch.setattr(module, "request", SimpleNamespace(args=_DummyArgs({"id": "chunk-1"})))
    _patch_docstore(monkeypatch, module, get=lambda *_args, **_kwargs: None)
    res = _run(module.list_chunks.__wrapped__("tenant-1", "ds-1", "doc-1"))
    assert "Chunk not found" in res["message"]
    # A record containing only *_vec fields is treated as not found.
    _patch_docstore(monkeypatch, module, get=lambda *_args, **_kwargs: {"id_vec": [1], "content_with_weight_vec": [2]})
    res = _run(module.list_chunks.__wrapped__("tenant-1", "ds-1", "doc-1"))
    assert "Chunk `chunk-1` not found." in res["message"]
    # Fully-populated chunk -> success with one normalized chunk.
    _patch_docstore(
        monkeypatch,
        module,
        get=lambda *_args, **_kwargs: {
            "chunk_id": "chunk-1",
            "content_with_weight": "x",
            "doc_id": "doc-1",
            "docnm_kwd": "doc",
            "position_int": [[1, 2, 3, 4, 5]],
        },
    )
    res = _run(module.list_chunks.__wrapped__("tenant-1", "ds-1", "doc-1"))
    assert res["code"] == 0
    assert res["data"]["total"] == 1
    assert res["data"]["chunks"][0]["id"] == "chunk-1"
def test_add_chunk_access_guard(self, monkeypatch):
    """add_chunk must refuse a caller that cannot access the dataset."""
    doc_module = _load_doc_module(monkeypatch)
    # Force the knowledgebase access check to fail for every caller.
    monkeypatch.setattr(doc_module.KnowledgebaseService, "accessible", lambda **_kwargs: False)
    response = _run(doc_module.add_chunk.__wrapped__("tenant-1", "ds-1", "doc-1"))
    assert "don't own the dataset" in response["message"]
def test_rm_chunk_branches(self, monkeypatch):
    """Exercise rm_chunk: access guard, missing document (LookupError),
    delete-all success, and duplicate chunk-id warnings."""
    module = _load_doc_module(monkeypatch)
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: False)
    res = _run(module.rm_chunk.__wrapped__("tenant-1", "ds-1", "doc-1"))
    assert "don't own the dataset" in res["message"]
    # No matching documents -> the route raises LookupError.
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: True)
    monkeypatch.setattr(module.DocumentService, "get_by_ids", lambda _ids: [])
    with pytest.raises(LookupError):
        _run(module.rm_chunk.__wrapped__("tenant-1", "ds-1", "doc-1"))
    # Empty payload deletes all chunks of the doc (docstore reports 2 removed).
    monkeypatch.setattr(module.DocumentService, "get_by_ids", lambda _ids: [_DummyDoc()])
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({}))
    _patch_docstore(monkeypatch, module, delete=lambda *_args, **_kwargs: 2)
    monkeypatch.setattr(module.DocumentService, "decrement_chunk_num", lambda *_args, **_kwargs: None)
    res = _run(module.rm_chunk.__wrapped__("tenant-1", "ds-1", "doc-1"))
    assert res["code"] == 0
    # Duplicate chunk ids succeed but surface as warnings in errors.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"chunk_ids": ["c1", "c1"]}))
    monkeypatch.setattr(module, "check_duplicate_ids", lambda _ids, _kind: (["c1"], ["Duplicate chunk ids: c1"]))
    _patch_docstore(monkeypatch, module, delete=lambda *_args, **_kwargs: 1)
    res = _run(module.rm_chunk.__wrapped__("tenant-1", "ds-1", "doc-1"))
    assert res["code"] == 0
    assert res["data"]["errors"] == ["Duplicate chunk ids: c1"]
def test_update_chunk_branches(self, monkeypatch):
    """Exercise update_chunk: missing chunk, access guard, unowned doc,
    invalid positions payload, success, and QA-parser content rules."""
    module = _load_doc_module(monkeypatch)
    _patch_docstore(monkeypatch, module, get=lambda *_args, **_kwargs: None)
    res = _run(module.update_chunk.__wrapped__("tenant-1", "ds-1", "doc-1", "chunk-1"))
    assert "Can't find this chunk" in res["message"]
    _patch_docstore(monkeypatch, module, get=lambda *_args, **_kwargs: {"content_with_weight": "q\na"})
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: False)
    res = _run(module.update_chunk.__wrapped__("tenant-1", "ds-1", "doc-1", "chunk-1"))
    assert "don't own the dataset" in res["message"]
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: True)
    monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [])
    res = _run(module.update_chunk.__wrapped__("tenant-1", "ds-1", "doc-1", "chunk-1"))
    assert "don't own the document" in res["message"]
    # Stub tokenization and embedding so the happy path runs offline.
    doc = _DummyDoc(parser_id="naive")
    monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [doc])
    monkeypatch.setattr(module.rag_tokenizer, "tokenize", lambda text: text or "")
    monkeypatch.setattr(module.rag_tokenizer, "fine_grained_tokenize", lambda text: text or "")
    monkeypatch.setattr(module.rag_tokenizer, "is_chinese", lambda _text: False)
    monkeypatch.setattr(module.DocumentService, "get_embd_id", lambda _doc_id: "embd")
    class _EmbedModel:
        # np is assumed to be numpy imported at this file's top (outside
        # this view) — TODO confirm.
        def encode(self, _texts):
            return [np.array([0.2, 0.8]), np.array([0.3, 0.7])], 1
    monkeypatch.setattr(module.TenantLLMService, "model_instance", lambda *_args, **_kwargs: _EmbedModel())
    # Non-list positions payload is rejected.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"positions": "bad"}))
    res = _run(module.update_chunk.__wrapped__("tenant-1", "ds-1", "doc-1", "chunk-1"))
    assert "`positions` should be a list" in res["message"]
    # Valid positions -> success.
    _patch_docstore(monkeypatch, module, get=lambda *_args, **_kwargs: {"content_with_weight": "x"}, update=lambda *_args, **_kwargs: None)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"positions": [[1, 2, 3, 4, 5]]}))
    res = _run(module.update_chunk.__wrapped__("tenant-1", "ds-1", "doc-1", "chunk-1"))
    assert res["code"] == 0
    # QA parser requires question/answer separated by a newline.
    qa_doc = _DummyDoc(parser_id=module.ParserType.QA)
    monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [qa_doc])
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"content": "no-separator"}))
    res = _run(module.update_chunk.__wrapped__("tenant-1", "ds-1", "doc-1", "chunk-1"))
    assert "Q&A must be separated" in res["message"]
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"content": "Q?\nA!"}))
    _patch_docstore(monkeypatch, module, get=lambda *_args, **_kwargs: {"content_with_weight": "Q?\nA!"}, update=lambda *_args, **_kwargs: None)
    monkeypatch.setattr(module, "beAdoc", lambda d, *_args, **_kwargs: d)
    res = _run(module.update_chunk.__wrapped__("tenant-1", "ds-1", "doc-1", "chunk-1"))
    assert res["code"] == 0
def test_retrieval_validation_matrix(self, monkeypatch):
    """Drive retrieval_test through its full validation matrix: payload
    type checks, access guards, embedding-model consistency, blank
    question, highlight coercion, missing dataset, the feature pipeline
    (cross-language + keyword + TOC + KG + child retrieval), and the
    not_found error translation."""
    module = _load_doc_module(monkeypatch)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"dataset_ids": "bad"}))
    res = _run(module.retrieval_test.__wrapped__("tenant-1"))
    assert "`dataset_ids` should be a list" in res["message"]
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"dataset_ids": ["ds-1"]}))
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: False)
    res = _run(module.retrieval_test.__wrapped__("tenant-1"))
    assert "don't own the dataset" in res["message"]
    # Datasets with different embedding models cannot be searched together.
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: True)
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_ids", lambda _ids: [SimpleNamespace(embd_id="m1"), SimpleNamespace(embd_id="m2")])
    monkeypatch.setattr(module.TenantLLMService, "split_model_name_and_factory", lambda embd_id: (embd_id, "f"))
    res = _run(module.retrieval_test.__wrapped__("tenant-1"))
    assert "different embedding models" in res["message"]
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_ids", lambda _ids: [SimpleNamespace(embd_id="m1", tenant_id="tenant-1")])
    res = _run(module.retrieval_test.__wrapped__("tenant-1"))
    assert "`question` is required." in res["message"]
    # Whitespace-only question short-circuits to an empty success result.
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"dataset_ids": ["ds-1"], "question": " "}),
    )
    res = _run(module.retrieval_test.__wrapped__("tenant-1"))
    assert res["code"] == 0
    assert res["data"]["chunks"] == []
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"dataset_ids": ["ds-1"], "question": "q", "document_ids": "bad"}),
    )
    res = _run(module.retrieval_test.__wrapped__("tenant-1"))
    assert "`documents` should be a list" in res["message"]
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"dataset_ids": ["ds-1"], "question": "q", "document_ids": ["not-owned"]}),
    )
    monkeypatch.setattr(module.KnowledgebaseService, "list_documents_by_ids", lambda _ids: ["doc-1"])
    res = _run(module.retrieval_test.__wrapped__("tenant-1"))
    assert "don't own the document" in res["message"]
    # metadata_condition path with an empty filter result still responds.
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"dataset_ids": ["ds-1"], "question": "q", "metadata_condition": {"logic": "and"}}),
    )
    monkeypatch.setattr(module.DocMetadataService, "get_meta_by_kbs", lambda _ids: [])
    monkeypatch.setattr(module, "meta_filter", lambda *_args, **_kwargs: [])
    res = _run(module.retrieval_test.__wrapped__("tenant-1"))
    assert "code" in res
    # highlight accepts the string "True" and the boolean True ...
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"dataset_ids": ["ds-1"], "question": "q", "highlight": "True"}),
    )
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_ids", lambda _ids: [SimpleNamespace(embd_id="m1", tenant_id="tenant-1")])
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _id: (True, SimpleNamespace(tenant_id="tenant-1", embd_id="m1")))
    class _Retriever:
        # Minimal retriever stub: no chunks, identity child expansion.
        async def retrieval(self, *_args, **_kwargs):
            return {"chunks": [], "total": 0}
        def retrieval_by_children(self, chunks, *_args, **_kwargs):
            return chunks
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: SimpleNamespace())
    monkeypatch.setattr(module, "label_question", lambda *_args, **_kwargs: {})
    monkeypatch.setattr(module.settings, "retriever", _Retriever())
    res = _run(module.retrieval_test.__wrapped__("tenant-1"))
    assert res["code"] == 0
    assert res["data"]["chunks"] == []
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"dataset_ids": ["ds-1"], "question": "q", "highlight": True}),
    )
    res = _run(module.retrieval_test.__wrapped__("tenant-1"))
    assert res["code"] == 0
    # ... but rejects other strings and non-boolean values.
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"dataset_ids": ["ds-1"], "question": "q", "highlight": "yes"}),
    )
    res = _run(module.retrieval_test.__wrapped__("tenant-1"))
    assert "`highlight` should be a boolean" in res["message"]
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"dataset_ids": ["ds-1"], "question": "q", "highlight": 1}),
    )
    res = _run(module.retrieval_test.__wrapped__("tenant-1"))
    assert "`highlight` should be a boolean" in res["message"]
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"dataset_ids": ["ds-1"], "question": "q"}),
    )
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _id: (False, None))
    res = _run(module.retrieval_test.__wrapped__("tenant-1"))
    assert "Dataset not found!" in res["message"]
    # Feature pipeline: record how cross-language, keyword extraction and
    # retrieval compose the final question, and merge TOC/child/KG chunks.
    feature_calls = {"cross": None, "keyword": None, "retrieval_question": None}
    async def _cross_languages(_tenant_id, _dialog, question, langs):
        feature_calls["cross"] = tuple(langs)
        return f"{question}-xl"
    async def _keyword_extraction(_chat_mdl, question):
        feature_calls["keyword"] = question
        return "-kw"
    class _FeatureRetriever:
        async def retrieval(self, question, *_args, **_kwargs):
            feature_calls["retrieval_question"] = question
            return {
                "chunks": [
                    {
                        "chunk_id": "c1",
                        "content_with_weight": "content",
                        "doc_id": "doc-1",
                        "kb_id": "ds-1",
                        "vector": [1, 2],
                    }
                ],
                "total": 1,
            }
        async def retrieval_by_toc(self, question, chunks, tenant_ids, _chat_mdl, size):
            # Verifies the fully-composed question and expected page size.
            assert question == "q-xl-kw"
            assert chunks and tenant_ids
            assert size == 30
            return [
                {
                    "chunk_id": "toc-1",
                    "content_with_weight": "toc content",
                    "doc_id": "doc-toc",
                    "kb_id": "ds-1",
                }
            ]
        def retrieval_by_children(self, chunks, _tenant_ids):
            return chunks + [
                {
                    "chunk_id": "child-1",
                    "content_with_weight": "child content",
                    "doc_id": "doc-child",
                    "kb_id": "ds-1",
                }
            ]
    class _FeatureKgRetriever:
        async def retrieval(self, *_args, **_kwargs):
            return {
                "chunk_id": "kg-1",
                "content_with_weight": "kg content",
                "doc_id": "doc-kg",
                "kb_id": "ds-1",
            }
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue(
            {
                "dataset_ids": ["ds-1"],
                "question": "q",
                "rerank_id": "rerank-1",
                "cross_languages": ["fr"],
                "keyword": True,
                "toc_enhance": True,
                "use_kg": True,
            }
        ),
    )
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _id: (True, SimpleNamespace(tenant_id="tenant-1", embd_id="m1")))
    monkeypatch.setattr(module, "cross_languages", _cross_languages)
    monkeypatch.setattr(module, "keyword_extraction", _keyword_extraction)
    monkeypatch.setattr(module.settings, "retriever", _FeatureRetriever())
    monkeypatch.setattr(module.settings, "kg_retriever", _FeatureKgRetriever())
    monkeypatch.setattr(module, "label_question", lambda *_args, **_kwargs: {})
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: SimpleNamespace())
    res = _run(module.retrieval_test.__wrapped__("tenant-1"))
    assert res["code"] == 0
    assert feature_calls["cross"] == ("fr",)
    assert feature_calls["keyword"] == "q-xl"
    assert feature_calls["retrieval_question"] == "q-xl-kw"
    # KG chunk is prepended; TOC and child chunks are merged into results.
    assert res["data"]["chunks"][0]["id"] == "kg-1"
    assert res["data"]["chunks"][0]["content"] == "kg content"
    assert any(chunk["id"] == "toc-1" for chunk in res["data"]["chunks"])
    assert any(chunk["id"] == "child-1" for chunk in res["data"]["chunks"])
    # A retriever error containing "not_found" maps to a friendly DATA_ERROR.
    class _NotFoundRetriever:
        async def retrieval(self, *_args, **_kwargs):
            raise Exception("boom not_found boom")
        def retrieval_by_children(self, chunks, *_args, **_kwargs):
            return chunks
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"dataset_ids": ["ds-1"], "question": "q"}),
    )
    monkeypatch.setattr(module.settings, "retriever", _NotFoundRetriever())
    res = _run(module.retrieval_test.__wrapped__("tenant-1"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "No chunk found! Check the chunk status please!" in res["message"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_file_management_within_dataset/test_doc_sdk_routes_unit.py",
"license": "Apache License 2.0",
"lines": 855,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_session_management/test_session_sdk_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import inspect
import json
import sys
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _Args(dict):
def get(self, key, default=None, type=None):
value = super().get(key, default)
if value is None or type is None:
return value
try:
return type(value)
except (TypeError, ValueError):
return default
class _StubHeaders:
def __init__(self):
self._items = []
def add_header(self, key, value):
self._items.append((key, value))
def get(self, key, default=None):
for existing_key, value in reversed(self._items):
if existing_key == key:
return value
return default
class _StubResponse:
    """Lightweight stand-in for a Quart/Flask response object."""

    def __init__(self, body, mimetype=None, content_type=None):
        # Raw payload (str/bytes/stream) plus the content-type metadata
        # the routes under test may inspect.
        self.body = body
        self.mimetype = mimetype
        self.content_type = content_type
        # Fresh, empty header container for each response instance.
        self.headers = _StubHeaders()
class _DummyUploadFile:
def __init__(self, filename):
self.filename = filename
self.saved_path = None
async def save(self, path):
self.saved_path = path
def _run(coro):
return asyncio.run(coro)
async def _collect_stream(body):
items = []
if hasattr(body, "__aiter__"):
async for item in body:
if isinstance(item, bytes):
item = item.decode("utf-8")
items.append(item)
else:
for item in body:
if isinstance(item, bytes):
item = item.decode("utf-8")
items.append(item)
return items
@pytest.fixture(scope="session")
def auth():
    """Session-wide stand-in auth token; these unit tests never hit HTTP."""
    return "unit-auth"
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    """Autouse override so no real tenant bootstrap runs for this module."""
    return None
def _load_session_module(monkeypatch):
    """Import api/apps/sdk/session.py in isolation and return the module.

    Heavy or optional dependencies (deepdoc parsers, xgboost, agent.canvas)
    are replaced with lightweight stub modules in sys.modules before the
    file executes, and the route manager is swapped for _DummyManager so
    no app registration happens. All sys.modules edits go through
    monkeypatch.setitem and are undone after each test.
    """
    repo_root = Path(__file__).resolve().parents[4]
    # Point the "common" package at the real repo directory so its
    # submodules import normally.
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    # Stub out the deepdoc parser hierarchy with empty parser classes.
    deepdoc_pkg = ModuleType("deepdoc")
    deepdoc_parser_pkg = ModuleType("deepdoc.parser")
    deepdoc_parser_pkg.__path__ = []
    class _StubPdfParser:
        pass
    class _StubExcelParser:
        pass
    class _StubDocxParser:
        pass
    deepdoc_parser_pkg.PdfParser = _StubPdfParser
    deepdoc_parser_pkg.ExcelParser = _StubExcelParser
    deepdoc_parser_pkg.DocxParser = _StubDocxParser
    deepdoc_pkg.parser = deepdoc_parser_pkg
    monkeypatch.setitem(sys.modules, "deepdoc", deepdoc_pkg)
    monkeypatch.setitem(sys.modules, "deepdoc.parser", deepdoc_parser_pkg)
    deepdoc_excel_module = ModuleType("deepdoc.parser.excel_parser")
    deepdoc_excel_module.RAGFlowExcelParser = _StubExcelParser
    monkeypatch.setitem(sys.modules, "deepdoc.parser.excel_parser", deepdoc_excel_module)
    deepdoc_mineru_module = ModuleType("deepdoc.parser.mineru_parser")
    class _StubMinerUParser:
        pass
    deepdoc_mineru_module.MinerUParser = _StubMinerUParser
    monkeypatch.setitem(sys.modules, "deepdoc.parser.mineru_parser", deepdoc_mineru_module)
    deepdoc_paddle_module = ModuleType("deepdoc.parser.paddleocr_parser")
    class _StubPaddleOCRParser:
        pass
    deepdoc_paddle_module.PaddleOCRParser = _StubPaddleOCRParser
    monkeypatch.setitem(sys.modules, "deepdoc.parser.paddleocr_parser", deepdoc_paddle_module)
    deepdoc_parser_utils = ModuleType("deepdoc.parser.utils")
    deepdoc_parser_utils.get_text = lambda *_args, **_kwargs: ""
    monkeypatch.setitem(sys.modules, "deepdoc.parser.utils", deepdoc_parser_utils)
    # xgboost is never exercised by these tests; an empty module suffices.
    monkeypatch.setitem(sys.modules, "xgboost", ModuleType("xgboost"))
    # Stub agent.canvas with a Canvas exposing just the API session.py uses.
    agent_pkg = ModuleType("agent")
    agent_pkg.__path__ = []
    agent_canvas_mod = ModuleType("agent.canvas")
    class _StubCanvas:
        def __init__(self, *_args, **_kwargs):
            self._dsl = "{}"
        def reset(self):
            return None
        def get_prologue(self):
            return "stub prologue"
        def get_component_input_form(self, _name):
            return {}
        def get_mode(self):
            return "chat"
        def __str__(self):
            return self._dsl
    agent_canvas_mod.Canvas = _StubCanvas
    agent_pkg.canvas = agent_canvas_mod
    monkeypatch.setitem(sys.modules, "agent", agent_pkg)
    monkeypatch.setitem(sys.modules, "agent.canvas", agent_canvas_mod)
    # Load session.py under a unique module name and inject the dummy
    # route manager before executing the module body.
    module_path = repo_root / "api" / "apps" / "sdk" / "session.py"
    spec = importlib.util.spec_from_file_location("test_session_sdk_routes_unit_module", module_path)
    module = importlib.util.module_from_spec(spec)
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, "test_session_sdk_routes_unit_module", module)
    spec.loader.exec_module(module)
    return module
@pytest.mark.p2
def test_create_and_update_guard_matrix(monkeypatch):
    """Walk every guard branch of the session ``create``/``update`` routes in order,
    asserting the exact error message each validation path produces."""
    module = _load_session_module(monkeypatch)
    # create: tenant owns no matching assistant -> ownership error.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"name": "session"}))
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [])
    res = _run(inspect.unwrap(module.create)("tenant-1", "chat-1"))
    assert res["message"] == "You do not own the assistant."
    # create: save appears to succeed but the re-read fails -> creation error.
    dia = SimpleNamespace(prompt_config={"prologue": "hello"})
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [dia])
    monkeypatch.setattr(module.ConversationService, "save", lambda **_kwargs: None)
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (False, None))
    res = _run(inspect.unwrap(module.create)("tenant-1", "chat-1"))
    assert "Fail to create a session" in res["message"]
    # create_agent_session: unknown agent id.
    monkeypatch.setattr(module, "request", SimpleNamespace(args=_Args()))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (False, None))
    res = _run(inspect.unwrap(module.create_agent_session)("tenant-1", "agent-1"))
    assert res["message"] == "Agent not found."
    # create_agent_session: agent exists but the tenant cannot access it.
    canvas = SimpleNamespace(dsl="{}", id="agent-1")
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, canvas))
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [])
    res = _run(inspect.unwrap(module.create_agent_session)("tenant-1", "agent-1"))
    assert res["message"] == "You cannot access the agent."
    # update: session id not found.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({}))
    monkeypatch.setattr(module.ConversationService, "query", lambda **_kwargs: [])
    res = _run(inspect.unwrap(module.update)("tenant-1", "chat-1", "session-1"))
    assert res["message"] == "Session does not exist"
    # update: session exists but the chat is not owned by the tenant.
    monkeypatch.setattr(module.ConversationService, "query", lambda **_kwargs: [SimpleNamespace(id="session-1")])
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [])
    res = _run(inspect.unwrap(module.update)("tenant-1", "chat-1", "session-1"))
    assert res["message"] == "You do not own the session"
    # update: immutable fields ("message", "reference") and an empty name are rejected.
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [SimpleNamespace(id="chat-1")])
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"message": []}))
    res = _run(inspect.unwrap(module.update)("tenant-1", "chat-1", "session-1"))
    assert "`message` can not be change" in res["message"]
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"reference": []}))
    res = _run(inspect.unwrap(module.update)("tenant-1", "chat-1", "session-1"))
    assert "`reference` can not be change" in res["message"]
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"name": ""}))
    res = _run(inspect.unwrap(module.update)("tenant-1", "chat-1", "session-1"))
    assert "`name` can not be empty" in res["message"]
    # update: persistence layer reports failure.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"name": "renamed"}))
    monkeypatch.setattr(module.ConversationService, "update_by_id", lambda *_args, **_kwargs: False)
    res = _run(inspect.unwrap(module.update)("tenant-1", "chat-1", "session-1"))
    assert res["message"] == "Session updates error"
@pytest.mark.p2
def test_chat_completion_metadata_and_stream_paths(monkeypatch):
    """chat_completion streaming paths: null body defaults, a metadata condition
    that matches no documents, and removal of a legacy ``doc_ids`` field."""
    module = _load_session_module(monkeypatch)
    monkeypatch.setattr(module, "Response", _StubResponse)
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [SimpleNamespace(kb_ids=["kb-1"])])
    monkeypatch.setattr(module.DocMetadataService, "get_flatted_meta_by_kbs", lambda _kb_ids: [{"id": "doc-1"}])
    monkeypatch.setattr(module, "convert_conditions", lambda cond: cond.get("conditions", []))
    # meta_filter returning no doc ids simulates "condition matched nothing".
    monkeypatch.setattr(module, "meta_filter", lambda *_args, **_kwargs: [])
    captured_requests = []

    async def fake_rag_completion(_tenant_id, _chat_id, **req):
        # Record the request kwargs the route forwards, then emit one answer.
        captured_requests.append(req)
        yield {"answer": "ok"}

    monkeypatch.setattr(module, "rag_completion", fake_rag_completion)
    # A null JSON body still streams; the question defaults to "".
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue(None))
    resp = _run(inspect.unwrap(module.chat_completion)("tenant-1", "chat-1"))
    assert isinstance(resp, _StubResponse)
    assert resp.headers.get("Content-Type") == "text/event-stream; charset=utf-8"
    _run(_collect_stream(resp.body))
    assert captured_requests[-1].get("question") == ""
    req_with_conditions = {
        "question": "hello",
        "session_id": "session-1",
        "metadata_condition": {"logic": "and", "conditions": [{"name": "author", "value": "bob"}]},
        "stream": True,
    }
    monkeypatch.setattr(module.ConversationService, "query", lambda **_kwargs: [SimpleNamespace(id="session-1")])
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue(req_with_conditions))
    resp = _run(inspect.unwrap(module.chat_completion)("tenant-1", "chat-1"))
    _run(_collect_stream(resp.body))
    # No documents matched the metadata condition -> sentinel doc id "-999".
    assert captured_requests[-1].get("doc_ids") == "-999"
    req_without_conditions = {
        "question": "hello",
        "session_id": "session-1",
        "metadata_condition": {"logic": "and", "conditions": []},
        "stream": True,
        "doc_ids": "legacy",
    }
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue(req_without_conditions))
    resp = _run(inspect.unwrap(module.chat_completion)("tenant-1", "chat-1"))
    _run(_collect_stream(resp.body))
    # With an empty condition list the legacy doc_ids key is dropped entirely.
    assert "doc_ids" not in captured_requests[-1]
@pytest.mark.p2
def test_openai_chat_validation_matrix_unit(monkeypatch):
    """Each malformed OpenAI-style payload is rejected with its specific message."""
    module = _load_session_module(monkeypatch)
    monkeypatch.setattr(module, "num_tokens_from_string", lambda _text: 1)
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [SimpleNamespace(kb_ids=["kb-1"])])
    # (request body, expected fragment of the error message)
    matrix = [
        (
            {"model": "model", "messages": [{"role": "user", "content": "hello"}], "extra_body": "bad"},
            "extra_body must be an object.",
        ),
        (
            {"model": "model", "messages": [{"role": "user", "content": "hello"}], "extra_body": {"reference_metadata": "bad"}},
            "reference_metadata must be an object.",
        ),
        (
            {"model": "model", "messages": [{"role": "user", "content": "hello"}], "extra_body": {"reference_metadata": {"fields": "bad"}}},
            "reference_metadata.fields must be an array.",
        ),
        (
            {"model": "model", "messages": []},
            "You have to provide messages.",
        ),
        (
            {"model": "model", "messages": [{"role": "assistant", "content": "hello"}]},
            "The last content of this conversation is not from user.",
        ),
        (
            {"model": "model", "messages": [{"role": "user", "content": "hello"}], "extra_body": {"metadata_condition": "bad"}},
            "metadata_condition must be an object.",
        ),
    ]
    for body, fragment in matrix:
        # Bind the body as a default argument so each lambda keeps its own payload.
        monkeypatch.setattr(module, "get_request_json", lambda b=body: _AwaitableValue(b))
        outcome = _run(inspect.unwrap(module.chat_completion_openai_like)("tenant-1", "chat-1"))
        assert fragment in outcome["message"]
@pytest.mark.p2
def test_openai_stream_generator_branches_unit(monkeypatch):
    """Streaming OpenAI-style completion: reasoning (think) tags, empty/normal
    answers, final usage/reference chunks, mid-stream error conversion, and the
    terminating ``data:[DONE]`` marker."""
    module = _load_session_module(monkeypatch)
    monkeypatch.setattr(module, "Response", _StubResponse)
    monkeypatch.setattr(module, "num_tokens_from_string", lambda text: len(text or ""))
    monkeypatch.setattr(module, "convert_conditions", lambda cond: cond.get("conditions", []))
    monkeypatch.setattr(module, "meta_filter", lambda *_args, **_kwargs: [])
    monkeypatch.setattr(module.DocMetadataService, "get_flatted_meta_by_kbs", lambda _kb_ids: [{"id": "doc-1"}])
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [SimpleNamespace(kb_ids=["kb-1"])])
    monkeypatch.setattr(module, "_build_reference_chunks", lambda *_args, **_kwargs: [{"id": "ref-1"}])

    async def fake_async_chat(_dia, _msg, _stream, **_kwargs):
        # Drive every stream branch, then raise so the generator must emit
        # an **ERROR** chunk instead of crashing the response.
        yield {"start_to_think": True}
        yield {"answer": "R"}
        yield {"end_to_think": True}
        yield {"answer": ""}
        yield {"answer": "C"}
        yield {"final": True, "answer": "DONE", "reference": {"chunks": []}}
        raise RuntimeError("boom")

    monkeypatch.setattr(module, "async_chat", fake_async_chat)
    payload = {
        "model": "model",
        "stream": True,
        "messages": [
            {"role": "system", "content": "sys"},
            {"role": "assistant", "content": "preface"},
            {"role": "user", "content": "hello"},
        ],
        "extra_body": {
            "reference": True,
            "reference_metadata": {"include": True, "fields": ["author"]},
            "metadata_condition": {"logic": "and", "conditions": [{"name": "author", "value": "bob"}]},
        },
    }
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue(payload))
    resp = _run(inspect.unwrap(module.chat_completion_openai_like)("tenant-1", "chat-1"))
    assert isinstance(resp, _StubResponse)
    assert resp.headers.get("Content-Type") == "text/event-stream; charset=utf-8"
    chunks = _run(_collect_stream(resp.body))
    assert any("reasoning_content" in chunk for chunk in chunks)
    assert any("**ERROR**: boom" in chunk for chunk in chunks)
    assert any('"usage"' in chunk for chunk in chunks)
    assert any('"reference"' in chunk for chunk in chunks)
    assert chunks[-1].strip() == "data:[DONE]"
@pytest.mark.p2
def test_openai_nonstream_branch_unit(monkeypatch):
    """Non-streaming OpenAI-style completion returns the answer as a choice."""
    module = _load_session_module(monkeypatch)
    monkeypatch.setattr(module, "jsonify", lambda payload: payload)
    monkeypatch.setattr(module, "num_tokens_from_string", lambda text: len(text or ""))
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [SimpleNamespace(kb_ids=[])])

    async def stub_chat(_dia, _msg, _stream, **_kwargs):
        # Single terminal answer with an empty reference payload.
        yield {"answer": "world", "reference": {}}

    monkeypatch.setattr(module, "async_chat", stub_chat)
    request_body = {
        "model": "model",
        "messages": [{"role": "user", "content": "hello"}],
        "stream": False,
    }
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue(request_body))
    outcome = _run(inspect.unwrap(module.chat_completion_openai_like)("tenant-1", "chat-1"))
    assert outcome["choices"][0]["message"]["content"] == "world"
@pytest.mark.p2
def test_agents_openai_compatibility_unit(monkeypatch):
    """OpenAI-compatible agent route: message validation, agent ownership,
    system-only rejection, then streaming and non-streaming call plumbing.

    Fix: ``json.dumps`` is used below but no module-level ``import json`` is
    visible in this file's import header, so import it locally to avoid a
    NameError. (Harmless if a module-level import exists elsewhere.)
    """
    import json  # NOTE(review): local import guards against a missing module-level one

    module = _load_session_module(monkeypatch)
    monkeypatch.setattr(module, "Response", _StubResponse)
    monkeypatch.setattr(module, "jsonify", lambda payload: payload)
    monkeypatch.setattr(module, "num_tokens_from_string", lambda text: len(text or ""))
    # Empty message list is rejected up front.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"model": "model", "messages": []}))
    res = _run(inspect.unwrap(module.agents_completion_openai_compatibility)("tenant-1", "agent-1"))
    assert "at least one message" in res["message"]
    # Tenant does not own the agent.
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"model": "model", "messages": [{"role": "user", "content": "hello"}]}),
    )
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [])
    res = _run(inspect.unwrap(module.agents_completion_openai_compatibility)("tenant-1", "agent-1"))
    assert "don't own the agent" in res["message"]
    # Only system messages -> no usable user content.
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [SimpleNamespace(id="agent-1")])
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"model": "model", "messages": [{"role": "system", "content": "system only"}]}),
    )
    res = _run(inspect.unwrap(module.agents_completion_openai_compatibility)("tenant-1", "agent-1"))
    assert "No valid messages found" in json.dumps(res)
    captured_calls = []

    async def _completion_openai_stream(*args, **kwargs):
        captured_calls.append((args, kwargs))
        yield "data:stream"

    monkeypatch.setattr(module, "completion_openai", _completion_openai_stream)
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue(
            {
                "model": "model",
                "messages": [
                    {"role": "assistant", "content": "preface"},
                    {"role": "user", "content": "latest question"},
                ],
                "stream": True,
                "metadata": {"id": "meta-session"},
            }
        ),
    )
    resp = _run(inspect.unwrap(module.agents_completion_openai_compatibility)("tenant-1", "agent-1"))
    assert isinstance(resp, _StubResponse)
    assert resp.headers.get("Content-Type") == "text/event-stream; charset=utf-8"
    _run(_collect_stream(resp.body))
    # The last user message is forwarded as the question (third positional arg).
    assert captured_calls[-1][0][2] == "latest question"

    async def _completion_openai_nonstream(*args, **kwargs):
        captured_calls.append((args, kwargs))
        yield {"id": "non-stream"}

    monkeypatch.setattr(module, "completion_openai", _completion_openai_nonstream)
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue(
            {
                "model": "model",
                "messages": [
                    {"role": "user", "content": "first"},
                    {"role": "assistant", "content": "middle"},
                    {"role": "user", "content": "final user"},
                ],
                "stream": False,
                "session_id": "session-1",
                "temperature": 0.5,
            }
        ),
    )
    res = _run(inspect.unwrap(module.agents_completion_openai_compatibility)("tenant-1", "agent-1"))
    assert res["id"] == "non-stream"
    assert captured_calls[-1][0][2] == "final user"
    assert captured_calls[-1][1]["stream"] is False
    assert captured_calls[-1][1]["session_id"] == "session-1"
@pytest.mark.p2
def test_agent_completions_stream_and_nonstream_unit(monkeypatch):
    """agent_completions: streaming with trace collection, non-streaming
    aggregation, and graceful handling of malformed event payloads.

    Fix: ``json.dumps`` is used to build SSE events but no module-level
    ``import json`` is visible in this file's import header, so import it
    locally to avoid a NameError. (Harmless if imported elsewhere.)
    """
    import json  # NOTE(review): local import guards against a missing module-level one

    module = _load_session_module(monkeypatch)
    monkeypatch.setattr(module, "Response", _StubResponse)

    async def _agent_stream(*_args, **_kwargs):
        # First chunk is not valid JSON; the route must pass it through.
        yield "data:not-json"
        yield "data:" + json.dumps({"event": "node_finished", "data": {"component_id": "c1"}})
        yield "data:" + json.dumps({"event": "other", "data": {}})
        yield "data:" + json.dumps({"event": "message", "data": {"content": "hello"}})

    monkeypatch.setattr(module, "agent_completion", _agent_stream)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"stream": True, "return_trace": True}))
    resp = _run(inspect.unwrap(module.agent_completions)("tenant-1", "agent-1"))
    chunks = _run(_collect_stream(resp.body))
    assert resp.headers.get("Content-Type") == "text/event-stream; charset=utf-8"
    assert any('"trace"' in chunk for chunk in chunks)
    assert any("hello" in chunk for chunk in chunks)
    assert chunks[-1].strip() == "data:[DONE]"

    async def _agent_nonstream(*_args, **_kwargs):
        yield "data:" + json.dumps({"event": "message", "data": {"content": "A", "reference": {"doc": "r"}}})
        yield "data:" + json.dumps({"event": "node_finished", "data": {"component_id": "c2"}})

    monkeypatch.setattr(module, "agent_completion", _agent_nonstream)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"stream": False, "return_trace": True}))
    res = _run(inspect.unwrap(module.agent_completions)("tenant-1", "agent-1"))
    assert res["data"]["data"]["content"] == "A"
    assert res["data"]["data"]["reference"] == {"doc": "r"}
    assert res["data"]["data"]["trace"][0]["component_id"] == "c2"

    async def _agent_nonstream_broken(*_args, **_kwargs):
        # Truncated JSON: the non-stream collector must surface an error string.
        yield "data:{"

    monkeypatch.setattr(module, "agent_completion", _agent_nonstream_broken)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"stream": False, "return_trace": False}))
    res = _run(inspect.unwrap(module.agent_completions)("tenant-1", "agent-1"))
    assert res["data"].startswith("**ERROR**")
@pytest.mark.p2
def test_list_session_projection_unit(monkeypatch):
    """list_session projects stored conversations into the public shape:
    drops the raw reference list and internal prompts, keeps chunk data."""
    module = _load_session_module(monkeypatch)
    monkeypatch.setattr(module, "request", SimpleNamespace(args=_Args({})))
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [SimpleNamespace(id="chat-1")])
    stored_chunk = {
        "chunk_id": "chunk-1",
        "content_with_weight": "weighted",
        "doc_id": "doc-1",
        "docnm_kwd": "doc-name",
        "kb_id": "kb-1",
        "image_id": "img-1",
        "positions": [1, 2],
    }
    stored_conversations = [
        {
            "id": "session-1",
            "dialog_id": "chat-1",
            "message": [{"role": "assistant", "content": "hello", "prompt": "internal"}],
            "reference": [{"chunks": [stored_chunk]}],
        }
    ]
    monkeypatch.setattr(module.ConversationService, "get_list", lambda *_args, **_kwargs: stored_conversations)
    res = _run(inspect.unwrap(module.list_session)("tenant-1", "chat-1"))
    session = res["data"][0]
    assert session["chat_id"] == "chat-1"
    assert "reference" not in session
    first_message = session["messages"][0]
    assert "prompt" not in first_message
    assert first_message["reference"][0]["positions"] == [1, 2]
@pytest.mark.p2
def test_list_agent_session_projection_unit(monkeypatch):
    """list_agent_session projection: tolerates a non-list ``reference`` shape
    and non-dict chunk entries while still projecting valid chunks."""
    module = _load_session_module(monkeypatch)
    monkeypatch.setattr(module, "request", SimpleNamespace(args=_Args({})))
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [SimpleNamespace(id="agent-1")])
    # A dict reference (instead of the expected list) must not break projection.
    conv_non_list_reference = {
        "id": "session-1",
        "dialog_id": "agent-1",
        "message": [{"role": "assistant", "content": "hello", "prompt": "internal"}],
        "reference": {"unexpected": "shape"},
    }
    monkeypatch.setattr(module.API4ConversationService, "get_list", lambda *_args, **_kwargs: (1, [conv_non_list_reference]))
    res = _run(inspect.unwrap(module.list_agent_session)("tenant-1", "agent-1"))
    assert res["data"][0]["agent_id"] == "agent-1"
    assert "prompt" not in res["data"][0]["messages"][0]
    # Chunk lists may contain junk entries; only dict chunks are projected.
    conv_with_chunks = {
        "id": "session-2",
        "dialog_id": "agent-1",
        "message": [
            {"role": "user", "content": "question"},
            {"role": "assistant", "content": "answer", "prompt": "internal"},
        ],
        "reference": [
            {
                "chunks": [
                    "not-a-dict",
                    {
                        "chunk_id": "chunk-2",
                        "content_with_weight": "weighted",
                        "doc_id": "doc-2",
                        "docnm_kwd": "doc-name-2",
                        "kb_id": "kb-2",
                        "image_id": "img-2",
                        "positions": [9],
                    },
                ]
            }
        ],
    }
    monkeypatch.setattr(module.API4ConversationService, "get_list", lambda *_args, **_kwargs: (1, [conv_with_chunks]))
    res = _run(inspect.unwrap(module.list_agent_session)("tenant-1", "agent-1"))
    projected_chunk = res["data"][0]["messages"][1]["reference"][0]
    assert projected_chunk["image_id"] == "img-2"
    assert projected_chunk["positions"] == [9]
@pytest.mark.p2
def test_delete_routes_partial_duplicate_unit(monkeypatch):
    """Session deletion: partial success with per-id errors, total failure,
    duplicate-id reporting, and the agent-session delete happy path."""
    module = _load_session_module(monkeypatch)
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [SimpleNamespace(id="chat-1")])
    monkeypatch.setattr(module.ConversationService, "delete_by_id", lambda *_args, **_kwargs: True)

    # Only id "ok" resolves; any other explicit id lookup returns nothing.
    def _conversation_query(**kwargs):
        if "id" not in kwargs:
            return [SimpleNamespace(id="seed")]
        if kwargs["id"] == "ok":
            return [SimpleNamespace(id="ok")]
        return []

    monkeypatch.setattr(module.ConversationService, "query", _conversation_query)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"ids": ["ok", "bad"]}))
    monkeypatch.setattr(module, "check_duplicate_ids", lambda ids, _kind: (ids, []))
    res = _run(inspect.unwrap(module.delete)("tenant-1", "chat-1"))
    assert res["code"] == 0
    assert res["data"]["success_count"] == 1
    assert res["data"]["errors"] == ["The chat doesn't own the session bad"]
    # Every requested id fails -> plain error message instead of a summary.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"ids": ["bad"]}))
    monkeypatch.setattr(module, "check_duplicate_ids", lambda ids, _kind: (ids, []))
    res = _run(inspect.unwrap(module.delete)("tenant-1", "chat-1"))
    assert res["message"] == "The chat doesn't own the session bad"
    # Duplicate ids are deduplicated and reported in the errors list.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"ids": ["ok", "ok"]}))
    monkeypatch.setattr(module, "check_duplicate_ids", lambda ids, _kind: (["ok"], ["Duplicate session ids: ok"]))
    res = _run(inspect.unwrap(module.delete)("tenant-1", "chat-1"))
    assert res["code"] == 0
    assert res["data"]["success_count"] == 1
    assert res["data"]["errors"] == ["Duplicate session ids: ok"]
    # Agent-session delete happy path.
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [SimpleNamespace(id="agent-1")])
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"ids": ["session-1"]}))
    monkeypatch.setattr(module, "check_duplicate_ids", lambda ids, _kind: (ids, []))

    def _agent_query(**kwargs):
        if "id" not in kwargs:
            return [SimpleNamespace(id="session-1")]
        if kwargs["id"] == "session-1":
            return [SimpleNamespace(id="session-1")]
        return []

    monkeypatch.setattr(module.API4ConversationService, "query", _agent_query)
    monkeypatch.setattr(module.API4ConversationService, "delete_by_id", lambda *_args, **_kwargs: True)
    res = _run(inspect.unwrap(module.delete_agent_session)("tenant-1", "agent-1"))
    assert res["code"] == 0
@pytest.mark.p2
def test_delete_agent_session_error_matrix_unit(monkeypatch):
    """delete_agent_session error matrix: partial failure, all ids failing,
    and duplicate-id reporting."""
    module = _load_session_module(monkeypatch)
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [SimpleNamespace(id="agent-1")])
    monkeypatch.setattr(module.API4ConversationService, "delete_by_id", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"ids": ["ok", "missing"]}))
    monkeypatch.setattr(module, "check_duplicate_ids", lambda ids, _kind: (ids, []))

    # Only "ok" resolves; "missing" triggers a per-id error entry.
    def _query_partial(**kwargs):
        if "id" not in kwargs:
            return [SimpleNamespace(id="ok"), SimpleNamespace(id="missing")]
        if kwargs["id"] == "ok":
            return [SimpleNamespace(id="ok")]
        return []

    monkeypatch.setattr(module.API4ConversationService, "query", _query_partial)
    res = _run(inspect.unwrap(module.delete_agent_session)("tenant-1", "agent-1"))
    assert res["data"]["success_count"] == 1
    assert res["data"]["errors"] == ["The agent doesn't own the session missing"]
    # Every id fails -> plain error message instead of a summary payload.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"ids": ["missing"]}))

    def _query_all_failed(**kwargs):
        if "id" not in kwargs:
            return [SimpleNamespace(id="missing")]
        return []

    monkeypatch.setattr(module.API4ConversationService, "query", _query_all_failed)
    res = _run(inspect.unwrap(module.delete_agent_session)("tenant-1", "agent-1"))
    assert res["message"] == "The agent doesn't own the session missing"
    # Duplicate ids are deduplicated and reported in the errors list.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"ids": ["ok", "ok"]}))
    monkeypatch.setattr(module, "check_duplicate_ids", lambda ids, _kind: (["ok"], ["Duplicate session ids: ok"]))

    def _query_duplicate(**kwargs):
        if "id" not in kwargs:
            return [SimpleNamespace(id="ok")]
        if kwargs["id"] == "ok":
            return [SimpleNamespace(id="ok")]
        return []

    monkeypatch.setattr(module.API4ConversationService, "query", _query_duplicate)
    res = _run(inspect.unwrap(module.delete_agent_session)("tenant-1", "agent-1"))
    assert res["data"]["success_count"] == 1
    assert res["data"]["errors"] == ["Duplicate session ids: ok"]
@pytest.mark.p2
def test_sessions_ask_route_validation_and_stream_unit(monkeypatch):
    """ask_about: input validation matrix, dataset access and parsed-file
    checks, then the SSE stream including mid-stream error conversion."""
    module = _load_session_module(monkeypatch)
    monkeypatch.setattr(module, "Response", _StubResponse)
    # Missing question.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"dataset_ids": ["kb-1"]}))
    res = _run(inspect.unwrap(module.ask_about)("tenant-1"))
    assert res["message"] == "`question` is required."
    # Missing dataset ids.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"question": "q"}))
    res = _run(inspect.unwrap(module.ask_about)("tenant-1"))
    assert res["message"] == "`dataset_ids` is required."
    # dataset_ids must be a list, not a bare string.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"question": "q", "dataset_ids": "kb-1"}))
    res = _run(inspect.unwrap(module.ask_about)("tenant-1"))
    assert res["message"] == "`dataset_ids` should be a list."
    # Tenant lacks access to the dataset.
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: False)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"question": "q", "dataset_ids": ["kb-1"]}))
    res = _run(inspect.unwrap(module.ask_about)("tenant-1"))
    assert res["message"] == "You don't own the dataset kb-1."
    # Dataset accessible but has no parsed chunks.
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [SimpleNamespace(chunk_num=0)])
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"question": "q", "dataset_ids": ["kb-1"]}))
    res = _run(inspect.unwrap(module.ask_about)("tenant-1"))
    assert res["message"] == "The dataset kb-1 doesn't own parsed file"
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [SimpleNamespace(chunk_num=1)])
    captured = {}

    async def _streaming_async_ask(question, kb_ids, uid):
        # Record the forwarded args, emit one answer, then blow up so the
        # route must convert the exception into an SSE error chunk.
        captured["question"] = question
        captured["kb_ids"] = kb_ids
        captured["uid"] = uid
        yield {"answer": "first"}
        raise RuntimeError("ask stream boom")

    monkeypatch.setattr(module, "async_ask", _streaming_async_ask)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"question": "q", "dataset_ids": ["kb-1"]}))
    resp = _run(inspect.unwrap(module.ask_about)("tenant-1"))
    assert isinstance(resp, _StubResponse)
    assert resp.headers.get("Content-Type") == "text/event-stream; charset=utf-8"
    chunks = _run(_collect_stream(resp.body))
    assert any('"answer": "first"' in chunk for chunk in chunks)
    assert any('"code": 500' in chunk and "**ERROR**: ask stream boom" in chunk for chunk in chunks)
    assert '"data": true' in chunks[-1].lower()
    assert captured == {"question": "q", "kb_ids": ["kb-1"], "uid": "tenant-1"}
@pytest.mark.p2
def test_sessions_related_questions_prompt_build_unit(monkeypatch):
    """related_questions requires a question, builds the industry-aware prompt,
    and parses the numbered LLM reply into a list of terms."""
    module = _load_session_module(monkeypatch)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({}))
    res = _run(inspect.unwrap(module.related_questions)("tenant-1"))
    assert res["message"] == "`question` is required."
    seen = {}

    class _RecordingBundle:
        # Capture constructor args and the chat call, then return a reply
        # whose numbered lines should be parsed and whose plain line dropped.
        def __init__(self, *args, **kwargs):
            seen["bundle_args"] = args
            seen["bundle_kwargs"] = kwargs

        async def async_chat(self, prompt, messages, options):
            seen["prompt"] = prompt
            seen["messages"] = messages
            seen["options"] = options
            return "1. First related\n2. Second related\nplain text"

    monkeypatch.setattr(module, "LLMBundle", _RecordingBundle)
    payload = {"question": "solar energy", "industry": "renewables"}
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue(payload))
    res = _run(inspect.unwrap(module.related_questions)("tenant-1"))
    assert res["data"] == ["First related", "Second related"]
    assert "Keep the term length between 2-4 words" in seen["prompt"]
    assert "related terms can also help search engines" in seen["prompt"]
    assert "Ensure all search terms are relevant to the industry: renewables." in seen["prompt"]
    assert "Keywords: solar energy" in seen["messages"][0]["content"]
    assert seen["options"] == {"temperature": 0.9}
@pytest.mark.p2
def test_chatbot_routes_auth_stream_nonstream_unit(monkeypatch):
    """chatbot_completions / chatbots_inputs: bearer-token validation, API-key
    lookup, streaming vs non-streaming completions, and dialog lookup failure."""
    module = _load_session_module(monkeypatch)
    monkeypatch.setattr(module, "Response", _StubResponse)
    # "Bearer" with no token -> invalid authorization header.
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer"}))
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({}))
    res = _run(inspect.unwrap(module.chatbot_completions)("dialog-1"))
    assert res["message"] == "Authorization is not valid!"
    # Token present but no matching API token record.
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer bad"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [])
    res = _run(inspect.unwrap(module.chatbot_completions)("dialog-1"))
    assert "API key is invalid" in res["message"]
    stream_calls = []

    async def _iframe_stream(dialog_id, **req):
        # Record the forwarded dialog id and request kwargs.
        stream_calls.append((dialog_id, dict(req)))
        yield "data:stream-chunk"

    monkeypatch.setattr(module, "iframe_completion", _iframe_stream)
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer ok"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"stream": True}))
    resp = _run(inspect.unwrap(module.chatbot_completions)("dialog-1"))
    assert isinstance(resp, _StubResponse)
    assert resp.headers.get("Content-Type") == "text/event-stream; charset=utf-8"
    _run(_collect_stream(resp.body))
    assert stream_calls[-1][0] == "dialog-1"
    # quote defaults to False when the request does not set it.
    assert stream_calls[-1][1]["quote"] is False

    async def _iframe_nonstream(_dialog_id, **_req):
        yield {"answer": "non-stream"}

    monkeypatch.setattr(module, "iframe_completion", _iframe_nonstream)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"stream": False, "quote": True}))
    res = _run(inspect.unwrap(module.chatbot_completions)("dialog-1"))
    assert res["data"]["answer"] == "non-stream"
    # chatbots_inputs repeats the same auth checks.
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer"}))
    res = _run(inspect.unwrap(module.chatbots_inputs)("dialog-1"))
    assert res["message"] == "Authorization is not valid!"
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer invalid"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [])
    res = _run(inspect.unwrap(module.chatbots_inputs)("dialog-1"))
    assert "API key is invalid" in res["message"]
    # Valid token but unknown dialog id.
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer ok"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
    monkeypatch.setattr(module.DialogService, "get_by_id", lambda _dialog_id: (False, None))
    res = _run(inspect.unwrap(module.chatbots_inputs)("dialog-404"))
    assert res["message"] == "Can't find dialog by ID: dialog-404"
@pytest.mark.p2
def test_agentbot_routes_auth_stream_nonstream_unit(monkeypatch):
    """agent_bot_completions / begin_inputs: bearer-token validation, API-key
    lookup, streaming vs non-streaming completions, and agent lookup failure."""
    module = _load_session_module(monkeypatch)
    monkeypatch.setattr(module, "Response", _StubResponse)
    # "Bearer" with no token -> invalid authorization header.
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer"}))
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({}))
    res = _run(inspect.unwrap(module.agent_bot_completions)("agent-1"))
    assert res["message"] == "Authorization is not valid!"
    # Token present but no matching API token record.
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer bad"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [])
    res = _run(inspect.unwrap(module.agent_bot_completions)("agent-1"))
    assert "API key is invalid" in res["message"]

    async def _agent_stream(*_args, **_kwargs):
        yield "data:agent-stream"

    monkeypatch.setattr(module, "agent_completion", _agent_stream)
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer ok"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"stream": True}))
    resp = _run(inspect.unwrap(module.agent_bot_completions)("agent-1"))
    assert isinstance(resp, _StubResponse)
    assert resp.headers.get("Content-Type") == "text/event-stream; charset=utf-8"
    _run(_collect_stream(resp.body))

    async def _agent_nonstream(*_args, **_kwargs):
        yield {"answer": "agent-non-stream"}

    monkeypatch.setattr(module, "agent_completion", _agent_nonstream)
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"stream": False}))
    res = _run(inspect.unwrap(module.agent_bot_completions)("agent-1"))
    assert res["data"]["answer"] == "agent-non-stream"
    # begin_inputs repeats the same auth checks.
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer"}))
    res = _run(inspect.unwrap(module.begin_inputs)("agent-1"))
    assert res["message"] == "Authorization is not valid!"
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer bad"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [])
    res = _run(inspect.unwrap(module.begin_inputs)("agent-1"))
    assert "API key is invalid" in res["message"]
    # Valid token but unknown agent id.
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer ok"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _agent_id: (False, None))
    res = _run(inspect.unwrap(module.begin_inputs)("agent-404"))
    assert res["message"] == "Can't find agent by ID: agent-404"
@pytest.mark.p2
def test_searchbots_ask_embedded_auth_and_stream_unit(monkeypatch):
    """ask_about_embedded: bearer-token validation, API-key lookup, then the
    SSE stream with search-config forwarding and mid-stream error conversion."""
    module = _load_session_module(monkeypatch)
    monkeypatch.setattr(module, "Response", _StubResponse)
    # "Bearer" with no token -> invalid authorization header.
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer"}))
    res = _run(inspect.unwrap(module.ask_about_embedded)())
    assert res["message"] == "Authorization is not valid!"
    # Token present but no matching API token record.
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer bad"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [])
    res = _run(inspect.unwrap(module.ask_about_embedded)())
    assert "API key is invalid" in res["message"]
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer ok"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"question": "embedded q", "kb_ids": ["kb-1"], "search_id": "search-1"}),
    )
    monkeypatch.setattr(module.SearchService, "get_detail", lambda _search_id: {"search_config": {"mode": "test"}})
    captured = {}

    async def _embedded_async_ask(question, kb_ids, uid, search_config=None):
        # Record forwarded args, emit one answer, then blow up so the route
        # must convert the exception into an SSE error chunk.
        captured["question"] = question
        captured["kb_ids"] = kb_ids
        captured["uid"] = uid
        captured["search_config"] = search_config
        yield {"answer": "embedded-answer"}
        raise RuntimeError("embedded stream boom")

    monkeypatch.setattr(module, "async_ask", _embedded_async_ask)
    resp = _run(inspect.unwrap(module.ask_about_embedded)())
    assert isinstance(resp, _StubResponse)
    assert resp.headers.get("Content-Type") == "text/event-stream; charset=utf-8"
    chunks = _run(_collect_stream(resp.body))
    assert any('"answer": "embedded-answer"' in chunk for chunk in chunks)
    assert any('"code": 500' in chunk and "**ERROR**: embedded stream boom" in chunk for chunk in chunks)
    assert '"data": true' in chunks[-1].lower()
    # The search config fetched by search_id is forwarded to async_ask.
    assert captured["search_config"] == {"mode": "test"}
@pytest.mark.p2
def test_searchbots_retrieval_test_embedded_matrix_unit(monkeypatch):
    """Matrix test for ``retrieval_test_embedded``: auth, validation, and retrieval paths."""
    module = _load_session_module(monkeypatch)
    handler = inspect.unwrap(module.retrieval_test_embedded)
    # --- auth failures: malformed header, then unknown API token ---
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer"}))
    res = _run(handler())
    assert res["message"] == "Authorization is not valid!"
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer invalid"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [])
    res = _run(handler())
    assert "API key is invalid" in res["message"]
    # --- request validation: missing kb_id, empty tenant, inaccessible kb ---
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer ok"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"kb_id": [], "question": "q"}))
    res = _run(handler())
    assert res["message"] == "Please specify dataset firstly."
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="")])
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"kb_id": "kb-1", "question": "q"}))
    res = _run(handler())
    # NOTE: asserts the production message verbatim, typo ("denined") included.
    assert res["message"] == "permission denined."
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"kb_id": ["kb-no-access"], "question": "q"}))
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-a")])
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [])
    res = _run(handler())
    assert "Only owner of dataset authorized for this operation." in res["message"]
    # --- meta-data filter path: KB lookup fails after the CHAT model was built ---
    llm_calls = []
    def _fake_llm_bundle(tenant_id, llm_type, *args, **kwargs):
        llm_calls.append((tenant_id, llm_type, args, kwargs))
        return SimpleNamespace(tenant_id=tenant_id, llm_type=llm_type, args=args, kwargs=kwargs)
    monkeypatch.setattr(module, "LLMBundle", _fake_llm_bundle)
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"kb_id": "kb-1", "question": "q", "meta_data_filter": {"method": "auto"}}),
    )
    monkeypatch.setattr(module.DocMetadataService, "get_flatted_meta_by_kbs", lambda _kb_ids: [{"id": "doc-1"}])
    async def _apply_filter(_meta_filter, _metas, _question, _chat_mdl, _local_doc_ids):
        return ["doc-filtered"]
    monkeypatch.setattr(module, "apply_meta_data_filter", _apply_filter)
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-a")])
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [SimpleNamespace(id="kb-1")])
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
    res = _run(handler())
    assert res["message"] == "Knowledgebase not found!"
    assert any(call[1] == module.LLMType.CHAT for call in llm_calls)
    llm_calls.clear()
    # --- happy path: search_config overrides, cross-language translation,
    # label rank features, and vector stripping from returned chunks ---
    retrieval_capture = {}
    async def _fake_retrieval(
        question,
        embd_mdl,
        tenant_ids,
        kb_ids,
        page,
        size,
        similarity_threshold,
        vector_similarity_weight,
        top,
        local_doc_ids,
        rerank_mdl=None,
        highlight=None,
        rank_feature=None,
    ):
        retrieval_capture.update(
            {
                "question": question,
                "embd_mdl": embd_mdl,
                "tenant_ids": tenant_ids,
                "kb_ids": kb_ids,
                "page": page,
                "size": size,
                "similarity_threshold": similarity_threshold,
                "vector_similarity_weight": vector_similarity_weight,
                "top": top,
                "local_doc_ids": local_doc_ids,
                "rerank_mdl": rerank_mdl,
                "highlight": highlight,
                "rank_feature": rank_feature,
            }
        )
        return {"chunks": [{"id": "chunk-1", "vector": [0.1]}]}
    async def _translate(_tenant_id, _chat_id, question, _langs):
        return question + "-translated"
    monkeypatch.setattr(module, "cross_languages", _translate)
    monkeypatch.setattr(module, "label_question", lambda _question, _kbs: ["label-1"])
    monkeypatch.setattr(module.settings, "retriever", SimpleNamespace(retrieval=_fake_retrieval))
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue(
            {
                "kb_id": "kb-1",
                "question": "translated-q",
                "doc_ids": ["doc-seed"],
                "cross_languages": ["es"],
                "search_id": "search-1",
            }
        ),
    )
    monkeypatch.setattr(
        module.SearchService,
        "get_detail",
        lambda _search_id: {
            "search_config": {
                "meta_data_filter": {"method": "auto"},
                "chat_id": "chat-for-filter",
                "similarity_threshold": 0.42,
                "vector_similarity_weight": 0.8,
                "top_k": 7,
                "rerank_id": "reranker-model",
            }
        },
    )
    monkeypatch.setattr(module.DocMetadataService, "get_flatted_meta_by_kbs", lambda _kb_ids: [{"id": "doc-2"}])
    monkeypatch.setattr(module, "apply_meta_data_filter", _apply_filter)
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-a")])
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [SimpleNamespace(id="kb-1")])
    monkeypatch.setattr(
        module.KnowledgebaseService,
        "get_by_id",
        lambda _kb_id: (True, SimpleNamespace(tenant_id="tenant-kb", embd_id="embd-model")),
    )
    res = _run(handler())
    assert res["code"] == 0
    assert res["data"]["labels"] == ["label-1"]
    assert "vector" not in res["data"]["chunks"][0]
    # Search-config values must win over defaults; question is translated first.
    assert retrieval_capture["kb_ids"] == ["kb-1"]
    assert retrieval_capture["tenant_ids"] == ["tenant-a"]
    assert retrieval_capture["question"] == "translated-q-translated"
    assert retrieval_capture["similarity_threshold"] == 0.42
    assert retrieval_capture["vector_similarity_weight"] == 0.8
    assert retrieval_capture["top"] == 7
    assert retrieval_capture["local_doc_ids"] == ["doc-filtered"]
    assert retrieval_capture["rank_feature"] == ["label-1"]
    assert retrieval_capture["rerank_mdl"] is not None
    assert any(call[1] == module.LLMType.EMBEDDING.value and call[3].get("llm_name") == "embd-model" for call in llm_calls)
    llm_calls.clear()
    # --- keyword extraction + knowledge-graph retrieval path ---
    async def _fake_keyword_extraction(_chat_mdl, question):
        return f"-{question}-keywords"
    async def _fake_kg_retrieval(question, tenant_ids, kb_ids, _embd_mdl, _chat_mdl):
        return {
            "id": "kg-chunk",
            "question": question,
            "tenant_ids": tenant_ids,
            "kb_ids": kb_ids,
            "content_with_weight": 1,
            "vector": [0.5],
        }
    monkeypatch.setattr(module, "keyword_extraction", _fake_keyword_extraction)
    monkeypatch.setattr(module.settings, "kg_retriever", SimpleNamespace(retrieval=_fake_kg_retrieval))
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue(
            {
                "kb_id": "kb-1",
                "question": "keyword-q",
                "rerank_id": "manual-reranker",
                "keyword": True,
                "use_kg": True,
            }
        ),
    )
    monkeypatch.setattr(
        module.KnowledgebaseService,
        "get_by_id",
        lambda _kb_id: (True, SimpleNamespace(tenant_id="tenant-kb", embd_id="embd-model")),
    )
    res = _run(handler())
    assert res["code"] == 0
    # The KG chunk is prepended and vectors are stripped from every chunk.
    assert res["data"]["chunks"][0]["id"] == "kg-chunk"
    assert all("vector" not in chunk for chunk in res["data"]["chunks"])
    assert any(call[1] == module.LLMType.RERANK.value for call in llm_calls)
    # --- a retrieval error containing "not_found" maps to the friendly message ---
    async def _raise_not_found(*_args, **_kwargs):
        raise RuntimeError("x not_found y")
    monkeypatch.setattr(module.settings, "retriever", SimpleNamespace(retrieval=_raise_not_found))
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"kb_id": "kb-1", "question": "q"}),
    )
    res = _run(handler())
    assert res["message"] == "No chunk found! Check the chunk status please!"
@pytest.mark.p2
def test_searchbots_related_questions_embedded_matrix_unit(monkeypatch):
    """Matrix test for ``related_questions_embedded``: auth failures and chat-driven suggestions."""
    module = _load_session_module(monkeypatch)
    handler = inspect.unwrap(module.related_questions_embedded)
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer"}))
    res = _run(handler())
    assert res["message"] == "Authorization is not valid!"
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer bad"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [])
    res = _run(handler())
    assert "API key is invalid" in res["message"]
    # Token matches but carries an empty tenant id -> permission failure.
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer ok"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="")])
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"question": "q"}))
    res = _run(handler())
    # NOTE: asserts the production message verbatim, typo ("denined") included.
    assert res["message"] == "permission denined."
    # Happy path: the chat model returns a numbered list; only numbered
    # lines are kept and the numbering is stripped.
    captured = {}
    class _FakeChatBundle:
        async def async_chat(self, prompt, messages, options):
            captured["prompt"] = prompt
            captured["messages"] = messages
            captured["options"] = options
            return "1. Alpha\n2. Beta\nignored"
    def _fake_bundle(*args, **_kwargs):
        captured["bundle_args"] = args
        return _FakeChatBundle()
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"question": "solar", "search_id": "search-1"}),
    )
    monkeypatch.setattr(
        module.SearchService,
        "get_detail",
        lambda _search_id: {"search_config": {"chat_id": "chat-x", "llm_setting": {"temperature": 0.2}}},
    )
    monkeypatch.setattr(module, "LLMBundle", _fake_bundle)
    res = _run(handler())
    assert res["code"] == 0
    assert res["data"] == ["Alpha", "Beta"]
    # The bundle is built for the configured chat model with the tenant id,
    # and search_config.llm_setting is forwarded as chat options.
    assert captured["bundle_args"] == ("tenant-1", module.LLMType.CHAT, "chat-x")
    assert captured["options"] == {"temperature": 0.2}
    assert "Keywords: solar" in captured["messages"][0]["content"]
@pytest.mark.p2
def test_searchbots_detail_share_embedded_matrix_unit(monkeypatch):
    """Matrix test for ``detail_share_embedded``: auth, permission, and lookup outcomes."""
    module = _load_session_module(monkeypatch)
    handler = inspect.unwrap(module.detail_share_embedded)
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer"}, args={"search_id": "s-1"}))
    res = _run(handler())
    assert res["message"] == "Authorization is not valid!"
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer bad"}, args={"search_id": "s-1"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [])
    res = _run(handler())
    assert "API key is invalid" in res["message"]
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer ok"}, args={"search_id": "s-1"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="")])
    res = _run(handler())
    # NOTE: asserts the production message verbatim, typo ("denined") included.
    assert res["message"] == "permission denined."
    # Tenant is valid but the search app is not visible to any of its tenants.
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-a")])
    monkeypatch.setattr(module.SearchService, "query", lambda **_kwargs: [])
    res = _run(handler())
    assert res["code"] == module.RetCode.OPERATING_ERROR
    assert "Has no permission for this operation." in res["message"]
    # Visible, but the detail lookup comes back empty.
    monkeypatch.setattr(module.SearchService, "query", lambda **_kwargs: [SimpleNamespace(id="s-1")])
    monkeypatch.setattr(module.SearchService, "get_detail", lambda _sid: None)
    res = _run(handler())
    assert res["message"] == "Can't find this Search App!"
    # Happy path returns the detail payload unchanged.
    monkeypatch.setattr(module.SearchService, "get_detail", lambda _sid: {"id": "s-1", "name": "search-app"})
    res = _run(handler())
    assert res["code"] == 0
    assert res["data"]["id"] == "s-1"
@pytest.mark.p2
def test_searchbots_mindmap_embedded_matrix_unit(monkeypatch):
    """Matrix test for ``mindmap``: auth, default/search-scoped configs, and generator errors."""
    module = _load_session_module(monkeypatch)
    handler = inspect.unwrap(module.mindmap)
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer"}))
    res = _run(handler())
    assert res["message"] == "Authorization is not valid!"
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer bad"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [])
    res = _run(handler())
    assert "API key is invalid" in res["message"]
    # Without a search_id the generator receives an empty search_config.
    monkeypatch.setattr(module, "request", SimpleNamespace(headers={"Authorization": "Bearer ok"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"question": "q", "kb_ids": ["kb-1"]}))
    captured = {}
    async def _gen_ok(question, kb_ids, tenant_id, search_config):
        captured["params"] = (question, kb_ids, tenant_id, search_config)
        return {"nodes": [question]}
    monkeypatch.setattr(module, "gen_mindmap", _gen_ok)
    res = _run(handler())
    assert res["code"] == 0
    assert res["data"] == {"nodes": ["q"]}
    assert captured["params"] == ("q", ["kb-1"], "tenant-1", {})
    # With a search_id the stored search_config is forwarded instead.
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"question": "q2", "kb_ids": ["kb-1"], "search_id": "search-1"}),
    )
    monkeypatch.setattr(module.SearchService, "get_detail", lambda _sid: {"search_config": {"mode": "graph"}})
    res = _run(handler())
    assert res["code"] == 0
    assert captured["params"] == ("q2", ["kb-1"], "tenant-1", {"mode": "graph"})
    # A result carrying an "error" key is surfaced as a failure message.
    async def _gen_error(*_args, **_kwargs):
        return {"error": "mindmap boom"}
    monkeypatch.setattr(module, "gen_mindmap", _gen_error)
    res = _run(handler())
    assert "mindmap boom" in res["message"]
@pytest.mark.p2
def test_sequence2txt_embedded_validation_and_stream_matrix_unit(monkeypatch):
    """Matrix test for ``sequence2txt``: upload validation, tenant/ASR checks, sync and streaming transcription."""
    module = _load_session_module(monkeypatch)
    handler = inspect.unwrap(module.sequence2txt)
    monkeypatch.setattr(module, "Response", _StubResponse)
    # Avoid touching the real filesystem for the temp audio file.
    monkeypatch.setattr(module.tempfile, "mkstemp", lambda suffix: (11, f"/tmp/audio{suffix}"))
    monkeypatch.setattr(module.os, "close", lambda _fd: None)
    def _set_request(form, files):
        # Rebind module.request with awaitable form/files, mimicking Quart.
        monkeypatch.setattr(
            module,
            "request",
            SimpleNamespace(form=_AwaitableValue(form), files=_AwaitableValue(files)),
        )
    # Missing file part.
    _set_request({"stream": "false"}, {})
    res = _run(handler("tenant-1"))
    assert "Missing 'file' in multipart form-data" in res["message"]
    # Unsupported extension.
    _set_request({"stream": "false"}, {"file": _DummyUploadFile("bad.txt")})
    res = _run(handler("tenant-1"))
    assert "Unsupported audio format: .txt" in res["message"]
    # Unknown tenant.
    _set_request({"stream": "false"}, {"file": _DummyUploadFile("audio.wav")})
    monkeypatch.setattr(module.TenantService, "get_info_by", lambda _tid: [])
    res = _run(handler("tenant-1"))
    assert res["message"] == "Tenant not found!"
    # Tenant exists but has no default ASR model configured.
    _set_request({"stream": "false"}, {"file": _DummyUploadFile("audio.wav")})
    monkeypatch.setattr(module.TenantService, "get_info_by", lambda _tid: [{"tenant_id": "tenant-1", "asr_id": ""}])
    res = _run(handler("tenant-1"))
    assert res["message"] == "No default ASR model is set"
    class _SyncASR:
        def transcription(self, _path):
            return "transcribed text"
        def stream_transcription(self, _path):
            return []
    # Non-streaming success; a failing temp-file cleanup must not break the response.
    _set_request({"stream": "false"}, {"file": _DummyUploadFile("audio.wav")})
    monkeypatch.setattr(module.TenantService, "get_info_by", lambda _tid: [{"tenant_id": "tenant-1", "asr_id": "asr-x"}])
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: _SyncASR())
    monkeypatch.setattr(module.os, "remove", lambda _path: (_ for _ in ()).throw(RuntimeError("cleanup fail")))
    res = _run(handler("tenant-1"))
    assert res["code"] == 0
    assert res["data"]["text"] == "transcribed text"
    class _StreamASR:
        def transcription(self, _path):
            return ""
        def stream_transcription(self, _path):
            yield {"event": "partial", "text": "hello"}
    # Streaming success: an SSE response relaying the partial events.
    _set_request({"stream": "true"}, {"file": _DummyUploadFile("audio.wav")})
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: _StreamASR())
    monkeypatch.setattr(module.os, "remove", lambda _path: None)
    resp = _run(handler("tenant-1"))
    assert isinstance(resp, _StubResponse)
    assert resp.content_type == "text/event-stream"
    chunks = _run(_collect_stream(resp.body))
    assert any('"event": "partial"' in chunk for chunk in chunks)
    class _ErrorASR:
        def transcription(self, _path):
            return ""
        def stream_transcription(self, _path):
            raise RuntimeError("stream asr boom")
    # Streaming failure: the error is surfaced inside the stream, and a
    # failing cleanup is again swallowed.
    _set_request({"stream": "true"}, {"file": _DummyUploadFile("audio.wav")})
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: _ErrorASR())
    monkeypatch.setattr(module.os, "remove", lambda _path: (_ for _ in ()).throw(RuntimeError("cleanup boom")))
    resp = _run(handler("tenant-1"))
    chunks = _run(_collect_stream(resp.body))
    assert any("stream asr boom" in chunk for chunk in chunks)
@pytest.mark.p2
def test_tts_embedded_stream_and_error_matrix_unit(monkeypatch):
    """Matrix test for ``tts``: tenant/model validation, streamed audio, and in-stream errors."""
    module = _load_session_module(monkeypatch)
    handler = inspect.unwrap(module.tts)
    monkeypatch.setattr(module, "Response", _StubResponse)
    # The text contains a CJK full stop, so it is split into two TTS segments.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"text": "A。B"}))
    monkeypatch.setattr(module.TenantService, "get_info_by", lambda _tid: [])
    res = _run(handler("tenant-1"))
    assert res["message"] == "Tenant not found!"
    monkeypatch.setattr(module.TenantService, "get_info_by", lambda _tid: [{"tenant_id": "tenant-1", "tts_id": ""}])
    res = _run(handler("tenant-1"))
    assert res["message"] == "No default TTS model is set"
    class _TTSOk:
        def tts(self, txt):
            if not txt:
                return []
            yield f"chunk-{txt}".encode("utf-8")
    monkeypatch.setattr(module.TenantService, "get_info_by", lambda _tid: [{"tenant_id": "tenant-1", "tts_id": "tts-x"}])
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: _TTSOk())
    resp = _run(handler("tenant-1"))
    # Streaming audio response headers.
    assert resp.mimetype == "audio/mpeg"
    assert resp.headers.get("Cache-Control") == "no-cache"
    assert resp.headers.get("Connection") == "keep-alive"
    assert resp.headers.get("X-Accel-Buffering") == "no"
    chunks = _run(_collect_stream(resp.body))
    # One audio chunk per sentence segment.
    assert any("chunk-A" in chunk for chunk in chunks)
    assert any("chunk-B" in chunk for chunk in chunks)
    class _TTSErr:
        def tts(self, _txt):
            raise RuntimeError("tts boom")
    # A model failure is reported inside the stream as a 500 event.
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: _TTSErr())
    resp = _run(handler("tenant-1"))
    chunks = _run(_collect_stream(resp.body))
    assert any('"code": 500' in chunk and "**ERROR**: tts boom" in chunk for chunk in chunks)
@pytest.mark.p2
def test_build_reference_chunks_metadata_matrix_unit(monkeypatch):
    """Matrix test for ``_build_reference_chunks`` metadata attachment rules."""
    module = _load_session_module(monkeypatch)
    # include_metadata=False: chunks pass through untouched.
    monkeypatch.setattr(module, "chunks_format", lambda _reference: [{"dataset_id": "kb-1", "document_id": "doc-1"}])
    res = module._build_reference_chunks([], include_metadata=False)
    assert res == [{"dataset_id": "kb-1", "document_id": "doc-1"}]
    # Chunks missing either dataset_id or document_id get no metadata.
    monkeypatch.setattr(module, "chunks_format", lambda _reference: [{"dataset_id": "kb-1"}, {"document_id": "doc-2"}])
    res = module._build_reference_chunks([], include_metadata=True)
    assert all("document_metadata" not in chunk for chunk in res)
    # metadata_fields with no valid string entries filters everything out.
    monkeypatch.setattr(module, "chunks_format", lambda _reference: [{"dataset_id": "kb-1", "document_id": "doc-1"}])
    monkeypatch.setattr(module.DocMetadataService, "get_metadata_for_documents", lambda _doc_ids, _kb_id: {"doc-1": {"author": "alice"}})
    res = module._build_reference_chunks([], include_metadata=True, metadata_fields=[1, None])
    assert "document_metadata" not in res[0]
    # Mixed case: metadata is looked up per dataset, restricted to the
    # requested string fields, and skipped where the document is unknown.
    source_chunks = [
        {"dataset_id": "kb-1", "document_id": "doc-1"},
        {"dataset_id": "kb-2", "document_id": "doc-2"},
        {"dataset_id": "kb-1", "document_id": "doc-3"},
        {"dataset_id": "kb-1", "document_id": None},
    ]
    monkeypatch.setattr(module, "chunks_format", lambda _reference: [dict(chunk) for chunk in source_chunks])
    def _get_metadata(_doc_ids, kb_id):
        if kb_id == "kb-1":
            return {"doc-1": {"author": "alice", "year": 2024}}
        if kb_id == "kb-2":
            return {"doc-2": {"author": "bob", "tag": "rag"}}
        return {}
    monkeypatch.setattr(module.DocMetadataService, "get_metadata_for_documents", _get_metadata)
    res = module._build_reference_chunks([], include_metadata=True, metadata_fields=["author", "missing", 3])
    assert res[0]["document_metadata"] == {"author": "alice"}
    assert res[1]["document_metadata"] == {"author": "bob"}
    assert "document_metadata" not in res[2]
    assert "document_metadata" not in res[3]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_session_management/test_session_sdk_routes_unit.py",
"license": "Apache License 2.0",
"lines": 1223,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_agent_management/test_agent_crud_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from ragflow_sdk import RAGFlow
from ragflow_sdk.modules.agent import Agent
from ragflow_sdk.modules.session import Session
class _DummyResponse:
def __init__(self, payload):
self._payload = payload
def json(self):
return self._payload
@pytest.fixture(scope="session")
def auth():
    """Session-scoped stand-in auth token; no real HTTP auth is used in these unit tests."""
    return "unit-auth"
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    """No-op autouse override so the shared set_tenant_info fixture makes no backend call."""
    return None
@pytest.mark.p2
def test_list_agents_success_and_error(monkeypatch):
    """list_agents issues GET /agents with filters and wraps rows as Agent objects."""
    client = RAGFlow("token", "http://localhost:9380")
    captured = {}
    def _ok_get(path, params=None, json=None):
        captured["path"] = path
        captured["params"] = params
        captured["json"] = json
        return _DummyResponse({"code": 0, "data": [{"id": "agent-1", "title": "Agent One"}]})
    monkeypatch.setattr(client, "get", _ok_get)
    agents = client.list_agents(title="Agent One")
    assert captured["path"] == "/agents"
    assert captured["params"]["title"] == "Agent One"
    assert isinstance(agents[0], Agent), str(agents)
    assert agents[0].id == "agent-1", str(agents[0])
    assert agents[0].title == "Agent One", str(agents[0])
    # A non-zero code is raised to the caller with the server message.
    monkeypatch.setattr(client, "get", lambda *_args, **_kwargs: _DummyResponse({"code": 1, "message": "list boom"}))
    with pytest.raises(Exception) as exception_info:
        client.list_agents()
    assert "list boom" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
def test_create_agent_payload_and_error(monkeypatch):
    """create_agent posts title/dsl, adding description only when provided."""
    client = RAGFlow("token", "http://localhost:9380")
    calls = []
    def _ok_post(path, json=None, stream=False, files=None):
        calls.append((path, json, stream, files))
        return _DummyResponse({"code": 0, "message": "ok"})
    monkeypatch.setattr(client, "post", _ok_post)
    # description=None must be omitted from the payload entirely.
    client.create_agent("agent-title", {"graph": {}}, description=None)
    assert calls[-1][0] == "/agents"
    assert calls[-1][1] == {"title": "agent-title", "dsl": {"graph": {}}}
    client.create_agent("agent-title", {"graph": {}}, description="desc")
    assert calls[-1][1] == {"title": "agent-title", "dsl": {"graph": {}}, "description": "desc"}
    # A non-zero code is raised to the caller with the server message.
    monkeypatch.setattr(client, "post", lambda *_args, **_kwargs: _DummyResponse({"code": 1, "message": "create boom"}))
    with pytest.raises(Exception) as exception_info:
        client.create_agent("agent-title", {"graph": {}})
    assert "create boom" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
def test_update_agent_payload_matrix_and_error(monkeypatch):
    """update_agent sends only the provided fields in the PUT payload."""
    client = RAGFlow("token", "http://localhost:9380")
    calls = []
    def _ok_put(path, json):
        calls.append((path, json))
        return _DummyResponse({"code": 0, "message": "ok"})
    monkeypatch.setattr(client, "put", _ok_put)
    # Pairs of (kwargs passed to update_agent, expected JSON payload).
    cases = [
        ({"title": "new-title"}, {"title": "new-title"}),
        ({"description": "new-description"}, {"description": "new-description"}),
        ({"dsl": {"nodes": []}}, {"dsl": {"nodes": []}}),
        (
            {"title": "new-title", "description": "new-description", "dsl": {"nodes": []}},
            {"title": "new-title", "description": "new-description", "dsl": {"nodes": []}},
        ),
    ]
    for kwargs, expected_payload in cases:
        client.update_agent("agent-1", **kwargs)
        assert calls[-1][0] == "/agents/agent-1"
        assert calls[-1][1] == expected_payload
    # A non-zero code is raised to the caller with the server message.
    monkeypatch.setattr(client, "put", lambda *_args, **_kwargs: _DummyResponse({"code": 1, "message": "update boom"}))
    with pytest.raises(Exception) as exception_info:
        client.update_agent("agent-1", title="bad")
    assert "update boom" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
def test_delete_agent_success_and_error(monkeypatch):
    """delete_agent issues DELETE /agents/<id> with an empty body and surfaces API errors."""
    client = RAGFlow("token", "http://localhost:9380")
    recorded = []
    def _capture_delete(path, json):
        recorded.append((path, json))
        return _DummyResponse({"code": 0, "message": "ok"})
    monkeypatch.setattr(client, "delete", _capture_delete)
    client.delete_agent("agent-1")
    assert recorded[-1] == ("/agents/agent-1", {})
    # A non-zero code must be raised to the caller with the server message.
    monkeypatch.setattr(client, "delete", lambda *_args, **_kwargs: _DummyResponse({"code": 1, "message": "delete boom"}))
    with pytest.raises(Exception) as exception_info:
        client.delete_agent("agent-1")
    assert "delete boom" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
def test_agent_and_dsl_default_initialization():
    """Agent and Agent.Dsl fall back to their documented defaults when fields are omitted."""
    client = RAGFlow("token", "http://localhost:9380")
    agent = Agent(client, {"id": "agent-1", "title": "Agent One"})
    assert agent.id == "agent-1"
    # Fields absent from the payload default to None.
    for absent_field in ("avatar", "canvas_type", "description", "dsl"):
        assert getattr(agent, absent_field) is None
    dsl = Agent.Dsl(client, {})
    assert dsl.answer == []
    # An empty DSL still carries a canonical "begin" component and graph node.
    assert "begin" in dsl.components
    begin_obj = dsl.components["begin"]["obj"]
    assert begin_obj["component_name"] == "Begin"
    assert dsl.graph["nodes"][0]["id"] == "begin"
    # The remaining collection fields all default to empty lists.
    for empty_field in ("history", "messages", "path", "reference"):
        assert getattr(dsl, empty_field) == []
@pytest.mark.p2
def test_agent_session_methods_success_and_error_paths(monkeypatch):
    """Agent session helpers: create/list/delete hit the expected endpoints and raise on errors."""
    client = RAGFlow("token", "http://localhost:9380")
    agent = Agent(client, {"id": "agent-1"})
    calls = {"post": [], "get": [], "rm": []}
    def _ok_post(path, json=None, stream=False, files=None):
        calls["post"].append((path, json, stream, files))
        return _DummyResponse({"code": 0, "data": {"id": "session-1", "agent_id": "agent-1", "name": "one"}})
    def _ok_get(path, params=None):
        calls["get"].append((path, params))
        return _DummyResponse(
            {
                "code": 0,
                "data": [
                    {"id": "session-1", "agent_id": "agent-1", "name": "one"},
                    {"id": "session-2", "agent_id": "agent-1", "name": "two"},
                ],
            }
        )
    def _ok_rm(path, payload):
        calls["rm"].append((path, payload))
        return _DummyResponse({"code": 0, "message": "ok"})
    monkeypatch.setattr(agent, "post", _ok_post)
    monkeypatch.setattr(agent, "get", _ok_get)
    monkeypatch.setattr(agent, "rm", _ok_rm)
    # create_session -> POST /agents/<id>/sessions with the name payload.
    session = agent.create_session(name="session-name")
    assert isinstance(session, Session), str(session)
    assert session.id == "session-1", str(session)
    assert calls["post"][-1][0] == "/agents/agent-1/sessions"
    assert calls["post"][-1][1] == {"name": "session-name"}
    # list_sessions -> GET with paging/filter params forwarded.
    sessions = agent.list_sessions(page=2, page_size=5, orderby="create_time", desc=False, id="session-1")
    assert len(sessions) == 2, str(sessions)
    assert all(isinstance(item, Session) for item in sessions), str(sessions)
    assert calls["get"][-1][0] == "/agents/agent-1/sessions"
    assert calls["get"][-1][1]["page"] == 2
    assert calls["get"][-1][1]["id"] == "session-1"
    # delete_sessions -> rm with the id list.
    agent.delete_sessions(ids=["session-1", "session-2"])
    assert calls["rm"][-1] == ("/agents/agent-1/sessions", {"ids": ["session-1", "session-2"]})
    # Error payloads are raised with the server-provided message.
    monkeypatch.setattr(agent, "post", lambda *_args, **_kwargs: _DummyResponse({"code": 1, "message": "create failed"}))
    with pytest.raises(Exception, match="create failed"):
        agent.create_session(name="bad")
    monkeypatch.setattr(agent, "get", lambda *_args, **_kwargs: _DummyResponse({"code": 2, "message": "list failed"}))
    with pytest.raises(Exception, match="list failed"):
        agent.list_sessions()
    monkeypatch.setattr(agent, "rm", lambda *_args, **_kwargs: _DummyResponse({"code": 3, "message": "delete failed"}))
    with pytest.raises(Exception, match="delete failed"):
        agent.delete_sessions(ids=["session-1"])
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_agent_management/test_agent_crud_unit.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_agent_app/test_agents_webhook_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import base64
import hashlib
import hmac
import importlib.util
import json
import sys
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _Args(dict):
def get(self, key, default=None, type=None):
value = super().get(key, default)
if value is None or type is None:
return value
try:
return type(value)
except (TypeError, ValueError):
return default
class _DummyRequest:
    """Quart-style request stub: awaitable ``form``/``files`` plus async body accessors."""
    def __init__(
        self,
        *,
        path="/api/v1/webhook/agent-1",
        method="POST",
        headers=None,
        content_length=0,
        remote_addr="127.0.0.1",
        args=None,
        json_body=None,
        raw_body=b"",
        form=None,
        files=None,
        authorization=None,
    ):
        self.path = path
        self.method = method
        self.headers = headers or {}
        self.content_length = content_length
        self.remote_addr = remote_addr
        self.args = args or {}
        self.authorization = authorization
        # Quart exposes form/files as awaitables, so wrap them accordingly.
        self.form = _AwaitableValue(form or {})
        self.files = _AwaitableValue(files or {})
        self._json_body = json_body
        self._raw_body = raw_body
    async def get_json(self):
        # Mirrors request.get_json(); returns the preset parsed body (may be None).
        return self._json_body
    async def get_data(self):
        # Mirrors request.get_data(); returns the preset raw byte payload.
        return self._raw_body
class _CanvasRecord:
def __init__(self, *, canvas_category, dsl, user_id="tenant-1"):
self.canvas_category = canvas_category
self.dsl = dsl
self.user_id = user_id
def to_dict(self):
return {"user_id": self.user_id, "dsl": self.dsl}
class _StubCanvas:
def __init__(self, dsl, user_id, agent_id, canvas_id=None):
self.dsl = dsl
self.user_id = user_id
self.agent_id = agent_id
self.canvas_id = canvas_id
async def run(self, **_kwargs):
if False:
yield {}
async def get_files_async(self, desc):
return {"files": desc}
def __str__(self):
return "{}"
class _StubRedisConn:
def __init__(self):
self.bucket_result = [1]
self.bucket_exc = None
self.REDIS = object()
def lua_token_bucket(self, **_kwargs):
if self.bucket_exc is not None:
raise self.bucket_exc
return self.bucket_result
def get(self, _key):
return None
def set_obj(self, _key, _obj, _ttl):
return None
def _run(coro):
return asyncio.run(coro)
def _default_webhook_params(
*,
security=None,
methods=None,
content_types="application/json",
schema=None,
execution_mode="Immediately",
response=None,
):
return {
"mode": "Webhook",
"methods": methods if methods is not None else ["POST"],
"security": security if security is not None else {},
"content_types": content_types,
"schema": schema
if schema is not None
else {
"query": {"properties": {}, "required": []},
"headers": {"properties": {}, "required": []},
"body": {"properties": {}, "required": []},
},
"execution_mode": execution_mode,
"response": response if response is not None else {},
}
def _make_webhook_cvs(module, *, params=None, dsl=None, canvas_category=None):
    """Build a `_CanvasRecord` whose DSL embeds *params* on a single Begin component.

    When *dsl* is supplied it is used verbatim (it may deliberately be an
    invalid value such as a plain string); otherwise a one-component canvas is
    synthesized around *params*, which itself defaults to the standard webhook
    parameters. The category defaults to `module.CanvasCategory.Agent`.
    """
    if dsl is None:
        begin_params = params if params is not None else _default_webhook_params()
        dsl = {
            "components": {
                "begin": {
                    "obj": {"component_name": "Begin", "params": begin_params},
                    "downstream": [],
                    "upstream": [],
                }
            }
        }
    category = canvas_category if canvas_category is not None else module.CanvasCategory.Agent
    return _CanvasRecord(canvas_category=category, dsl=dsl)
def _patch_background_task(monkeypatch, module):
def _fake_create_task(coro):
coro.close()
return None
monkeypatch.setattr(module.asyncio, "create_task", _fake_create_task)
def _load_agents_app(monkeypatch):
    """Load api/apps/sdk/agents.py in isolation, with every heavy dependency stubbed.

    Installs fake packages into sys.modules (via monkeypatch, so they are
    removed after the test) for `common`, `agent.canvas`, the api.db services,
    api.apps, and the Redis connection, then executes the real agents module
    from its file path with a `_DummyManager` standing in for the route
    registrar. Returns the freshly executed module object.
    """
    # The repo root is four directories above this test file.
    repo_root = Path(__file__).resolve().parents[4]
    # `common` must resolve to the real package directory so the target
    # module's `common.*` imports work.
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    # agent.canvas.Canvas -> inert stub so no real canvas engine is pulled in.
    agent_pkg = ModuleType("agent")
    agent_pkg.__path__ = []
    canvas_mod = ModuleType("agent.canvas")
    canvas_mod.Canvas = _StubCanvas
    agent_pkg.canvas = canvas_mod
    monkeypatch.setitem(sys.modules, "agent", agent_pkg)
    monkeypatch.setitem(sys.modules, "agent.canvas", canvas_mod)
    # api.db.services.* replaced by static stubs; tests monkeypatch individual
    # methods per scenario.
    services_pkg = ModuleType("api.db.services")
    services_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.db.services", services_pkg)
    canvas_service_mod = ModuleType("api.db.services.canvas_service")

    class _StubUserCanvasService:
        @staticmethod
        def query(**_kwargs):
            return []

        @staticmethod
        def get_list(*_args, **_kwargs):
            return []

        @staticmethod
        def save(**_kwargs):
            return True

        @staticmethod
        def update_by_id(*_args, **_kwargs):
            return True

        @staticmethod
        def delete_by_id(*_args, **_kwargs):
            return True

        @staticmethod
        def get_by_id(_id):
            return False, None

    canvas_service_mod.UserCanvasService = _StubUserCanvasService
    monkeypatch.setitem(sys.modules, "api.db.services.canvas_service", canvas_service_mod)
    services_pkg.canvas_service = canvas_service_mod
    file_service_mod = ModuleType("api.db.services.file_service")

    class _StubFileService:
        @staticmethod
        def upload_info(*_args, **_kwargs):
            return {"id": "uploaded"}

    file_service_mod.FileService = _StubFileService
    monkeypatch.setitem(sys.modules, "api.db.services.file_service", file_service_mod)
    services_pkg.file_service = file_service_mod
    canvas_version_mod = ModuleType("api.db.services.user_canvas_version")

    class _StubUserCanvasVersionService:
        @staticmethod
        def insert(**_kwargs):
            return True

        @staticmethod
        def delete_all_versions(*_args, **_kwargs):
            return True

        @staticmethod
        def save_or_replace_latest(*_args, **_kwargs):
            return True

        @staticmethod
        def build_version_title(*_args, **_kwargs):
            return "stub_version_title"

    canvas_version_mod.UserCanvasVersionService = _StubUserCanvasVersionService
    monkeypatch.setitem(sys.modules, "api.db.services.user_canvas_version", canvas_version_mod)
    services_pkg.user_canvas_version = canvas_version_mod
    tenant_llm_service_mod = ModuleType("api.db.services.tenant_llm_service")

    class _StubLLMFactoriesService:
        @staticmethod
        def get_api_key(*_args, **_kwargs):
            return None

    tenant_llm_service_mod.LLMFactoriesService = _StubLLMFactoriesService
    monkeypatch.setitem(sys.modules, "api.db.services.tenant_llm_service", tenant_llm_service_mod)
    services_pkg.tenant_llm_service = tenant_llm_service_mod
    user_service_mod = ModuleType("api.db.services.user_service")

    class _StubUserService:
        @staticmethod
        def query(**_kwargs):
            return []

        @staticmethod
        def get_by_id(_id):
            return False, None

    user_service_mod.UserService = _StubUserService
    monkeypatch.setitem(sys.modules, "api.db.services.user_service", user_service_mod)
    services_pkg.user_service = user_service_mod
    services_pkg.UserService = _StubUserService
    # Stub api.apps package to prevent api/apps/__init__.py from executing
    # (it triggers heavy imports like quart, settings, DB connections).
    api_apps_pkg = ModuleType("api.apps")
    api_apps_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.apps", api_apps_pkg)
    api_apps_services_pkg = ModuleType("api.apps.services")
    api_apps_services_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.apps.services", api_apps_services_pkg)
    api_apps_pkg.services = api_apps_services_pkg
    canvas_replica_mod = ModuleType("api.apps.services.canvas_replica_service")

    class _StubCanvasReplicaService:
        @classmethod
        def normalize_dsl(cls, dsl):
            # Local import: ``json`` is not imported at this test module's top level.
            import json

            if isinstance(dsl, str):
                return json.loads(dsl)
            return dsl

        @classmethod
        def bootstrap(cls, *_args, **_kwargs):
            return {}

        @classmethod
        def load_for_run(cls, *_args, **_kwargs):
            return None

        @classmethod
        def commit_after_run(cls, *_args, **_kwargs):
            return True

        @classmethod
        def replace_for_set(cls, *_args, **_kwargs):
            return True

        @classmethod
        def create_if_absent(cls, *_args, **_kwargs):
            return {}

    canvas_replica_mod.CanvasReplicaService = _StubCanvasReplicaService
    monkeypatch.setitem(sys.modules, "api.apps.services.canvas_replica_service", canvas_replica_mod)
    api_apps_services_pkg.canvas_replica_service = canvas_replica_mod
    # Shared stub Redis connection, reachable both as module.REDIS_CONN and
    # through the rag.utils.redis_conn import inside agents.py.
    redis_obj = _StubRedisConn()
    redis_mod = ModuleType("rag.utils.redis_conn")
    redis_mod.REDIS_CONN = redis_obj
    monkeypatch.setitem(sys.modules, "rag.utils.redis_conn", redis_mod)
    # Execute the real agents.py with a no-op route manager pre-injected so
    # its @manager.route decorators become pass-throughs.
    module_path = repo_root / "api" / "apps" / "sdk" / "agents.py"
    spec = importlib.util.spec_from_file_location("test_agents_webhook_unit", module_path)
    module = importlib.util.module_from_spec(spec)
    module.manager = _DummyManager()
    spec.loader.exec_module(module)
    return module
def _assert_bad_request(res, expected_substring):
assert isinstance(res, tuple), res
payload, code = res
assert code == 400, res
assert payload["code"] == 400, payload
assert expected_substring in payload["message"], payload
@pytest.mark.p2
def test_agents_crud_unit_branches(monkeypatch):
    """Walk the agent list/create/update/delete endpoints through their error
    and success branches, calling the undecorated handlers via __wrapped__."""
    module = _load_agents_app(monkeypatch)
    # list_agents: a specific id/title that matches nothing -> DATA_ERROR.
    monkeypatch.setattr(
        module,
        "request",
        SimpleNamespace(args={"id": "missing", "title": "missing", "desc": "false", "page": "1", "page_size": "10"}),
    )
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [])
    res = module.list_agents.__wrapped__("tenant-1")
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "doesn't exist" in res["message"]
    # list_agents success: the "desc=true" query arg must reach get_list as a bool.
    captured = {}

    def fake_get_list(_tenant_id, _page, _page_size, _orderby, desc, *_rest):
        captured["desc"] = desc
        return [{"id": "agent-1"}]

    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [{"id": "agent-1"}])
    monkeypatch.setattr(module.UserCanvasService, "get_list", fake_get_list)
    monkeypatch.setattr(module, "request", SimpleNamespace(args={"desc": "true"}))
    res = module.list_agents.__wrapped__("tenant-1")
    assert res["code"] == module.RetCode.SUCCESS
    assert captured["desc"] is True

    # create_agent: missing dsl -> ARGUMENT_ERROR.
    async def req_no_dsl():
        return {"title": "agent-a"}

    monkeypatch.setattr(module, "get_request_json", req_no_dsl)
    res = _run(module.create_agent.__wrapped__("tenant-1"))
    assert res["code"] == module.RetCode.ARGUMENT_ERROR
    assert "No DSL data in request" in res["message"]

    # create_agent: missing title -> ARGUMENT_ERROR.
    async def req_no_title():
        return {"dsl": {"components": {}}}

    monkeypatch.setattr(module, "get_request_json", req_no_title)
    res = _run(module.create_agent.__wrapped__("tenant-1"))
    assert res["code"] == module.RetCode.ARGUMENT_ERROR
    assert "No title in request" in res["message"]

    # create_agent: a duplicate title -> DATA_ERROR.
    async def req_dup():
        return {"dsl": {"components": {}}, "title": "agent-dup"}

    monkeypatch.setattr(module, "get_request_json", req_dup)
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [object()])
    res = _run(module.create_agent.__wrapped__("tenant-1"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "already exists" in res["message"]
    # create_agent: save() returning False -> DATA_ERROR.
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [])
    monkeypatch.setattr(module, "get_uuid", lambda: "agent-created")
    monkeypatch.setattr(module.UserCanvasService, "save", lambda **_kwargs: False)
    res = _run(module.create_agent.__wrapped__("tenant-1"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "Fail to create agent" in res["message"]

    # update_agent: unknown agent id -> OPERATING_ERROR.
    async def req_update():
        return {"dsl": {"nodes": []}, "title": " webhook-agent ", "unused": None}

    monkeypatch.setattr(module, "get_request_json", req_update)
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: False)
    res = _run(module.update_agent.__wrapped__("tenant-1", "agent-1"))
    assert res["code"] == module.RetCode.OPERATING_ERROR
    # update_agent success: row update and version snapshot happen exactly once each.
    calls = {"update": 0, "save_or_replace_latest": 0}
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: True)
    monkeypatch.setattr(
        module.UserCanvasService,
        "update_by_id",
        lambda *_args, **_kwargs: calls.__setitem__("update", calls["update"] + 1),
    )
    monkeypatch.setattr(
        module.UserCanvasVersionService,
        "save_or_replace_latest",
        lambda *_args, **_kwargs: calls.__setitem__("save_or_replace_latest", calls["save_or_replace_latest"] + 1),
    )
    res = _run(module.update_agent.__wrapped__("tenant-1", "agent-1"))
    assert res["code"] == module.RetCode.SUCCESS
    assert calls == {"update": 1, "save_or_replace_latest": 1}
    # delete_agent: unknown agent id -> OPERATING_ERROR.
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: False)
    res = module.delete_agent.__wrapped__("tenant-1", "agent-1")
    assert res["code"] == module.RetCode.OPERATING_ERROR
@pytest.mark.p2
def test_webhook_prechecks(monkeypatch):
    """The webhook endpoint rejects bad canvases before any auth or parsing."""
    module = _load_agents_app(monkeypatch)
    monkeypatch.setattr(module, "request", _DummyRequest(headers={"Content-Type": "application/json"}, json_body={}))

    def _expect(record, needle):
        # Install the canvas lookup result and assert the 400 message.
        monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id, _rec=record: (_rec is not None, _rec))
        _assert_bad_request(_run(module.webhook("agent-1")), needle)

    # No canvas row at all.
    _expect(None, "Canvas not found")
    # Dataflow canvases cannot be webhook targets.
    _expect(_make_webhook_cvs(module, canvas_category=module.CanvasCategory.DataFlow), "Dataflow can not be triggered")
    # DSL that is not a dict.
    _expect(_make_webhook_cvs(module, dsl="invalid-dsl"), "Invalid DSL format")
    # Begin component present but not in Webhook mode.
    chat_dsl = {"components": {"begin": {"obj": {"component_name": "Begin", "params": {"mode": "Chat"}}}}}
    _expect(_make_webhook_cvs(module, dsl=chat_dsl), "Webhook not configured")
    # HTTP method not in the configured allow-list.
    _expect(_make_webhook_cvs(module, params=_default_webhook_params(methods=["GET"])), "not allowed")
@pytest.mark.p2
def test_webhook_security_dispatch(monkeypatch):
    """Empty or "none" security configs pass through; unknown auth types 400."""
    module = _load_agents_app(monkeypatch)
    _patch_background_task(monkeypatch, module)
    req = _DummyRequest(headers={"Content-Type": "application/json"}, json_body={}, args={"a": "b"})
    monkeypatch.setattr(module, "request", req)

    def _invoke(security_cfg):
        record = _make_webhook_cvs(module, params=_default_webhook_params(security=security_cfg))
        monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id, _rec=record: (True, _rec))
        return _run(module.webhook("agent-1"))

    # Both an absent security section and an explicit "none" auth type succeed.
    for accepted in ({}, {"auth_type": "none"}):
        outcome = _invoke(accepted)
        assert hasattr(outcome, "status_code"), outcome
        assert outcome.status_code == 200
    # Any unrecognised auth_type is rejected.
    _assert_bad_request(_invoke({"auth_type": "unsupported"}), "Unsupported auth_type")
@pytest.mark.p2
def test_webhook_max_body_size(monkeypatch):
    """max_body_size handling: absent -> accepted; malformed spec -> 400;
    over-large configured limit -> 400; body bigger than the limit -> 400."""
    module = _load_agents_app(monkeypatch)
    _patch_background_task(monkeypatch, module)
    base_request = _DummyRequest(headers={"Content-Type": "application/json"}, json_body={})
    monkeypatch.setattr(module, "request", base_request)
    # No max_body_size configured: request is accepted.
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security={"auth_type": "none"}))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    res = _run(module.webhook("agent-1"))
    assert hasattr(res, "status_code")
    assert res.status_code == 200
    # A bare number without a unit suffix is rejected as malformed.
    security = {"auth_type": "none", "max_body_size": "123"}
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security=security))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    _assert_bad_request(_run(module.webhook("agent-1")), "Invalid max_body_size format")
    # "11mb" is rejected outright (presumably above the handler's hard cap — see message).
    security = {"auth_type": "none", "max_body_size": "11mb"}
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security=security))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    _assert_bad_request(_run(module.webhook("agent-1")), "exceeds maximum allowed size")
    # Content-Length (2048) beyond the configured 1kb limit -> body too large.
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(headers={"Content-Type": "application/json"}, json_body={}, content_length=2048),
    )
    security = {"auth_type": "none", "max_body_size": "1kb"}
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security=security))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    _assert_bad_request(_run(module.webhook("agent-1")), "Request body too large")
@pytest.mark.p2
def test_webhook_ip_whitelist(monkeypatch):
    """Requests from 127.0.0.1 pass empty or matching whitelists; others 400."""
    module = _load_agents_app(monkeypatch)
    _patch_background_task(monkeypatch, module)
    req = _DummyRequest(headers={"Content-Type": "application/json"}, json_body={}, remote_addr="127.0.0.1")
    monkeypatch.setattr(module, "request", req)

    def _invoke(whitelist):
        cfg = {"auth_type": "none", "ip_whitelist": whitelist}
        record = _make_webhook_cvs(module, params=_default_webhook_params(security=cfg))
        monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id, _rec=record: (True, _rec))
        return _run(module.webhook("agent-1"))

    # An empty list, a covering CIDR block, and an exact address all allow the call.
    for allowed in ([], ["127.0.0.0/24"], ["127.0.0.1"]):
        outcome = _invoke(allowed)
        assert hasattr(outcome, "status_code"), outcome
        assert outcome.status_code == 200
    # A whitelist that does not cover the caller's address is rejected.
    _assert_bad_request(_invoke(["10.0.0.1"]), "is not allowed")
@pytest.mark.p2
def test_webhook_rate_limit(monkeypatch):
    """rate_limit config validation plus the token-bucket deny and Redis-error paths."""
    module = _load_agents_app(monkeypatch)
    _patch_background_task(monkeypatch, module)
    monkeypatch.setattr(module, "request", _DummyRequest(headers={"Content-Type": "application/json"}, json_body={}))
    # No rate_limit configured: request goes through.
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security={"auth_type": "none"}))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    res = _run(module.webhook("agent-1"))
    assert hasattr(res, "status_code")
    assert res.status_code == 200
    # limit must be strictly positive.
    bad_limit = {"auth_type": "none", "rate_limit": {"limit": 0, "per": "minute"}}
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security=bad_limit))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    _assert_bad_request(_run(module.webhook("agent-1")), "rate_limit.limit must be > 0")
    # "per" must be one of the supported windows ("week" is not).
    bad_per = {"auth_type": "none", "rate_limit": {"limit": 1, "per": "week"}}
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security=bad_per))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    _assert_bad_request(_run(module.webhook("agent-1")), "Invalid rate_limit.per")
    # Token bucket reporting no tokens left -> request denied.
    module.REDIS_CONN.bucket_result = [0]
    module.REDIS_CONN.bucket_exc = None
    denied = {"auth_type": "none", "rate_limit": {"limit": 1, "per": "minute"}}
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security=denied))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    _assert_bad_request(_run(module.webhook("agent-1")), "Too many requests")
    # Redis raising inside the bucket call surfaces as a rate-limit error.
    module.REDIS_CONN.bucket_result = [1]
    module.REDIS_CONN.bucket_exc = RuntimeError("redis failure")
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security=denied))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    _assert_bad_request(_run(module.webhook("agent-1")), "Rate limit error")
@pytest.mark.p2
def test_webhook_token_basic_jwt_auth(monkeypatch):
    """Token, Basic-Auth and JWT security branches of the webhook endpoint."""
    module = _load_agents_app(monkeypatch)
    _patch_background_task(monkeypatch, module)
    # Token auth: configured header absent from the request -> rejected.
    monkeypatch.setattr(module, "request", _DummyRequest(headers={"Content-Type": "application/json"}, json_body={}))
    token_security = {"auth_type": "token", "token": {"token_header": "X-TOKEN", "token_value": "ok"}}
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security=token_security))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    _assert_bad_request(_run(module.webhook("agent-1")), "Invalid token authentication")
    # Basic auth: wrong password -> rejected.
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(
            headers={"Content-Type": "application/json"},
            json_body={},
            authorization=SimpleNamespace(username="u", password="bad"),
        ),
    )
    basic_security = {"auth_type": "basic", "basic_auth": {"username": "u", "password": "p"}}
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security=basic_security))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    _assert_bad_request(_run(module.webhook("agent-1")), "Invalid Basic Auth credentials")
    # JWT: no secret in the config -> rejected before looking at the request.
    monkeypatch.setattr(module, "request", _DummyRequest(headers={"Content-Type": "application/json"}, json_body={}))
    jwt_missing_secret = {"auth_type": "jwt", "jwt": {}}
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security=jwt_missing_secret))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    _assert_bad_request(_run(module.webhook("agent-1")), "JWT secret not configured")
    # JWT: no Authorization header -> missing Bearer token.
    jwt_base = {"auth_type": "jwt", "jwt": {"secret": "secret"}}
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security=jwt_base))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    _assert_bad_request(_run(module.webhook("agent-1")), "Missing Bearer token")
    # JWT: "Bearer " with nothing after it -> empty token.
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(headers={"Content-Type": "application/json", "Authorization": "Bearer "}, json_body={}),
    )
    _assert_bad_request(_run(module.webhook("agent-1")), "Empty Bearer token")
    # JWT: jwt.decode raising -> invalid JWT.
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(headers={"Content-Type": "application/json", "Authorization": "Bearer token"}, json_body={}),
    )
    monkeypatch.setattr(module.jwt, "decode", lambda *_args, **_kwargs: (_ for _ in ()).throw(Exception("decode boom")))
    _assert_bad_request(_run(module.webhook("agent-1")), "Invalid JWT")
    # JWT: reserved claims (e.g. "exp") may not be listed in required_claims.
    monkeypatch.setattr(module.jwt, "decode", lambda *_args, **_kwargs: {"exp": 1})
    jwt_reserved = {"auth_type": "jwt", "jwt": {"secret": "secret", "required_claims": ["exp"]}}
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security=jwt_reserved))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    _assert_bad_request(_run(module.webhook("agent-1")), "Reserved JWT claim cannot be required")
    # JWT: a required claim absent from the decoded payload -> rejected.
    monkeypatch.setattr(module.jwt, "decode", lambda *_args, **_kwargs: {})
    jwt_missing_claim = {"auth_type": "jwt", "jwt": {"secret": "secret", "required_claims": ["role"]}}
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security=jwt_missing_claim))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    _assert_bad_request(_run(module.webhook("agent-1")), "Missing JWT claim")
    # JWT success: audience/issuer must be forwarded to jwt.decode with their
    # verification options enabled; a plain-string required_claims works too.
    captured = {}

    def fake_decode(token, options, **kwargs):
        captured["token"] = token
        captured["options"] = options
        captured["kwargs"] = kwargs
        return {"role": "admin"}

    monkeypatch.setattr(module.jwt, "decode", fake_decode)
    jwt_success = {
        "auth_type": "jwt",
        "jwt": {
            "secret": "secret",
            "audience": "aud",
            "issuer": "iss",
            "required_claims": "role",
        },
    }
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security=jwt_success))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    res = _run(module.webhook("agent-1"))
    assert hasattr(res, "status_code")
    assert res.status_code == 200
    assert captured["kwargs"]["audience"] == "aud"
    assert captured["kwargs"]["issuer"] == "iss"
    assert captured["options"]["verify_aud"] is True
    assert captured["options"]["verify_iss"] is True
    # JWT: a non-str/non-list required_claims value does not fail the request.
    monkeypatch.setattr(module.jwt, "decode", lambda *_args, **_kwargs: {})
    jwt_success_invalid_type = {"auth_type": "jwt", "jwt": {"secret": "secret", "required_claims": 123}}
    cvs = _make_webhook_cvs(module, params=_default_webhook_params(security=jwt_success_invalid_type))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    res = _run(module.webhook("agent-1"))
    assert hasattr(res, "status_code")
    assert res.status_code == 200
@pytest.mark.p2
def test_webhook_parse_request_branches(monkeypatch):
    """Content-type negotiation branches: JSON-only config vs a text/plain
    request, plus multipart handling (many files, then a single tracked upload)."""
    module = _load_agents_app(monkeypatch)
    _patch_background_task(monkeypatch, module)
    security = {"auth_type": "none"}
    # JSON-only webhook receiving text/plain -> the parser raises ValueError.
    params = _default_webhook_params(security=security, content_types="application/json")
    cvs = _make_webhook_cvs(module, params=params)
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(headers={"Content-Type": "text/plain"}, raw_body=b'{"x":1}', json_body={}),
    )
    with pytest.raises(ValueError, match="Invalid Content-Type"):
        _run(module.webhook("agent-1"))
    # Matching JSON request (with query args) succeeds.
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(headers={"Content-Type": "application/json"}, json_body={"x": 1}, args={"q": "1"}),
    )
    res = _run(module.webhook("agent-1"))
    assert hasattr(res, "status_code")
    assert res.status_code == 200
    # Multipart with 11 files is still accepted (returns 200).
    params = _default_webhook_params(security=security, content_types="multipart/form-data")
    cvs = _make_webhook_cvs(module, params=params)
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    files = {f"file{i}": object() for i in range(11)}
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(
            headers={"Content-Type": "multipart/form-data"},
            form={"key": "value"},
            files=files,
            json_body={},
        ),
    )
    res = _run(module.webhook("agent-1"))
    assert hasattr(res, "status_code")
    assert res.status_code == 200
    # Single-file multipart: exactly one FileService.upload_info call is made.
    uploaded = {"count": 0}
    monkeypatch.setattr(
        module.FileService,
        "upload_info",
        lambda *_args, **_kwargs: uploaded.__setitem__("count", uploaded["count"] + 1) or {"id": "uploaded"},
    )
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(
            headers={"Content-Type": "multipart/form-data"},
            form={"k": "v"},
            files={"file1": object()},
            json_body={},
        ),
    )
    res = _run(module.webhook("agent-1"))
    assert hasattr(res, "status_code")
    assert res.status_code == 200
    assert uploaded["count"] == 1
@pytest.mark.p2
def test_webhook_canvas_constructor_exception(monkeypatch):
    """A Canvas() constructor failure surfaces as a BAD_REQUEST error payload."""
    module = _load_agents_app(monkeypatch)
    record = _make_webhook_cvs(module, params=_default_webhook_params(security={"auth_type": "none"}))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, record))
    monkeypatch.setattr(module, "request", _DummyRequest(headers={"Content-Type": "application/json"}, json_body={}))

    def _exploding_canvas(*_args, **_kwargs):
        raise RuntimeError("canvas init failed")

    monkeypatch.setattr(module, "Canvas", _exploding_canvas)
    # Capture the error payload as a plain namespace so its fields are inspectable.
    monkeypatch.setattr(module, "get_data_error_result", lambda *, code, message: SimpleNamespace(code=code, message=message))
    outcome = _run(module.webhook("agent-1"))
    assert isinstance(outcome, SimpleNamespace)
    assert outcome.code == module.RetCode.BAD_REQUEST
    assert "canvas init failed" in outcome.message
    assert outcome.status_code == module.RetCode.BAD_REQUEST
@pytest.mark.p2
def test_webhook_trace_polling_branches(monkeypatch):
    """Drive webhook_trace through its polling branches: missing since_ts, no
    Redis data, stale since_ts, webhook-id assignment, an invalid id, and
    event filtering with the finished flag."""
    # Fix: ``json`` is not in this module's top-level import block, so bind it
    # in function scope for the Redis payload round-trip below.
    import json

    module = _load_agents_app(monkeypatch)
    # Missing since_ts.
    monkeypatch.setattr(module, "request", SimpleNamespace(args=_Args()))
    res = _run(module.webhook_trace("agent-1"))
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["webhook_id"] is None
    assert res["data"]["events"] == []
    assert res["data"]["finished"] is False
    # since_ts provided but no Redis data.
    monkeypatch.setattr(module, "request", SimpleNamespace(args=_Args({"since_ts": "100.0"})))
    monkeypatch.setattr(module.REDIS_CONN, "get", lambda _k: None)
    res = _run(module.webhook_trace("agent-1"))
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["webhook_id"] is None
    assert res["data"]["next_since_ts"] == 100.0
    assert res["data"]["events"] == []
    assert res["data"]["finished"] is False
    # Two recorded runs: one started at 101.0 (newer than since_ts=100.0) and
    # one at 99.0 (older, must be ignored).
    webhooks_obj = {
        "webhooks": {
            "101.0": {
                "events": [
                    {"event": "message", "ts": 101.2, "data": {"content": "a"}},
                    {"event": "finished", "ts": 102.5},
                ]
            },
            "99.0": {"events": [{"event": "message", "ts": 99.1}]},
        }
    }
    raw = json.dumps(webhooks_obj)
    monkeypatch.setattr(module.REDIS_CONN, "get", lambda _k: raw)
    # No candidates newer than since_ts.
    monkeypatch.setattr(module, "request", SimpleNamespace(args=_Args({"since_ts": "200.0"})))
    res = _run(module.webhook_trace("agent-1"))
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["webhook_id"] is None
    assert res["data"]["next_since_ts"] == 200.0
    assert res["data"]["events"] == []
    assert res["data"]["finished"] is False
    # Candidate exists and webhook id is assigned.
    monkeypatch.setattr(module, "request", SimpleNamespace(args=_Args({"since_ts": "100.0"})))
    res = _run(module.webhook_trace("agent-1"))
    assert res["code"] == module.RetCode.SUCCESS
    webhook_id = res["data"]["webhook_id"]
    assert webhook_id
    assert res["data"]["events"] == []
    assert res["data"]["next_since_ts"] == 101.0
    assert res["data"]["finished"] is False
    # Invalid webhook id: no events, immediately finished.
    monkeypatch.setattr(
        module,
        "request",
        SimpleNamespace(args=_Args({"since_ts": "100.0", "webhook_id": "bad-id"})),
    )
    res = _run(module.webhook_trace("agent-1"))
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["webhook_id"] == "bad-id"
    assert res["data"]["events"] == []
    assert res["data"]["next_since_ts"] == 100.0
    assert res["data"]["finished"] is True
    # Valid webhook id with event filtering and finished flag.
    monkeypatch.setattr(
        module,
        "request",
        SimpleNamespace(args=_Args({"since_ts": "101.0", "webhook_id": webhook_id})),
    )
    res = _run(module.webhook_trace("agent-1"))
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["webhook_id"] == webhook_id
    assert [event["ts"] for event in res["data"]["events"]] == [101.2, 102.5]
    assert res["data"]["next_since_ts"] == 102.5
    assert res["data"]["finished"] is True
@pytest.mark.p2
def test_webhook_parse_request_form_and_raw_body_paths(monkeypatch):
    """urlencoded-form and raw-body (text/plain) parsing paths, including bad
    JSON, an empty body, and a raw read that raises — all must still 200."""
    module = _load_agents_app(monkeypatch)
    _patch_background_task(monkeypatch, module)
    security = {"auth_type": "none"}

    def _run_with(params, req):
        # Install the canvas + request pair, fire the webhook, expect a 200.
        cvs = _make_webhook_cvs(module, params=params)
        monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id, _cvs=cvs: (True, _cvs))
        monkeypatch.setattr(module, "request", req)
        res = _run(module.webhook("agent-1"))
        assert hasattr(res, "status_code"), res
        assert res.status_code == 200

    # application/x-www-form-urlencoded body.
    _run_with(
        _default_webhook_params(security=security, content_types="application/x-www-form-urlencoded"),
        _DummyRequest(
            headers={"Content-Type": "application/x-www-form-urlencoded"},
            form={"a": "1", "b": "2"},
            json_body={},
        ),
    )
    # Raw body containing valid JSON.
    _run_with(
        _default_webhook_params(security=security, content_types="text/plain"),
        _DummyRequest(headers={"Content-Type": "text/plain"}, raw_body=b'{"k": 1}', json_body={}),
    )
    # Raw body that is not valid JSON is still accepted.
    _run_with(
        _default_webhook_params(security=security, content_types="text/plain"),
        _DummyRequest(headers={"Content-Type": "text/plain"}, raw_body=b"{bad-json}", json_body={}),
    )
    # Empty raw body.
    _run_with(
        _default_webhook_params(security=security, content_types="text/plain"),
        _DummyRequest(headers={"Content-Type": "text/plain"}, raw_body=b"", json_body={}),
    )

    class _BrokenRawRequest(_DummyRequest):
        # Simulates the transport failing while reading the body.
        async def get_data(self):
            raise RuntimeError("raw read failed")

    # A raw-body read failure is tolerated; the handler still returns 200.
    _run_with(
        _default_webhook_params(security=security, content_types="text/plain"),
        _BrokenRawRequest(headers={"Content-Type": "text/plain"}, json_body={}),
    )
@pytest.mark.p2
def test_webhook_schema_extract_cast_defaults_and_validation_errors(monkeypatch):
    """Schema handling: a happy-path request exercising every declared type
    (with string->bool/number/object/array auto-casting), then a table of
    payloads that must each produce a specific 400 validation message."""
    # Query section declares one property of every supported type with no
    # required fields; body section requires most of its typed fields.
    base_schema = {
        "query": {
            "properties": {
                "q_file": {"type": "file"},
                "q_object": {"type": "object"},
                "q_boolean": {"type": "boolean"},
                "q_number": {"type": "number"},
                "q_string": {"type": "string"},
                "q_array": {"type": "array<string>"},
                "q_null": {"type": "null"},
                "q_default_none": {},
            },
            "required": [],
        },
        "headers": {"properties": {"Content-Type": {"type": "string"}}, "required": []},
        "body": {
            "properties": {
                "bool_true": {"type": "boolean"},
                "bool_false": {"type": "boolean"},
                "number_int": {"type": "number"},
                "number_float": {"type": "number"},
                "obj": {"type": "object"},
                "arr": {"type": "array<number>"},
                "text": {"type": "string"},
                "file_list": {"type": "file"},
                "unknown": {"type": "mystery"},
            },
            "required": [
                "bool_true",
                "number_int",
                "obj",
                "arr",
                "text",
                "file_list",
                "unknown",
            ],
        },
    }
    module = _load_agents_app(monkeypatch)
    _patch_background_task(monkeypatch, module)
    params = _default_webhook_params(
        security={"auth_type": "none"},
        content_types="application/json",
        schema=base_schema,
    )
    cvs = _make_webhook_cvs(module, params=params)
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
    # Every body value arrives as a string (or list) that must be auto-cast to
    # its declared type; this request is expected to validate and return 200.
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(
            headers={"Content-Type": "application/json"},
            args={},
            json_body={
                "bool_true": "true",
                "bool_false": "0",
                "number_int": "-3",
                "number_float": "2.5",
                "obj": '{"a": 1}',
                "arr": "[1, 2]",
                "text": "hello",
                "file_list": ["f1"],
                "unknown": "mystery",
            },
        ),
    )
    res = _run(module.webhook("agent-1"))
    assert hasattr(res, "status_code"), res
    assert res.status_code == 200
    # (schema, body payload, expected 400-message substring) triples covering
    # missing required fields, failed auto-casts, and hard type mismatches.
    failure_cases = [
        (
            {"query": {"properties": {}, "required": []}, "headers": {"properties": {}, "required": []}, "body": {"properties": {"must": {"type": "string"}}, "required": ["must"]}},
            {},
            "missing required field",
        ),
        (
            {"query": {"properties": {}, "required": []}, "headers": {"properties": {}, "required": []}, "body": {"properties": {"flag": {"type": "boolean"}}, "required": ["flag"]}},
            {"flag": "maybe"},
            "auto-cast failed",
        ),
        (
            {"query": {"properties": {}, "required": []}, "headers": {"properties": {}, "required": []}, "body": {"properties": {"num": {"type": "number"}}, "required": ["num"]}},
            {"num": "abc"},
            "auto-cast failed",
        ),
        (
            {"query": {"properties": {}, "required": []}, "headers": {"properties": {}, "required": []}, "body": {"properties": {"obj": {"type": "object"}}, "required": ["obj"]}},
            {"obj": "[]"},
            "auto-cast failed",
        ),
        (
            {"query": {"properties": {}, "required": []}, "headers": {"properties": {}, "required": []}, "body": {"properties": {"arr": {"type": "array<number>"}}, "required": ["arr"]}},
            {"arr": "{}"},
            "auto-cast failed",
        ),
        (
            {"query": {"properties": {}, "required": []}, "headers": {"properties": {}, "required": []}, "body": {"properties": {"num": {"type": "number"}}, "required": ["num"]}},
            {"num": []},
            "type mismatch",
        ),
        (
            {"query": {"properties": {}, "required": []}, "headers": {"properties": {}, "required": []}, "body": {"properties": {"arr": {"type": "array<number>"}}, "required": ["arr"]}},
            {"arr": 3},
            "type mismatch",
        ),
        (
            {"query": {"properties": {}, "required": []}, "headers": {"properties": {}, "required": []}, "body": {"properties": {"arr": {"type": "array<number>"}}, "required": ["arr"]}},
            {"arr": [1, "x"]},
            "type mismatch",
        ),
        (
            {"query": {"properties": {}, "required": []}, "headers": {"properties": {}, "required": []}, "body": {"properties": {"file": {"type": "file"}}, "required": ["file"]}},
            {"file": "inline-file"},
            "type mismatch",
        ),
    ]
    for schema, body_payload, expected_substring in failure_cases:
        params = _default_webhook_params(
            security={"auth_type": "none"},
            content_types="application/json",
            schema=schema,
        )
        cvs = _make_webhook_cvs(module, params=params)
        monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id, _cvs=cvs: (True, _cvs))
        monkeypatch.setattr(
            module,
            "request",
            _DummyRequest(headers={"Content-Type": "application/json"}, json_body=body_payload),
        )
        res = _run(module.webhook("agent-1"))
        _assert_bad_request(res, expected_substring)
@pytest.mark.p2
def test_webhook_immediate_response_status_and_template_validation(monkeypatch):
    """Immediate-response config: status-code validation plus JSON vs
    plain-text body_template handling."""
    # Fix: ``json`` is not in this module's top-level import block, so bind it
    # in function scope for decoding the JSON response body below.
    import json

    module = _load_agents_app(monkeypatch)
    _patch_background_task(monkeypatch, module)

    def _run_case(response_cfg):
        # Build a canvas with the given response config and fire the webhook.
        params = _default_webhook_params(
            security={"auth_type": "none"},
            content_types="application/json",
            response=response_cfg,
        )
        cvs = _make_webhook_cvs(module, params=params)
        monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id, _cvs=cvs: (True, _cvs))
        monkeypatch.setattr(module, "request", _DummyRequest(headers={"Content-Type": "application/json"}, json_body={}))
        return _run(module.webhook("agent-1"))

    # Non-numeric and out-of-range status codes are rejected.
    _assert_bad_request(_run_case({"status": "abc"}), "Invalid response status code")
    _assert_bad_request(_run_case({"status": 500}), "must be between 200 and 399")
    # Empty template -> JSON "null" body with the configured status.
    empty_res = _run_case({"status": 204, "body_template": ""})
    assert empty_res.status_code == 204
    assert empty_res.content_type == "application/json"
    assert _run(empty_res.get_data(as_text=True)) == "null"
    # Valid JSON template is served as application/json.
    json_res = _run_case({"status": 201, "body_template": '{"ok": true}'})
    assert json_res.status_code == 201
    assert json_res.content_type == "application/json"
    assert json.loads(_run(json_res.get_data(as_text=True))) == {"ok": True}
    # A non-JSON template falls back to text/plain.
    plain_res = _run_case({"status": 202, "body_template": "plain-text"})
    assert plain_res.status_code == 202
    assert plain_res.content_type == "text/plain"
    assert _run(plain_res.get_data(as_text=True)) == "plain-text"
@pytest.mark.p2
def test_webhook_background_run_success_and_error_trace_paths(monkeypatch):
module = _load_agents_app(monkeypatch)
redis_store = {}
def redis_get(key):
return redis_store.get(key)
def redis_set_obj(key, obj, _ttl):
redis_store[key] = json.dumps(obj)
monkeypatch.setattr(module.REDIS_CONN, "get", redis_get)
monkeypatch.setattr(module.REDIS_CONN, "set_obj", redis_set_obj)
update_calls = []
monkeypatch.setattr(module.UserCanvasService, "update_by_id", lambda *_args, **_kwargs: update_calls.append(True))
tasks = []
def _capture_task(coro):
tasks.append(coro)
return SimpleNamespace()
monkeypatch.setattr(module.asyncio, "create_task", _capture_task)
class _CanvasSuccess(_StubCanvas):
async def run(self, **_kwargs):
yield {"event": "message", "data": {"content": "ok"}}
def __str__(self):
return "{}"
monkeypatch.setattr(module, "Canvas", _CanvasSuccess)
params = _default_webhook_params(security={"auth_type": "none"}, content_types="application/json")
cvs = _make_webhook_cvs(module, params=params)
monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
monkeypatch.setattr(
module,
"request",
_DummyRequest(path="/api/v1/webhook_test/agent-1", headers={"Content-Type": "application/json"}, json_body={}),
)
res = _run(module.webhook("agent-1"))
assert res.status_code == 200
assert len(tasks) == 1
_run(tasks.pop(0))
assert update_calls == [True]
key = "webhook-trace-agent-1-logs"
trace_obj = json.loads(redis_store[key])
ws = next(iter(trace_obj["webhooks"].values()))
events = ws["events"]
assert any(event.get("event") == "message" for event in events)
assert any(event.get("event") == "finished" and event.get("success") is True for event in events)
class _CanvasError(_StubCanvas):
async def run(self, **_kwargs):
raise RuntimeError("run failed")
yield {}
monkeypatch.setattr(module, "Canvas", _CanvasError)
tasks.clear()
redis_store.clear()
cvs = _make_webhook_cvs(module, params=params)
monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id, _cvs=cvs: (True, _cvs))
res = _run(module.webhook("agent-1"))
assert res.status_code == 200
_run(tasks.pop(0))
trace_obj = json.loads(redis_store[key])
ws = next(iter(trace_obj["webhooks"].values()))
events = ws["events"]
assert any(event.get("event") == "error" for event in events)
assert any(event.get("event") == "finished" and event.get("success") is False for event in events)
log_messages = []
monkeypatch.setattr(module.logging, "exception", lambda msg, *_args, **_kwargs: log_messages.append(str(msg)))
monkeypatch.setattr(module.REDIS_CONN, "get", lambda _key: "{")
monkeypatch.setattr(module.REDIS_CONN, "set_obj", lambda *_args, **_kwargs: None)
tasks.clear()
cvs = _make_webhook_cvs(module, params=params)
monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id, _cvs=cvs: (True, _cvs))
_run(module.webhook("agent-1"))
_run(tasks.pop(0))
assert any("Failed to append webhook trace" in msg for msg in log_messages)
@pytest.mark.p2
def test_webhook_sse_success_and_exception_paths(monkeypatch):
module = _load_agents_app(monkeypatch)
redis_store = {}
monkeypatch.setattr(module.REDIS_CONN, "get", lambda key: redis_store.get(key))
monkeypatch.setattr(module.REDIS_CONN, "set_obj", lambda key, obj, _ttl: redis_store.__setitem__(key, json.dumps(obj)))
params = _default_webhook_params(
security={"auth_type": "none"},
content_types="application/json",
execution_mode="Deferred",
)
cvs = _make_webhook_cvs(module, params=params)
monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _id: (True, cvs))
class _CanvasSSESuccess(_StubCanvas):
async def run(self, **_kwargs):
yield {"event": "message", "data": {"content": "x", "start_to_think": True}}
yield {"event": "message", "data": {"content": "y", "end_to_think": True}}
yield {"event": "message", "data": {"content": "Hello"}}
yield {"event": "message_end", "data": {"status": "201"}}
monkeypatch.setattr(module, "Canvas", _CanvasSSESuccess)
monkeypatch.setattr(
module,
"request",
_DummyRequest(path="/api/v1/webhook_test/agent-1", headers={"Content-Type": "application/json"}, json_body={}),
)
res = _run(module.webhook("agent-1"))
assert res.status_code == 201
payload = json.loads(_run(res.get_data(as_text=True)))
assert payload == {"message": "<think></think>Hello", "success": True, "code": 201}
class _CanvasSSEError(_StubCanvas):
async def run(self, **_kwargs):
raise RuntimeError("sse failed")
yield {}
monkeypatch.setattr(module, "Canvas", _CanvasSSEError)
monkeypatch.setattr(
module,
"request",
_DummyRequest(path="/api/v1/webhook_test/agent-1", headers={"Content-Type": "application/json"}, json_body={}),
)
res = _run(module.webhook("agent-1"))
assert res.status_code == 400
payload = json.loads(_run(res.get_data(as_text=True)))
assert payload["code"] == 400
assert payload["success"] is False
assert "sse failed" in payload["message"]
@pytest.mark.p2
def test_webhook_trace_encoded_id_generation(monkeypatch):
module = _load_agents_app(monkeypatch)
webhooks_obj = {
"webhooks": {
"101.0": {
"events": [{"event": "message", "ts": 101.2}],
}
}
}
monkeypatch.setattr(module.REDIS_CONN, "get", lambda _key: json.dumps(webhooks_obj))
monkeypatch.setattr(module, "request", SimpleNamespace(args=_Args({"since_ts": "100.0"})))
res = _run(module.webhook_trace("agent-1"))
assert res["code"] == module.RetCode.SUCCESS
expected = base64.urlsafe_b64encode(
hmac.new(
b"webhook_id_secret",
b"101.0",
hashlib.sha256,
).digest()
).decode("utf-8").rstrip("=")
assert res["data"]["webhook_id"] == expected
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_agent_app/test_agents_webhook_unit.py",
"license": "Apache License 2.0",
"lines": 1054,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_api_app/test_api_tokens_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import sys
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _ExprField:
def __init__(self, name):
self.name = name
def __eq__(self, other):
return (self.name, other)
class _DummyAPITokenModel:
tenant_id = _ExprField("tenant_id")
token = _ExprField("token")
def _run(coro):
return asyncio.run(coro)
def _load_api_app(monkeypatch):
repo_root = Path(__file__).resolve().parents[4]
quart_mod = ModuleType("quart")
quart_mod.request = SimpleNamespace(args={})
monkeypatch.setitem(sys.modules, "quart", quart_mod)
apps_mod = ModuleType("api.apps")
apps_mod.__path__ = [str(repo_root / "api" / "apps")]
apps_mod.login_required = lambda fn: fn
apps_mod.current_user = SimpleNamespace(id="user-1")
monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
api_utils_mod = ModuleType("api.utils.api_utils")
async def _get_request_json():
return {}
api_utils_mod.generate_confirmation_token = lambda: "token-123"
api_utils_mod.get_request_json = _get_request_json
api_utils_mod.get_json_result = lambda data=None, message="", code=0: {
"code": code,
"message": message,
"data": data,
}
api_utils_mod.get_data_error_result = lambda message="", code=400, data=None: {
"code": code,
"message": message,
"data": data,
}
api_utils_mod.server_error_response = lambda exc: {
"code": 500,
"message": str(exc),
"data": None,
}
api_utils_mod.validate_request = lambda *_args, **_kwargs: (lambda fn: fn)
monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)
api_service_mod = ModuleType("api.db.services.api_service")
class _StubAPITokenService:
@staticmethod
def save(**_kwargs):
return True
@staticmethod
def query(**_kwargs):
return []
@staticmethod
def filter_delete(_conds):
return True
class _StubAPI4ConversationService:
@staticmethod
def stats(*_args, **_kwargs):
return []
api_service_mod.APITokenService = _StubAPITokenService
api_service_mod.API4ConversationService = _StubAPI4ConversationService
monkeypatch.setitem(sys.modules, "api.db.services.api_service", api_service_mod)
user_service_mod = ModuleType("api.db.services.user_service")
class _StubUserTenantService:
@staticmethod
def query(**_kwargs):
return [SimpleNamespace(tenant_id="tenant-1")]
user_service_mod.UserTenantService = _StubUserTenantService
monkeypatch.setitem(sys.modules, "api.db.services.user_service", user_service_mod)
db_models_mod = ModuleType("api.db.db_models")
db_models_mod.APIToken = _DummyAPITokenModel
monkeypatch.setitem(sys.modules, "api.db.db_models", db_models_mod)
time_utils_mod = ModuleType("common.time_utils")
time_utils_mod.current_timestamp = lambda: 123
time_utils_mod.datetime_format = lambda _dt: "2026-01-01 00:00:00"
monkeypatch.setitem(sys.modules, "common.time_utils", time_utils_mod)
module_path = repo_root / "api" / "apps" / "api_app.py"
spec = importlib.util.spec_from_file_location("test_api_tokens_unit_module", module_path)
module = importlib.util.module_from_spec(spec)
module.manager = _DummyManager()
spec.loader.exec_module(module)
return module
@pytest.mark.p2
def test_new_token_branches_and_error_paths(monkeypatch):
module = _load_api_app(monkeypatch)
async def req_canvas():
return {"canvas_id": "canvas-1"}
monkeypatch.setattr(module, "get_request_json", req_canvas)
monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [])
res = _run(module.new_token())
assert res["message"] == "Tenant not found!"
monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
monkeypatch.setattr(module.APITokenService, "save", lambda **_kwargs: True)
res = _run(module.new_token())
assert res["code"] == 0
assert res["data"]["tenant_id"] == "tenant-1"
assert res["data"]["dialog_id"] == "canvas-1"
assert res["data"]["source"] == "agent"
monkeypatch.setattr(module.APITokenService, "save", lambda **_kwargs: False)
res = _run(module.new_token())
assert res["message"] == "Fail to new a dialog!"
monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: (_ for _ in ()).throw(RuntimeError("query failed")))
res = _run(module.new_token())
assert res["code"] == 500
assert "query failed" in res["message"]
@pytest.mark.p2
def test_token_list_tenant_guard_and_exception(monkeypatch):
module = _load_api_app(monkeypatch)
monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [])
monkeypatch.setattr(module, "request", SimpleNamespace(args={"dialog_id": "d1"}))
res = module.token_list()
assert res["message"] == "Tenant not found!"
monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
monkeypatch.setattr(module, "request", SimpleNamespace(args={}))
res = module.token_list()
assert res["code"] == 500
assert "canvas_id" in res["message"]
@pytest.mark.p2
def test_rm_exception_path(monkeypatch):
module = _load_api_app(monkeypatch)
async def req_rm():
return {"tokens": ["tok-1"], "tenant_id": "tenant-1"}
monkeypatch.setattr(module, "get_request_json", req_rm)
monkeypatch.setattr(
module.APITokenService,
"filter_delete",
lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("delete failed")),
)
res = _run(module.rm())
assert res["code"] == 500
assert "delete failed" in res["message"]
@pytest.mark.p2
def test_stats_aggregation_and_error_paths(monkeypatch):
module = _load_api_app(monkeypatch)
monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [])
monkeypatch.setattr(module, "request", SimpleNamespace(args={}))
res = module.stats()
assert res["message"] == "Tenant not found!"
monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
monkeypatch.setattr(module, "request", SimpleNamespace(args={"canvas_id": "canvas-1"}))
monkeypatch.setattr(
module.API4ConversationService,
"stats",
lambda *_args, **_kwargs: [
{
"dt": "2026-01-01",
"pv": 3,
"uv": 2,
"tokens": 100,
"duration": 9.9,
"round": 1,
"thumb_up": 0,
}
],
)
res = module.stats()
assert res["code"] == 0
assert res["data"]["pv"] == [("2026-01-01", 3)]
assert res["data"]["uv"] == [("2026-01-01", 2)]
assert res["data"]["round"] == [("2026-01-01", 1)]
assert res["data"]["thumb_up"] == [("2026-01-01", 0)]
assert res["data"]["tokens"] == [("2026-01-01", 0.1)]
assert res["data"]["speed"] == [("2026-01-01", 10.0)]
monkeypatch.setattr(
module.API4ConversationService,
"stats",
lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("stats failed")),
)
res = module.stats()
assert res["code"] == 500
assert "stats failed" in res["message"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_api_app/test_api_tokens_unit.py",
"license": "Apache License 2.0",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_auth_app/test_oidc_client_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import sys
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _FakeResponse:
def __init__(self, payload=None, err=None):
self._payload = payload or {}
self._err = err
def raise_for_status(self):
if self._err:
raise self._err
def json(self):
return self._payload
class _DummyJwkClient:
def __init__(self, _jwks_uri):
self._key = "dummy-signing-key"
def get_signing_key_from_jwt(self, _id_token):
return SimpleNamespace(key=self._key)
def _load_auth_modules(monkeypatch):
repo_root = Path(__file__).resolve().parents[4]
common_pkg = ModuleType("common")
common_pkg.__path__ = [str(repo_root / "common")]
monkeypatch.setitem(sys.modules, "common", common_pkg)
api_pkg = ModuleType("api")
api_pkg.__path__ = [str(repo_root / "api")]
apps_pkg = ModuleType("api.apps")
apps_pkg.__path__ = [str(repo_root / "api" / "apps")]
auth_pkg = ModuleType("api.apps.auth")
auth_pkg.__path__ = [str(repo_root / "api" / "apps" / "auth")]
monkeypatch.setitem(sys.modules, "api", api_pkg)
monkeypatch.setitem(sys.modules, "api.apps", apps_pkg)
monkeypatch.setitem(sys.modules, "api.apps.auth", auth_pkg)
for mod_name in ["api.apps.auth.oauth", "api.apps.auth.oidc"]:
sys.modules.pop(mod_name, None)
oauth_path = repo_root / "api" / "apps" / "auth" / "oauth.py"
oauth_spec = importlib.util.spec_from_file_location("api.apps.auth.oauth", oauth_path)
oauth_module = importlib.util.module_from_spec(oauth_spec)
monkeypatch.setitem(sys.modules, "api.apps.auth.oauth", oauth_module)
oauth_spec.loader.exec_module(oauth_module)
oidc_path = repo_root / "api" / "apps" / "auth" / "oidc.py"
oidc_spec = importlib.util.spec_from_file_location("api.apps.auth.oidc", oidc_path)
oidc_module = importlib.util.module_from_spec(oidc_spec)
monkeypatch.setitem(sys.modules, "api.apps.auth.oidc", oidc_module)
oidc_spec.loader.exec_module(oidc_module)
return oauth_module, oidc_module
def _load_github_module(monkeypatch):
_load_auth_modules(monkeypatch)
repo_root = Path(__file__).resolve().parents[4]
sys.modules.pop("api.apps.auth.github", None)
github_path = repo_root / "api" / "apps" / "auth" / "github.py"
github_spec = importlib.util.spec_from_file_location("api.apps.auth.github", github_path)
github_module = importlib.util.module_from_spec(github_spec)
monkeypatch.setitem(sys.modules, "api.apps.auth.github", github_module)
github_spec.loader.exec_module(github_module)
return github_module
def _load_auth_init_module(monkeypatch):
_load_auth_modules(monkeypatch)
repo_root = Path(__file__).resolve().parents[4]
github_mod = ModuleType("api.apps.auth.github")
class _StubGithubOAuthClient:
def __init__(self, config):
self.config = config
github_mod.GithubOAuthClient = _StubGithubOAuthClient
monkeypatch.setitem(sys.modules, "api.apps.auth.github", github_mod)
init_path = repo_root / "api" / "apps" / "auth" / "__init__.py"
init_spec = importlib.util.spec_from_file_location(
"api.apps.auth",
init_path,
submodule_search_locations=[str(repo_root / "api" / "apps" / "auth")],
)
init_module = importlib.util.module_from_spec(init_spec)
monkeypatch.setitem(sys.modules, "api.apps.auth", init_module)
init_spec.loader.exec_module(init_module)
return init_module
def _base_config():
return {
"issuer": "https://issuer.example",
"client_id": "client-1",
"client_secret": "secret-1",
"redirect_uri": "https://app.example/callback",
}
def _metadata(issuer):
return {
"issuer": issuer,
"jwks_uri": f"{issuer}/jwks",
"authorization_endpoint": f"{issuer}/authorize",
"token_endpoint": f"{issuer}/token",
"userinfo_endpoint": f"{issuer}/userinfo",
}
def _make_client(monkeypatch, oidc_module):
monkeypatch.setattr(oidc_module.OIDCClient, "_load_oidc_metadata", staticmethod(lambda issuer: _metadata(issuer)))
return oidc_module.OIDCClient(_base_config())
@pytest.mark.p2
def test_oidc_init_requires_issuer(monkeypatch):
_, oidc_module = _load_auth_modules(monkeypatch)
with pytest.raises(ValueError) as exc_info:
oidc_module.OIDCClient({"client_id": "cid"})
assert str(exc_info.value) == "Missing issuer in configuration."
@pytest.mark.p2
def test_oidc_init_loads_metadata_and_sets_endpoints(monkeypatch):
_, oidc_module = _load_auth_modules(monkeypatch)
monkeypatch.setattr(oidc_module.OIDCClient, "_load_oidc_metadata", staticmethod(lambda issuer: _metadata(issuer)))
client = oidc_module.OIDCClient(_base_config())
assert client.issuer == "https://issuer.example"
assert client.jwks_uri == "https://issuer.example/jwks"
assert client.authorization_url == "https://issuer.example/authorize"
assert client.token_url == "https://issuer.example/token"
assert client.userinfo_url == "https://issuer.example/userinfo"
@pytest.mark.p2
def test_load_oidc_metadata_success_and_wraps_failure(monkeypatch):
_, oidc_module = _load_auth_modules(monkeypatch)
calls = {}
def _ok_sync_request(method, url, timeout):
calls.update({"method": method, "url": url, "timeout": timeout})
return _FakeResponse(_metadata("https://issuer.example"))
monkeypatch.setattr(oidc_module, "sync_request", _ok_sync_request)
metadata = oidc_module.OIDCClient._load_oidc_metadata("https://issuer.example")
assert metadata["jwks_uri"] == "https://issuer.example/jwks"
assert calls == {
"method": "GET",
"url": "https://issuer.example/.well-known/openid-configuration",
"timeout": 7,
}
def _boom_sync_request(*_args, **_kwargs):
raise RuntimeError("metadata boom")
monkeypatch.setattr(oidc_module, "sync_request", _boom_sync_request)
with pytest.raises(ValueError) as exc_info:
oidc_module.OIDCClient._load_oidc_metadata("https://issuer.example")
assert str(exc_info.value) == "Failed to fetch OIDC metadata: metadata boom"
@pytest.mark.p2
def test_parse_id_token_success_and_error(monkeypatch):
_, oidc_module = _load_auth_modules(monkeypatch)
client = _make_client(monkeypatch, oidc_module)
monkeypatch.setattr(oidc_module.jwt, "get_unverified_header", lambda _token: {})
seen = {}
class _JwkClient(_DummyJwkClient):
def __init__(self, jwks_uri):
super().__init__(jwks_uri)
seen["jwks_uri"] = jwks_uri
def get_signing_key_from_jwt(self, id_token):
seen["id_token"] = id_token
return super().get_signing_key_from_jwt(id_token)
monkeypatch.setattr(oidc_module.jwt, "PyJWKClient", _JwkClient)
def _decode(id_token, key, algorithms, audience, issuer):
seen.update(
{
"decode_id_token": id_token,
"decode_key": key,
"algorithms": algorithms,
"audience": audience,
"issuer": issuer,
}
)
return {"sub": "user-1", "email": "id@example.com"}
monkeypatch.setattr(oidc_module.jwt, "decode", _decode)
parsed = client.parse_id_token("id-token-1")
assert parsed["sub"] == "user-1"
assert seen["jwks_uri"] == "https://issuer.example/jwks"
assert seen["decode_key"] == "dummy-signing-key"
assert seen["algorithms"] == ["RS256"]
assert seen["audience"] == "client-1"
assert seen["issuer"] == "https://issuer.example"
def _raise_decode(*_args, **_kwargs):
raise RuntimeError("decode boom")
monkeypatch.setattr(oidc_module.jwt, "decode", _raise_decode)
with pytest.raises(ValueError) as exc_info:
client.parse_id_token("id-token-2")
assert str(exc_info.value) == "Error parsing ID Token: decode boom"
@pytest.mark.p2
def test_fetch_user_info_merges_id_token_and_oauth_userinfo(monkeypatch):
oauth_module, oidc_module = _load_auth_modules(monkeypatch)
client = _make_client(monkeypatch, oidc_module)
monkeypatch.setattr(
oidc_module.OIDCClient,
"parse_id_token",
lambda self, _id_token: {"picture": "id-picture", "email": "id@example.com"},
)
def _fake_parent_fetch(self, access_token, **_kwargs):
assert access_token == "access-1"
return oauth_module.UserInfo(
email="oauth@example.com",
username="oauth-user",
nickname="oauth-nick",
avatar_url=None,
)
monkeypatch.setattr(oauth_module.OAuthClient, "fetch_user_info", _fake_parent_fetch)
info = client.fetch_user_info("access-1", id_token="id-token")
assert info.email == "oauth@example.com"
assert info.username == "oauth-user"
assert info.nickname == "oauth-nick"
assert info.avatar_url == "id-picture"
@pytest.mark.p2
def test_async_fetch_user_info_merges_id_token_and_oauth_userinfo(monkeypatch):
oauth_module, oidc_module = _load_auth_modules(monkeypatch)
client = _make_client(monkeypatch, oidc_module)
monkeypatch.setattr(
oidc_module.OIDCClient,
"parse_id_token",
lambda self, _id_token: {"picture": "id-picture-async", "email": "id-async@example.com"},
)
async def _fake_parent_async_fetch(self, access_token, **_kwargs):
assert access_token == "access-2"
return oauth_module.UserInfo(
email="oauth-async@example.com",
username="oauth-async-user",
nickname="oauth-async-nick",
avatar_url=None,
)
monkeypatch.setattr(oauth_module.OAuthClient, "async_fetch_user_info", _fake_parent_async_fetch)
info = asyncio.run(client.async_fetch_user_info("access-2", id_token="id-token"))
assert info.email == "oauth-async@example.com"
assert info.username == "oauth-async-user"
assert info.nickname == "oauth-async-nick"
assert info.avatar_url == "id-picture-async"
@pytest.mark.p2
def test_normalize_user_info_passthrough(monkeypatch):
oauth_module, oidc_module = _load_auth_modules(monkeypatch)
client = _make_client(monkeypatch, oidc_module)
result = client.normalize_user_info(
{
"email": "user@example.com",
"username": "user",
"nickname": "User",
"picture": "picture-url",
}
)
assert isinstance(result, oauth_module.UserInfo)
assert result.to_dict() == {
"email": "user@example.com",
"username": "user",
"nickname": "User",
"avatar_url": "picture-url",
}
@pytest.mark.p2
def test_get_auth_client_type_inference_and_unsupported(monkeypatch):
auth_module = _load_auth_init_module(monkeypatch)
class _FakeOAuth2Client:
def __init__(self, config):
self.config = config
class _FakeOidcClient:
def __init__(self, config):
self.config = config
class _FakeGithubClient:
def __init__(self, config):
self.config = config
monkeypatch.setattr(
auth_module,
"CLIENT_TYPES",
{
"oauth2": _FakeOAuth2Client,
"oidc": _FakeOidcClient,
"github": _FakeGithubClient,
},
)
oidc_client = auth_module.get_auth_client({"issuer": "https://issuer.example"})
assert isinstance(oidc_client, _FakeOidcClient)
oauth_client = auth_module.get_auth_client({})
assert isinstance(oauth_client, _FakeOAuth2Client)
with pytest.raises(ValueError, match="Unsupported type: invalid"):
auth_module.get_auth_client({"type": "invalid"})
@pytest.mark.p2
def test_github_oauth_client_init_and_normalize_unit(monkeypatch):
github_module = _load_github_module(monkeypatch)
client = github_module.GithubOAuthClient(_base_config())
assert client.authorization_url == "https://github.com/login/oauth/authorize"
assert client.token_url == "https://github.com/login/oauth/access_token"
assert client.userinfo_url == "https://api.github.com/user"
assert client.scope == "user:email"
normalized = client.normalize_user_info(
{
"email": "octo@example.com",
"login": "octocat",
"name": "Octo Cat",
"avatar_url": "https://avatar.example/octocat.png",
}
)
assert normalized.to_dict() == {
"email": "octo@example.com",
"username": "octocat",
"nickname": "Octo Cat",
"avatar_url": "https://avatar.example/octocat.png",
}
normalized_fallback = client.normalize_user_info({"email": "fallback@example.com"})
assert normalized_fallback.to_dict() == {
"email": "fallback@example.com",
"username": "fallback",
"nickname": "fallback",
"avatar_url": "",
}
@pytest.mark.p2
def test_github_fetch_user_info_sync_success_and_error_unit(monkeypatch):
github_module = _load_github_module(monkeypatch)
client = github_module.GithubOAuthClient(_base_config())
calls = []
def _fake_sync_request(method, url, headers=None, timeout=None):
calls.append((method, url, headers, timeout))
if url.endswith("/emails"):
return _FakeResponse(
[
{"email": "other@example.com", "primary": False},
{"email": "octo@example.com", "primary": True},
]
)
return _FakeResponse({"login": "octocat", "name": "Octo Cat", "avatar_url": "https://avatar.example/octocat.png"})
monkeypatch.setattr(github_module, "sync_request", _fake_sync_request)
info = client.fetch_user_info("sync-token")
assert info.to_dict() == {
"email": "octo@example.com",
"username": "octocat",
"nickname": "Octo Cat",
"avatar_url": "https://avatar.example/octocat.png",
}
assert [call[1] for call in calls] == [
"https://api.github.com/user",
"https://api.github.com/user/emails",
]
assert all(call[2]["Authorization"] == "Bearer sync-token" for call in calls)
assert all(call[3] == 7 for call in calls)
def _sync_request_raises(*_args, **_kwargs):
return _FakeResponse(err=RuntimeError("status boom"))
monkeypatch.setattr(github_module, "sync_request", _sync_request_raises)
with pytest.raises(ValueError, match="Failed to fetch github user info: status boom"):
client.fetch_user_info("sync-token")
@pytest.mark.p2
def test_github_fetch_user_info_async_success_and_error_unit(monkeypatch):
github_module = _load_github_module(monkeypatch)
client = github_module.GithubOAuthClient(_base_config())
calls = []
async def _fake_async_request(method, url, headers=None, **kwargs):
calls.append((method, url, headers, kwargs.get("timeout")))
if url.endswith("/emails"):
return _FakeResponse(
[
{"email": "other@example.com", "primary": False},
{"email": "octo-async@example.com", "primary": True},
]
)
return _FakeResponse(
{"login": "octocat-async", "name": "Octo Async", "avatar_url": "https://avatar.example/octo-async.png"}
)
monkeypatch.setattr(github_module, "async_request", _fake_async_request)
info = asyncio.run(client.async_fetch_user_info("async-token"))
assert info.to_dict() == {
"email": "octo-async@example.com",
"username": "octocat-async",
"nickname": "Octo Async",
"avatar_url": "https://avatar.example/octo-async.png",
}
assert [call[1] for call in calls] == [
"https://api.github.com/user",
"https://api.github.com/user/emails",
]
assert all(call[2]["Authorization"] == "Bearer async-token" for call in calls)
assert all(call[3] == 7 for call in calls)
async def _async_request_raises(*_args, **_kwargs):
return _FakeResponse(err=RuntimeError("async status boom"))
monkeypatch.setattr(github_module, "async_request", _async_request_raises)
with pytest.raises(ValueError, match="Failed to fetch github user info: async status boom"):
asyncio.run(client.async_fetch_user_info("async-token"))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_auth_app/test_oidc_client_unit.py",
"license": "Apache License 2.0",
"lines": 376,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_chunk_app/test_chunk_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import base64
import importlib.util
import json
import sys
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _Vec(list):
def __mul__(self, scalar):
return _Vec([scalar * x for x in self])
__rmul__ = __mul__
def __add__(self, other):
return _Vec([a + b for a, b in zip(self, other)])
def tolist(self):
return list(self)
class _DummyDoc:
def __init__(self, *, doc_id="doc-1", kb_id="kb-1", name="Doc", parser_id="naive"):
self.id = doc_id
self.kb_id = kb_id
self.name = name
self.parser_id = parser_id
def to_dict(self):
return {"id": self.id, "kb_id": self.kb_id, "name": self.name}
class _DummyRetCode:
SUCCESS = 0
DATA_ERROR = 102
EXCEPTION_ERROR = 100
OPERATING_ERROR = 103
class _DummyParserType:
QA = "qa"
NAIVE = "naive"
class _DummyRetriever:
async def search(self, query, _index_name, _kb_ids, highlight=None):
class _SRes:
total = 1
ids = ["chunk-1"]
field = {
"chunk-1": {
"content_with_weight": "chunk content",
"doc_id": "doc-1",
"docnm_kwd": "Doc",
"important_kwd": ["k1"],
"question_kwd": ["q1"],
"img_id": "img-1",
"available_int": 1,
"position_int": [],
"doc_type_kwd": "text",
}
}
highlight = {"chunk-1": " highlighted content "}
_ = (query, highlight)
return _SRes()
class _DummyDocStore:
def __init__(self):
self.updated = []
self.inserted = []
self.deleted_inputs = []
self.to_delete = [1]
self.chunk = {
"id": "chunk-1",
"doc_id": "doc-1",
"kb_id": "kb-1",
"content_with_weight": "chunk content",
"docnm_kwd": "Doc",
"q_2_vec": [0.1, 0.2],
"content_tks": ["a"],
"content_ltks": ["b"],
"content_sm_ltks": ["c"],
}
def get(self, *_args, **_kwargs):
return dict(self.chunk) if self.chunk is not None else None
def update(self, condition, payload, *_args, **_kwargs):
self.updated.append((condition, payload))
return True
def delete(self, condition, *_args, **_kwargs):
self.deleted_inputs.append(condition)
if not self.to_delete:
return 0
return self.to_delete.pop(0)
def insert(self, docs, *_args, **_kwargs):
self.inserted.extend(docs)
class _DummyStorage:
def __init__(self):
self.put_calls = []
self.rm_calls = []
def put(self, bucket, name, binary):
self.put_calls.append((bucket, name, binary))
def obj_exist(self, _bucket, _name):
return True
def rm(self, bucket, name):
self.rm_calls.append((bucket, name))
class _DummyTenant:
def __init__(self, tenant_id="tenant-1"):
self.tenant_id = tenant_id
class _DummyLLMBundle:
    """Embedding-model stub: ignores construction args, returns fixed vectors."""

    def __init__(self, *_args, **_kwargs):
        # Constructor arguments (tenant, llm type, ...) are irrelevant here.
        pass

    def encode(self, _inputs):
        """Return two fixed embedding vectors plus a fake token count of 9."""
        vectors = [_Vec([1.0, 2.0]), _Vec([3.0, 4.0])]
        return vectors, 9
class _DummyXXHash:
def __init__(self, data):
self._data = data
def hexdigest(self):
return f"chunk-{len(self._data)}"
def _run(coro):
return asyncio.run(coro)
def _load_chunk_module(monkeypatch):
    """Import ``api/apps/chunk_app.py`` in isolation and return the module.

    Every external dependency (quart, xxhash, ``common.*``, ``rag.*``,
    ``api.*``) is replaced with an in-memory stub registered in ``sys.modules``
    via *monkeypatch*, so the route module can be executed without the real
    server stack. ``manager`` is swapped for a no-op route registrar so route
    decorators leave the handlers as plain callables.
    """
    repo_root = Path(__file__).resolve().parents[4]
    # --- third-party stubs: quart request object and xxhash hasher ---
    quart_mod = ModuleType("quart")
    quart_mod.request = SimpleNamespace(args={}, headers={})
    monkeypatch.setitem(sys.modules, "quart", quart_mod)
    xxhash_mod = ModuleType("xxhash")
    xxhash_mod.xxh64 = lambda data: _DummyXXHash(data)
    monkeypatch.setitem(sys.modules, "xxhash", xxhash_mod)
    # --- common.* stubs: settings (retriever/doc store/storage) and constants ---
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    settings_mod = ModuleType("common.settings")
    settings_mod.retriever = _DummyRetriever()
    settings_mod.docStoreConn = _DummyDocStore()
    settings_mod.STORAGE_IMPL = _DummyStorage()
    monkeypatch.setitem(sys.modules, "common.settings", settings_mod)
    common_pkg.settings = settings_mod
    constants_mod = ModuleType("common.constants")
    class _DummyLLMType:
        EMBEDDING = SimpleNamespace(value="embedding")
        CHAT = SimpleNamespace(value="chat")
        RERANK = SimpleNamespace(value="rerank")
    constants_mod.RetCode = _DummyRetCode
    constants_mod.LLMType = _DummyLLMType
    constants_mod.ParserType = _DummyParserType
    constants_mod.PAGERANK_FLD = "pagerank_flt"
    monkeypatch.setitem(sys.modules, "common.constants", constants_mod)
    string_utils_mod = ModuleType("common.string_utils")
    string_utils_mod.remove_redundant_spaces = lambda text: " ".join(str(text).split())
    monkeypatch.setitem(sys.modules, "common.string_utils", string_utils_mod)
    metadata_utils_mod = ModuleType("common.metadata_utils")
    metadata_utils_mod.apply_meta_data_filter = lambda *_args, **_kwargs: {}
    monkeypatch.setitem(sys.modules, "common.metadata_utils", metadata_utils_mod)
    misc_utils_mod = ModuleType("common.misc_utils")
    # thread_pool_exec stub runs the callable inline instead of on a pool.
    async def _thread_pool_exec(func):
        return func()
    misc_utils_mod.thread_pool_exec = _thread_pool_exec
    monkeypatch.setitem(sys.modules, "common.misc_utils", misc_utils_mod)
    # --- rag.* stubs: qa/tag helpers, tokenizer, prompt generators ---
    rag_pkg = ModuleType("rag")
    rag_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "rag", rag_pkg)
    rag_app_pkg = ModuleType("rag.app")
    rag_app_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "rag.app", rag_app_pkg)
    rag_qa_mod = ModuleType("rag.app.qa")
    rag_qa_mod.rmPrefix = lambda text: str(text).strip("Q: ").strip("A: ")
    rag_qa_mod.beAdoc = lambda d, q, a, _latin: {**d, "question_kwd": [q], "content_with_weight": f"{q}\n{a}"}
    monkeypatch.setitem(sys.modules, "rag.app.qa", rag_qa_mod)
    rag_tag_mod = ModuleType("rag.app.tag")
    rag_tag_mod.label_question = lambda *_args, **_kwargs: []
    monkeypatch.setitem(sys.modules, "rag.app.tag", rag_tag_mod)
    rag_nlp_mod = ModuleType("rag.nlp")
    rag_nlp_mod.rag_tokenizer = SimpleNamespace(
        tokenize=lambda text: [str(text)],
        fine_grained_tokenize=lambda toks: [f"fg:{t}" for t in toks],
        is_chinese=lambda _text: False,
    )
    rag_nlp_mod.search = SimpleNamespace(index_name=lambda tenant_id: f"idx-{tenant_id}")
    monkeypatch.setitem(sys.modules, "rag.nlp", rag_nlp_mod)
    rag_prompts_pkg = ModuleType("rag.prompts")
    rag_prompts_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "rag.prompts", rag_prompts_pkg)
    rag_generator_mod = ModuleType("rag.prompts.generator")
    rag_generator_mod.cross_languages = lambda *_args, **_kwargs: []
    rag_generator_mod.keyword_extraction = lambda *_args, **_kwargs: []
    monkeypatch.setitem(sys.modules, "rag.prompts.generator", rag_generator_mod)
    # --- api.* stubs: logged-in user, response helpers, db services ---
    apps_mod = ModuleType("api.apps")
    apps_mod.__path__ = [str(repo_root / "api" / "apps")]
    apps_mod.current_user = SimpleNamespace(id="user-1")
    apps_mod.login_required = lambda func: func
    monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
    api_utils_mod = ModuleType("api.utils.api_utils")
    api_utils_mod.get_json_result = lambda data=None, message="", code=0: {"code": code, "message": message, "data": data}
    api_utils_mod.get_data_error_result = lambda message="": {"code": _DummyRetCode.DATA_ERROR, "message": message, "data": False}
    api_utils_mod.server_error_response = lambda exc: {"code": _DummyRetCode.EXCEPTION_ERROR, "message": repr(exc), "data": False}
    api_utils_mod.validate_request = lambda *_args, **_kwargs: (lambda fn: fn)
    api_utils_mod.get_request_json = lambda: _AwaitableValue({})
    monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)
    services_pkg = ModuleType("api.db.services")
    services_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.db.services", services_pkg)
    document_service_mod = ModuleType("api.db.services.document_service")
    # DocumentService stub records chunk-count adjustments for later asserts.
    class _DocumentService:
        decrement_calls = []
        increment_calls = []
        @staticmethod
        def get_tenant_id(_doc_id):
            return "tenant-1"
        @staticmethod
        def get_by_id(doc_id):
            return True, _DummyDoc(doc_id=doc_id, parser_id=_DummyParserType.NAIVE)
        @staticmethod
        def get_embd_id(_doc_id):
            return "embed-1"
        @staticmethod
        def decrement_chunk_num(*args):
            _DocumentService.decrement_calls.append(args)
        @staticmethod
        def increment_chunk_num(*args):
            _DocumentService.increment_calls.append(args)
    document_service_mod.DocumentService = _DocumentService
    monkeypatch.setitem(sys.modules, "api.db.services.document_service", document_service_mod)
    services_pkg.document_service = document_service_mod
    doc_metadata_service_mod = ModuleType("api.db.services.doc_metadata_service")
    doc_metadata_service_mod.DocMetadataService = type("DocMetadataService", (), {})
    monkeypatch.setitem(sys.modules, "api.db.services.doc_metadata_service", doc_metadata_service_mod)
    services_pkg.doc_metadata_service = doc_metadata_service_mod
    kb_service_mod = ModuleType("api.db.services.knowledgebase_service")
    class _KnowledgebaseService:
        @staticmethod
        def get_kb_ids(_tenant_id):
            return ["kb-1"]
        @staticmethod
        def get_by_id(_kb_id):
            return True, SimpleNamespace(pagerank=0.6)
    kb_service_mod.KnowledgebaseService = _KnowledgebaseService
    monkeypatch.setitem(sys.modules, "api.db.services.knowledgebase_service", kb_service_mod)
    services_pkg.knowledgebase_service = kb_service_mod
    llm_service_mod = ModuleType("api.db.services.llm_service")
    llm_service_mod.LLMBundle = _DummyLLMBundle
    monkeypatch.setitem(sys.modules, "api.db.services.llm_service", llm_service_mod)
    services_pkg.llm_service = llm_service_mod
    search_service_mod = ModuleType("api.db.services.search_service")
    search_service_mod.SearchService = type("SearchService", (), {})
    monkeypatch.setitem(sys.modules, "api.db.services.search_service", search_service_mod)
    services_pkg.search_service = search_service_mod
    user_service_mod = ModuleType("api.db.services.user_service")
    class _UserTenantService:
        @staticmethod
        def query(**_kwargs):
            return [_DummyTenant("tenant-1")]
    user_service_mod.UserTenantService = _UserTenantService
    monkeypatch.setitem(sys.modules, "api.db.services.user_service", user_service_mod)
    services_pkg.user_service = user_service_mod
    # --- load chunk_app.py under a test-local module name ---
    module_name = "test_chunk_routes_unit_module"
    module_path = repo_root / "api" / "apps" / "chunk_app.py"
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    # Install the no-op manager before exec so route decorators don't register.
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, module_name, module)
    spec.loader.exec_module(module)
    return module
def _set_request_json(monkeypatch, module, payload):
    """Point the module's ``get_request_json`` at an awaitable yielding *payload*."""
    def _fake_get_request_json():
        return _AwaitableValue(payload)

    monkeypatch.setattr(module, "get_request_json", _fake_get_request_json)
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    # No-op session fixture. NOTE(review): presumably shadows an autouse
    # fixture of the same name from a shared conftest so these isolated unit
    # tests skip real tenant setup — confirm against the suite's conftest.
    return None
@pytest.mark.p2
def test_list_chunk_exception_branches_unit(monkeypatch):
    """Exercise list_chunk: happy path plus tenant/document/search error branches."""
    module = _load_chunk_module(monkeypatch)
    # Happy path: one canned hit supplied by _DummyRetriever.search.
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "keywords": "chunk", "available_int": 0})
    res = _run(module.list_chunk())
    assert res["code"] == 0, res
    assert res["data"]["total"] == 1, res
    assert res["data"]["chunks"][0]["available_int"] == 1, res
    # Empty tenant id -> DATA_ERROR "Tenant not found!".
    monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: "")
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1"})
    res = _run(module.list_chunk())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert res["message"] == "Tenant not found!", res
    # Document lookup failure -> "Document not found!".
    monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: "tenant-1")
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (False, None))
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1"})
    res = _run(module.list_chunk())
    assert res["message"] == "Document not found!", res
    # A search exception whose text contains "not_found" becomes a DATA_ERROR.
    async def _raise_not_found(*_args, **_kwargs):
        raise Exception("x not_found y")
    monkeypatch.setattr(module.settings.retriever, "search", _raise_not_found)
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, _DummyDoc()))
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1"})
    res = _run(module.list_chunk())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert res["message"] == "No chunk found!", res
    # Any other exception surfaces as EXCEPTION_ERROR carrying its message.
    async def _raise_generic(*_args, **_kwargs):
        raise RuntimeError("boom")
    monkeypatch.setattr(module.settings.retriever, "search", _raise_generic)
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1"})
    res = _run(module.list_chunk())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "boom" in res["message"], res
@pytest.mark.p2
def test_get_chunk_sanitize_and_exception_matrix_unit(monkeypatch):
    """Exercise get(): field sanitization, missing tenant/chunk, and error paths."""
    module = _load_chunk_module(monkeypatch)
    module.request = SimpleNamespace(args={"chunk_id": "chunk-1"}, headers={})
    # Success: internal vector/token fields must be stripped from the response.
    res = module.get()
    assert res["code"] == 0, res
    assert "q_2_vec" not in res["data"], res
    assert "content_tks" not in res["data"], res
    assert "content_ltks" not in res["data"], res
    assert "content_sm_ltks" not in res["data"], res
    # No tenants for the current user -> "Tenant not found!".
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [])
    res = module.get()
    assert res["message"] == "Tenant not found!", res
    # Store returns no chunk -> surfaced as an exception-style error.
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [_DummyTenant("tenant-1")])
    module.settings.docStoreConn.chunk = None
    res = module.get()
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "Chunk not found" in res["message"], res
    # "NotFoundError" text in the raised exception maps to DATA_ERROR.
    def _raise_not_found(*_args, **_kwargs):
        raise Exception("NotFoundError: chunk-1")
    monkeypatch.setattr(module.settings.docStoreConn, "get", _raise_not_found)
    res = module.get()
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert res["message"] == "Chunk not found!", res
    # Any other exception maps to EXCEPTION_ERROR with its message.
    def _raise_generic(*_args, **_kwargs):
        raise RuntimeError("get boom")
    monkeypatch.setattr(module.settings.docStoreConn, "get", _raise_generic)
    res = module.get()
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "get boom" in res["message"], res
@pytest.mark.p2
def test_set_chunk_bytes_qa_image_and_guard_matrix_unit(monkeypatch):
    """Exercise set(): input guards, bytes content, QA parsing, image storage, errors."""
    module = _load_chunk_module(monkeypatch)
    # Non-string content propagates the tokenizer's TypeError unchanged.
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "chunk_id": "chunk-1", "content_with_weight": 1})
    with pytest.raises(TypeError, match="expected string or bytes-like object"):
        _run(module.set())
    # Keyword fields must be lists.
    _set_request_json(
        monkeypatch,
        module,
        {"doc_id": "doc-1", "chunk_id": "chunk-1", "content_with_weight": "abc", "important_kwd": "bad"},
    )
    res = _run(module.set())
    assert res["message"] == "`important_kwd` should be a list", res
    _set_request_json(
        monkeypatch,
        module,
        {"doc_id": "doc-1", "chunk_id": "chunk-1", "content_with_weight": "abc", "question_kwd": "bad"},
    )
    res = _run(module.set())
    assert res["message"] == "`question_kwd` should be a list", res
    # Tenant and document lookup guards.
    monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: "")
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "chunk_id": "chunk-1", "content_with_weight": "abc"})
    res = _run(module.set())
    assert res["message"] == "Tenant not found!", res
    monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: "tenant-1")
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (False, None))
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "chunk_id": "chunk-1", "content_with_weight": "abc"})
    res = _run(module.set())
    assert res["message"] == "Document not found!", res
    # NAIVE parser + bytes payload: content is decoded before indexing.
    monkeypatch.setattr(
        module.DocumentService,
        "get_by_id",
        lambda _doc_id: (True, _DummyDoc(doc_id="doc-1", parser_id=module.ParserType.NAIVE)),
    )
    _set_request_json(
        monkeypatch,
        module,
        {
            "doc_id": "doc-1",
            "chunk_id": "chunk-1",
            "content_with_weight": b"bytes-content",
            "important_kwd": ["important"],
            "question_kwd": ["question"],
            "tag_kwd": ["tag"],
            "tag_feas": [0.1],
            "available_int": 0,
        },
    )
    res = _run(module.set())
    assert res["code"] == 0, res
    assert module.settings.docStoreConn.updated[-1][1]["content_with_weight"] == "bytes-content"
    # QA parser + base64 image: the image-storage branch must run.
    monkeypatch.setattr(
        module.DocumentService,
        "get_by_id",
        lambda _doc_id: (True, _DummyDoc(doc_id="doc-1", parser_id=module.ParserType.QA)),
    )
    _set_request_json(
        monkeypatch,
        module,
        {
            "doc_id": "doc-1",
            "chunk_id": "chunk-2",
            "content_with_weight": "Q:Question\nA:Answer",
            "image_base64": base64.b64encode(b"image").decode("utf-8"),
            "img_id": "bucket-name",
        },
    )
    res = _run(module.set())
    assert res["code"] == 0, res
    assert module.settings.STORAGE_IMPL.put_calls, "image storage branch should be called"
    # Failure inside the thread pool surfaces as EXCEPTION_ERROR.
    async def _raise_thread_pool(_func):
        raise RuntimeError("set tp boom")
    monkeypatch.setattr(module, "thread_pool_exec", _raise_thread_pool)
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "chunk_id": "chunk-1", "content_with_weight": "abc"})
    res = _run(module.set())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "set tp boom" in res["message"], res
@pytest.mark.p2
def test_switch_chunk_success_failure_and_exception_unit(monkeypatch):
    """Exercise switch(): missing doc, index-update failure, success, pool error."""
    module = _load_chunk_module(monkeypatch)
    # Missing document guard.
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (False, None))
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "chunk_ids": ["c1"], "available_int": 1})
    res = _run(module.switch())
    assert res["message"] == "Document not found!", res
    # Store update returning False -> "Index updating failure".
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, _DummyDoc()))
    monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: "tenant-1")
    monkeypatch.setattr(module.settings.docStoreConn, "update", lambda *_args, **_kwargs: False)
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "chunk_ids": ["c1", "c2"], "available_int": 0})
    res = _run(module.switch())
    assert res["message"] == "Index updating failure", res
    # Successful toggle returns data=True.
    monkeypatch.setattr(module.settings.docStoreConn, "update", lambda *_args, **_kwargs: True)
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "chunk_ids": ["c1", "c2"], "available_int": 1})
    res = _run(module.switch())
    assert res["code"] == 0, res
    assert res["data"] is True, res
    # Thread-pool failure surfaces as EXCEPTION_ERROR.
    async def _raise_thread_pool(_func):
        raise RuntimeError("switch tp boom")
    monkeypatch.setattr(module, "thread_pool_exec", _raise_thread_pool)
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "chunk_ids": ["c1"], "available_int": 1})
    res = _run(module.switch())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "switch tp boom" in res["message"], res
@pytest.mark.p2
def test_rm_chunk_delete_exception_partial_compensation_and_cleanup_unit(monkeypatch):
    """Exercise rm(): guards, delete failures, partial deletes, cleanup, pool error."""
    module = _load_chunk_module(monkeypatch)
    # Missing document guard.
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (False, None))
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "chunk_ids": ["c1"]})
    res = _run(module.rm())
    assert res["message"] == "Document not found!", res
    # Exception from the store's delete -> "Chunk deleting failure".
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, _DummyDoc()))
    def _raise_delete(*_args, **_kwargs):
        raise RuntimeError("delete boom")
    monkeypatch.setattr(module.settings.docStoreConn, "delete", _raise_delete)
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "chunk_ids": ["c1"]})
    res = _run(module.rm())
    assert res["message"] == "Chunk deleting failure", res
    # Scripted delete driven by to_delete: a 0 count -> "Index updating failure".
    def _delete(condition, *_args, **_kwargs):
        module.settings.docStoreConn.deleted_inputs.append(condition)
        if not module.settings.docStoreConn.to_delete:
            return 0
        return module.settings.docStoreConn.to_delete.pop(0)
    module.settings.docStoreConn.to_delete = [0]
    monkeypatch.setattr(module.settings.docStoreConn, "delete", _delete)
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "chunk_ids": ["c1"]})
    res = _run(module.rm())
    assert res["message"] == "Index updating failure", res
    # Partial success: chunk counts decremented and stored images removed.
    module.settings.docStoreConn.to_delete = [1, 2]
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "chunk_ids": ["c1", "c2", "c3"]})
    res = _run(module.rm())
    assert res["code"] == 0, res
    assert module.DocumentService.decrement_calls, "decrement_chunk_num should be called"
    assert len(module.settings.STORAGE_IMPL.rm_calls) >= 1
    # A bare string chunk_ids value is also accepted.
    module.settings.docStoreConn.to_delete = [1]
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "chunk_ids": "c1"})
    res = _run(module.rm())
    assert res["code"] == 0, res
    # Thread-pool failure surfaces as EXCEPTION_ERROR.
    async def _raise_thread_pool(_func):
        raise RuntimeError("rm tp boom")
    monkeypatch.setattr(module, "thread_pool_exec", _raise_thread_pool)
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "chunk_ids": ["c1"]})
    res = _run(module.rm())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "rm tp boom" in res["message"], res
@pytest.mark.p2
def test_create_chunk_guards_pagerank_and_success_unit(monkeypatch):
    """Exercise create(): input guards, missing entities, pagerank field, pool error."""
    module = _load_chunk_module(monkeypatch)
    module.request = SimpleNamespace(headers={"X-Request-ID": "req-1"}, args={})
    # Keyword fields must be lists.
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "content_with_weight": "chunk", "important_kwd": "bad"})
    res = _run(module.create())
    assert res["message"] == "`important_kwd` is required to be a list", res
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "content_with_weight": "chunk", "question_kwd": "bad"})
    res = _run(module.create())
    assert res["message"] == "`question_kwd` is required to be a list", res
    # Missing document / tenant / knowledgebase guards, in order.
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (False, None))
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "content_with_weight": "chunk"})
    res = _run(module.create())
    assert res["message"] == "Document not found!", res
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, _DummyDoc(doc_id="doc-1")))
    monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: "")
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "content_with_weight": "chunk"})
    res = _run(module.create())
    assert res["message"] == "Tenant not found!", res
    monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: "tenant-1")
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "content_with_weight": "chunk"})
    res = _run(module.create())
    assert res["message"] == "Knowledgebase not found!", res
    # Success: chunk inserted with the kb's pagerank and counters incremented.
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, SimpleNamespace(pagerank=0.8)))
    _set_request_json(
        monkeypatch,
        module,
        {
            "doc_id": "doc-1",
            "content_with_weight": "chunk",
            "important_kwd": ["i1"],
            "question_kwd": ["q1"],
            "tag_feas": [0.2],
        },
    )
    res = _run(module.create())
    assert res["code"] == 0, res
    assert res["data"]["chunk_id"], res
    assert module.settings.docStoreConn.inserted, "insert should be called"
    inserted = module.settings.docStoreConn.inserted[-1]
    assert "pagerank_flt" in inserted
    assert module.DocumentService.increment_calls, "increment_chunk_num should be called"
    # Thread-pool failure surfaces as EXCEPTION_ERROR.
    async def _raise_thread_pool(_func):
        raise RuntimeError("create tp boom")
    monkeypatch.setattr(module, "thread_pool_exec", _raise_thread_pool)
    _set_request_json(monkeypatch, module, {"doc_id": "doc-1", "content_with_weight": "chunk"})
    res = _run(module.create())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "create tp boom" in res["message"], res
@pytest.mark.p2
def test_retrieval_test_branch_matrix_unit(monkeypatch):
    """Exercise retrieval_test(): auth, metadata filtering, cross-language,
    keyword expansion, knowledge-graph merge, and retriever error branches."""
    module = _load_chunk_module(monkeypatch)
    module.request = SimpleNamespace(headers={"X-Request-ID": "req-r"}, args={})
    applied_filters = []
    llm_calls = []
    cross_calls = []
    keyword_calls = []
    # Recording stub for the metadata filter; always filters down to one doc.
    async def _apply_filter(meta_data_filter, metas, question, chat_mdl, local_doc_ids):
        applied_filters.append(
            {
                "meta_data_filter": meta_data_filter,
                "metas": metas,
                "question": question,
                "chat_mdl": chat_mdl,
                "local_doc_ids": list(local_doc_ids),
            }
        )
        return ["doc-filtered"]
    # Recording stubs for translation and keyword expansion.
    async def _cross_languages(_tenant_id, _dialog, question, langs):
        cross_calls.append((question, tuple(langs)))
        return f"{question}-xl"
    async def _keyword_extraction(_chat_mdl, question):
        keyword_calls.append(question)
        return "-kw"
    # Configurable retriever: ok / "not_found" exception / generic exception.
    class _Retriever:
        def __init__(self, mode="ok"):
            self.mode = mode
            self.retrieval_questions = []
        async def retrieval(self, question, *_args, **_kwargs):
            if self.mode == "not_found":
                raise Exception("boom not_found boom")
            if self.mode == "explode":
                raise RuntimeError("retrieval boom")
            self.retrieval_questions.append(question)
            return {"chunks": [{"id": "c1", "vector": [0.1], "content_with_weight": "chunk-content"}]}
        def retrieval_by_children(self, chunks, _tenant_ids):
            return list(chunks)
    # KG retrievers: one with content (prepended to chunks), one without.
    class _KgRetriever:
        async def retrieval(self, *_args, **_kwargs):
            return {"id": "kg-1", "content_with_weight": "kg-content"}
    class _NoContentKgRetriever:
        async def retrieval(self, *_args, **_kwargs):
            return {"id": "kg-2", "content_with_weight": ""}
    monkeypatch.setattr(module, "LLMBundle", lambda *args, **kwargs: llm_calls.append((args, kwargs)) or SimpleNamespace())
    monkeypatch.setattr(module.DocMetadataService, "get_flatted_meta_by_kbs", lambda _kb_ids: [{"meta": "v"}], raising=False)
    monkeypatch.setattr(module, "apply_meta_data_filter", _apply_filter)
    monkeypatch.setattr(module.SearchService, "get_detail", lambda _sid: {"search_config": {"meta_data_filter": {"method": "auto"}, "chat_id": "chat-1"}}, raising=False)
    monkeypatch.setattr(module, "cross_languages", _cross_languages)
    monkeypatch.setattr(module, "keyword_extraction", _keyword_extraction)
    monkeypatch.setattr(module, "label_question", lambda *_args, **_kwargs: ["lbl"])
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [_DummyTenant("tenant-1")])
    # Ownership check fails -> OPERATING_ERROR, but the search_id metadata
    # branch should already have run (filter applied, chat model built).
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: False, raising=False)
    _set_request_json(monkeypatch, module, {"kb_id": "kb-1", "question": "q", "search_id": "search-1"})
    res = _run(module.retrieval_test())
    assert res["code"] == module.RetCode.OPERATING_ERROR, res
    assert "Only owner of dataset authorized for this operation." in res["message"], res
    assert applied_filters and applied_filters[-1]["meta_data_filter"]["method"] == "auto"
    assert llm_calls, "search_id metadata auto branch should instantiate chat model"
    # Empty kb_id list -> DATA_ERROR.
    _set_request_json(monkeypatch, module, {"kb_id": [], "question": "q"})
    res = _run(module.retrieval_test())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Please specify dataset firstly." in res["message"], res
    # Knowledgebase lookup failure -> DATA_ERROR.
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: True, raising=False)
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None), raising=False)
    _set_request_json(
        monkeypatch,
        module,
        {"kb_id": ["kb-1"], "question": "q", "meta_data_filter": {"method": "semi_auto"}},
    )
    res = _run(module.retrieval_test())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Knowledgebase not found!" in res["message"], res
    # Full happy path: translation then keyword expansion feed the retriever,
    # and the KG chunk is prepended; vectors are stripped from the response.
    retriever = _Retriever(mode="ok")
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, SimpleNamespace(tenant_id="tenant-kb", embd_id="embd-1")), raising=False)
    monkeypatch.setattr(module.settings, "retriever", retriever)
    monkeypatch.setattr(module.settings, "kg_retriever", _KgRetriever(), raising=False)
    _set_request_json(
        monkeypatch,
        module,
        {
            "kb_id": ["kb-1"],
            "question": "q",
            "cross_languages": ["fr"],
            "rerank_id": "rerank-1",
            "keyword": True,
            "use_kg": True,
        },
    )
    res = _run(module.retrieval_test())
    assert res["code"] == 0, res
    assert cross_calls[-1] == ("q", ("fr",))
    assert keyword_calls[-1] == "q-xl"
    assert retriever.retrieval_questions[-1] == "q-xl-kw"
    assert res["data"]["chunks"][0]["id"] == "kg-1", res
    assert all("vector" not in chunk for chunk in res["data"]["chunks"])
    # Empty KG content: the KG chunk is not prepended.
    monkeypatch.setattr(module.settings, "kg_retriever", _NoContentKgRetriever(), raising=False)
    _set_request_json(monkeypatch, module, {"kb_id": ["kb-1"], "question": "q", "use_kg": True})
    res = _run(module.retrieval_test())
    assert res["code"] == 0, res
    assert res["data"]["chunks"][0]["id"] == "c1", res
    # Retrieval exception containing "not_found" -> friendly DATA_ERROR.
    monkeypatch.setattr(module.settings, "retriever", _Retriever(mode="not_found"))
    _set_request_json(monkeypatch, module, {"kb_id": ["kb-1"], "question": "q"})
    res = _run(module.retrieval_test())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "No chunk found! Check the chunk status please!" in res["message"], res
    # Any other retrieval exception -> EXCEPTION_ERROR.
    monkeypatch.setattr(module.settings, "retriever", _Retriever(mode="explode"))
    _set_request_json(monkeypatch, module, {"kb_id": ["kb-1"], "question": "q"})
    res = _run(module.retrieval_test())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "retrieval boom" in res["message"], res
@pytest.mark.p2
def test_knowledge_graph_repeat_deal_matrix_unit(monkeypatch):
    """Exercise knowledge_graph(): bad graph JSON is dropped, and duplicate
    mind-map node ids are deduplicated with a ``(n)`` suffix."""
    module = _load_chunk_module(monkeypatch)
    module.request = SimpleNamespace(args={"doc_id": "doc-1"}, headers={})
    # Mind map with three nodes sharing the id "dup", at two nesting levels.
    payload = {
        "id": "root",
        "children": [
            {"id": "dup"},
            {"id": "dup", "children": [{"id": "dup"}]},
        ],
    }
    # One malformed graph entry and one valid mind-map entry.
    class _SRes:
        ids = ["bad-json", "mind-map"]
        field = {
            "bad-json": {"knowledge_graph_kwd": "graph", "content_with_weight": "{bad json"},
            "mind-map": {"knowledge_graph_kwd": "mind_map", "content_with_weight": json.dumps(payload)},
        }
    async def _search(*_args, **_kwargs):
        return _SRes()
    monkeypatch.setattr(module.settings.retriever, "search", _search)
    res = _run(module.knowledge_graph())
    assert res["code"] == 0, res
    # Unparseable graph JSON degrades to an empty dict rather than failing.
    assert res["data"]["graph"] == {}, res
    mind_map = res["data"]["mind_map"]
    # Duplicate ids get numbered suffixes in traversal order.
    assert mind_map["children"][0]["id"] == "dup", res
    assert mind_map["children"][1]["id"] == "dup(1)", res
    assert mind_map["children"][1]["children"][0]["id"] == "dup(2)", res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_chunk_app/test_chunk_routes_unit.py",
"license": "Apache License 2.0",
"lines": 675,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_connector_app/test_langfuse_app_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import sys
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _DummyAtomic:
def __enter__(self):
return self
def __exit__(self, _exc_type, _exc, _tb):
return False
class _FakeApiError(Exception):
pass
class _FakeLangfuseClient:
    """Configurable fake of the Langfuse SDK client.

    ``auth_check`` raises *auth_exc* when set, otherwise returns *auth_result*.
    ``api.projects.get().dict()`` yields *project_payload* (one default project
    when not given); ``api.core.api_error.ApiError`` is ``_FakeApiError``.
    """

    def __init__(self, *, auth_result=True, auth_exc=None, project_payload=None):
        self._auth_result = auth_result
        self._auth_exc = auth_exc
        payload = project_payload
        if payload is None:
            payload = {"data": [{"id": "project-id", "name": "project-name"}]}
        projects_api = SimpleNamespace(get=lambda: SimpleNamespace(dict=lambda: payload))
        core_api = SimpleNamespace(api_error=SimpleNamespace(ApiError=_FakeApiError))
        self.api = SimpleNamespace(projects=projects_api, core=core_api)

    def auth_check(self):
        """Raise the configured exception if any, else report the configured result."""
        if self._auth_exc is not None:
            raise self._auth_exc
        return self._auth_result
def _run(coro):
return asyncio.run(coro)
def _load_langfuse_app(monkeypatch):
    """Import ``api/apps/langfuse_app.py`` with stubbed dependencies.

    Registers fake ``common``, ``api.apps`` and ``langfuse`` modules in
    ``sys.modules`` so the route module loads without the real app context,
    then executes it with a no-op route manager and returns the module.
    """
    repo_root = Path(__file__).resolve().parents[4]
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    # Minimal api.apps stub: a logged-in user plus pass-through login_required.
    stub_apps = ModuleType("api.apps")
    stub_apps.current_user = SimpleNamespace(id="tenant-1")
    stub_apps.login_required = lambda func: func
    monkeypatch.setitem(sys.modules, "api.apps", stub_apps)
    # Replace the langfuse SDK with the configurable fake client.
    stub_langfuse = ModuleType("langfuse")
    stub_langfuse.Langfuse = _FakeLangfuseClient
    monkeypatch.setitem(sys.modules, "langfuse", stub_langfuse)
    module_path = repo_root / "api" / "apps" / "langfuse_app.py"
    spec = importlib.util.spec_from_file_location("test_langfuse_app_unit", module_path)
    module = importlib.util.module_from_spec(spec)
    # Install the no-op manager before exec so route decorators don't register.
    module.manager = _DummyManager()
    spec.loader.exec_module(module)
    return module
@pytest.mark.p2
def test_set_api_key_missing_fields_and_invalid_auth(monkeypatch):
    """set_api_key rejects incomplete payloads and failing Langfuse auth."""
    module = _load_langfuse_app(monkeypatch)
    monkeypatch.setattr(module.DB, "atomic", lambda: _DummyAtomic())
    # Empty secret_key -> 102 "Missing required fields".
    async def missing_fields():
        return {"secret_key": "", "public_key": "pub", "host": "http://host"}
    monkeypatch.setattr(module, "get_request_json", missing_fields)
    res = _run(module.set_api_key.__wrapped__())
    assert res["code"] == 102
    assert res["message"] == "Missing required fields"
    # auth_check() returning False -> 102 "Invalid Langfuse keys".
    async def invalid_auth():
        return {"secret_key": "sec", "public_key": "pub", "host": "http://host"}
    monkeypatch.setattr(module, "get_request_json", invalid_auth)
    monkeypatch.setattr(module, "Langfuse", lambda **_kwargs: _FakeLangfuseClient(auth_result=False))
    res = _run(module.set_api_key.__wrapped__())
    assert res["code"] == 102
    assert res["message"] == "Invalid Langfuse keys"
@pytest.mark.p2
def test_set_api_key_create_update_and_atomic_exception(monkeypatch):
    """set_api_key creates when no record exists, updates otherwise, and
    reports a 100 error when the atomic save raises."""
    module = _load_langfuse_app(monkeypatch)
    monkeypatch.setattr(module.DB, "atomic", lambda: _DummyAtomic())
    monkeypatch.setattr(module, "Langfuse", lambda **_kwargs: _FakeLangfuseClient(auth_result=True))
    async def payload():
        return {"secret_key": "sec", "public_key": "pub", "host": "http://host"}
    monkeypatch.setattr(module, "get_request_json", payload)
    # Count save/update calls through the service stubs.
    calls = {"save": 0, "update": 0}
    monkeypatch.setattr(module.TenantLangfuseService, "filter_by_tenant", lambda **_kwargs: None)
    monkeypatch.setattr(
        module.TenantLangfuseService,
        "save",
        lambda **_kwargs: calls.__setitem__("save", calls["save"] + 1),
    )
    monkeypatch.setattr(
        module.TenantLangfuseService,
        "update_by_tenant",
        lambda **_kwargs: calls.__setitem__("update", calls["update"] + 1),
    )
    # No existing record -> save path.
    res = _run(module.set_api_key.__wrapped__())
    assert res["code"] == 0
    assert calls["save"] == 1
    # Existing record -> update path.
    monkeypatch.setattr(module.TenantLangfuseService, "filter_by_tenant", lambda **_kwargs: {"id": "existing"})
    res = _run(module.set_api_key.__wrapped__())
    assert res["code"] == 0
    assert calls["update"] == 1
    # save() raising inside the atomic block -> 100 with the error message.
    monkeypatch.setattr(module.TenantLangfuseService, "filter_by_tenant", lambda **_kwargs: None)
    def raise_save(**_kwargs):
        raise RuntimeError("save failed")
    monkeypatch.setattr(module.TenantLangfuseService, "save", raise_save)
    res = _run(module.set_api_key.__wrapped__())
    assert res["code"] == 100
    assert "save failed" in res["message"]
@pytest.mark.p2
def test_get_api_key_no_record_invalid_auth_api_error_generic_error_success(monkeypatch):
    """get_api_key: no record, invalid keys, Langfuse API error, generic error, and success paths."""
    module = _load_langfuse_app(monkeypatch)
    # No stored keys -> code 0 with an informational message.
    monkeypatch.setattr(module.TenantLangfuseService, "filter_by_tenant_with_info", lambda **_kwargs: None)
    res = module.get_api_key.__wrapped__()
    assert res["code"] == 0
    assert res["message"] == "Have not record any Langfuse keys."
    base_entry = {"secret_key": "sec", "public_key": "pub", "host": "http://host"}
    # Stored keys that fail auth -> code 102.
    monkeypatch.setattr(module.TenantLangfuseService, "filter_by_tenant_with_info", lambda **_kwargs: dict(base_entry))
    monkeypatch.setattr(module, "Langfuse", lambda **_kwargs: _FakeLangfuseClient(auth_result=False))
    res = module.get_api_key.__wrapped__()
    assert res["code"] == 102
    assert res["message"] == "Invalid Langfuse keys loaded"
    # Langfuse-specific API error -> still code 0 but with an error message.
    monkeypatch.setattr(
        module,
        "Langfuse",
        lambda **_kwargs: _FakeLangfuseClient(auth_exc=_FakeApiError("api exploded")),
    )
    res = module.get_api_key.__wrapped__()
    assert res["code"] == 0
    assert "Error from Langfuse" in res["message"]
    # Any other exception -> code 100 carrying the original text.
    monkeypatch.setattr(
        module,
        "Langfuse",
        lambda **_kwargs: _FakeLangfuseClient(auth_exc=RuntimeError("generic exploded")),
    )
    res = module.get_api_key.__wrapped__()
    assert res["code"] == 100
    assert "generic exploded" in res["message"]
    # Valid keys -> project metadata is returned in data.
    monkeypatch.setattr(module, "Langfuse", lambda **_kwargs: _FakeLangfuseClient(auth_result=True))
    res = module.get_api_key.__wrapped__()
    assert res["code"] == 0
    assert res["data"]["project_id"] == "project-id"
    assert res["data"]["project_name"] == "project-name"
@pytest.mark.p2
def test_delete_api_key_no_record_success_exception(monkeypatch):
    """delete_api_key: no stored record, successful delete, and delete_model raising."""
    module = _load_langfuse_app(monkeypatch)
    monkeypatch.setattr(module.DB, "atomic", lambda: _DummyAtomic())
    # Nothing to delete -> informational success.
    monkeypatch.setattr(module.TenantLangfuseService, "filter_by_tenant", lambda **_kwargs: None)
    res = module.delete_api_key.__wrapped__()
    assert res["code"] == 0
    assert res["message"] == "Have not record any Langfuse keys."
    # Record present and delete_model succeeds -> data is True.
    monkeypatch.setattr(module.TenantLangfuseService, "filter_by_tenant", lambda **_kwargs: {"id": "entry"})
    monkeypatch.setattr(module.TenantLangfuseService, "delete_model", lambda _entry: None)
    res = module.delete_api_key.__wrapped__()
    assert res["code"] == 0
    assert res["data"] is True
    # delete_model raising -> code 100 with the error text.
    def raise_delete(_entry):
        raise RuntimeError("delete failed")
    monkeypatch.setattr(module.TenantLangfuseService, "delete_model", raise_delete)
    res = module.delete_api_key.__wrapped__()
    assert res["code"] == 100
    assert "delete failed" in res["message"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_connector_app/test_langfuse_app_unit.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_conversation_app/test_conversation_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import sys
from copy import deepcopy
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
from anyio import Path as AsyncPath
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _DummyRequest:
    """Minimal quart-request stand-in: plain args/headers plus awaitable form/files mappings."""

    def __init__(self, *, args=None, headers=None, form=None, files=None):
        # Fixed attributes the routes may inspect.
        self.method = "POST"
        self.content_length = 0
        self.args = args or {}
        self.headers = headers or {}
        # form/files are awaited by the route handlers, so wrap them.
        self.form = _AwaitableValue(form or {})
        self.files = _AwaitableValue(files or {})
class _DummyConversation:
def __init__(self, *, conv_id="conv-1", dialog_id="dialog-1", message=None, reference=None):
self.id = conv_id
self.dialog_id = dialog_id
self.message = message if message is not None else []
self.reference = reference if reference is not None else []
def to_dict(self):
return {
"id": self.id,
"dialog_id": self.dialog_id,
"message": deepcopy(self.message),
"reference": deepcopy(self.reference),
}
class _DummyDialog:
def __init__(self, *, dialog_id="dialog-1", tenant_id="tenant-1", icon="avatar.png"):
self.id = dialog_id
self.tenant_id = tenant_id
self.icon = icon
self.prompt_config = {"prologue": "hello"}
self.llm_id = ""
self.llm_setting = {}
def to_dict(self):
return {
"id": self.id,
"icon": self.icon,
"tenant_id": self.tenant_id,
"prompt_config": deepcopy(self.prompt_config),
}
class _DummyUploadedFile:
    """Fake uploaded file: save() records the destination path and writes fixed audio bytes."""
    def __init__(self, filename):
        self.filename = filename
        # Destination passed to save(); None until save() is called.
        self.saved_path = None
    async def save(self, path):
        self.saved_path = path
        # Write a small payload so downstream code has a real file to read.
        await AsyncPath(path).write_bytes(b"audio-bytes")
def _run(coro):
return asyncio.run(coro)
def _load_conversation_module(monkeypatch):
    """Import api/apps/conversation_app.py in isolation, stubbing its heavy dependencies.

    Installs fake ``common``, ``deepdoc``/``deepdoc.parser``/``deepdoc.parser.*``,
    ``xgboost`` and ``api.apps`` modules into sys.modules (via monkeypatch, so they
    are removed after the test), then executes the real route module from disk with
    a no-op route manager. Returns the loaded module object.
    """
    repo_root = Path(__file__).resolve().parents[4]
    # Point the real "common" package at the repo checkout.
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    # Stub out deepdoc parsers so importing the route module needs no native deps.
    deepdoc_pkg = ModuleType("deepdoc")
    deepdoc_parser_pkg = ModuleType("deepdoc.parser")
    deepdoc_parser_pkg.__path__ = []
    class _StubPdfParser:
        pass
    class _StubExcelParser:
        pass
    class _StubDocxParser:
        pass
    deepdoc_parser_pkg.PdfParser = _StubPdfParser
    deepdoc_parser_pkg.ExcelParser = _StubExcelParser
    deepdoc_parser_pkg.DocxParser = _StubDocxParser
    deepdoc_pkg.parser = deepdoc_parser_pkg
    monkeypatch.setitem(sys.modules, "deepdoc", deepdoc_pkg)
    monkeypatch.setitem(sys.modules, "deepdoc.parser", deepdoc_parser_pkg)
    deepdoc_excel_module = ModuleType("deepdoc.parser.excel_parser")
    deepdoc_excel_module.RAGFlowExcelParser = _StubExcelParser
    monkeypatch.setitem(sys.modules, "deepdoc.parser.excel_parser", deepdoc_excel_module)
    deepdoc_parser_utils = ModuleType("deepdoc.parser.utils")
    deepdoc_parser_utils.get_text = lambda *_args, **_kwargs: ""
    monkeypatch.setitem(sys.modules, "deepdoc.parser.utils", deepdoc_parser_utils)
    monkeypatch.setitem(sys.modules, "xgboost", ModuleType("xgboost"))
    # Fake api.apps: a fixed current_user and a pass-through login_required decorator.
    apps_mod = ModuleType("api.apps")
    apps_mod.current_user = SimpleNamespace(id="user-1")
    apps_mod.login_required = lambda func: func
    monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
    # Execute the real route module from source with a no-op route manager.
    module_name = "test_conversation_routes_unit_module"
    module_path = repo_root / "api" / "apps" / "conversation_app.py"
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, module_name, module)
    spec.loader.exec_module(module)
    return module
def _set_request_json(monkeypatch, module, payload):
    # Patch the route module's get_request_json so awaiting it yields a deep copy of *payload*
    # (a fresh copy per patch call, so routes cannot mutate the test's dict).
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue(deepcopy(payload)))
async def _read_sse_text(response):
chunks = []
async for chunk in response.response:
if isinstance(chunk, bytes):
chunks.append(chunk.decode("utf-8"))
else:
chunks.append(chunk)
return "".join(chunks)
@pytest.mark.p2
def test_set_conversation_update_create_and_errors(monkeypatch):
    """set_conversation: create (name truncated to 255), update success/failure, and exceptions."""
    module = _load_conversation_module(monkeypatch)
    # Create path: an over-long name must be truncated to 255 characters.
    long_name = "n" * 300
    create_payload = {
        "conversation_id": "conv-new",
        "dialog_id": "dialog-1",
        "is_new": True,
        "name": long_name,
    }
    _set_request_json(monkeypatch, module, create_payload)
    saved = {}
    monkeypatch.setattr(module.DialogService, "get_by_id", lambda _id: (True, _DummyDialog()))
    monkeypatch.setattr(module.ConversationService, "save", lambda **kwargs: saved.update(kwargs) or True)
    res = _run(module.set_conversation())
    assert res["code"] == 0
    assert len(res["data"]["name"]) == 255
    assert saved["user_id"] == "user-1"
    # Update path: update_by_id returning False -> "Conversation not found".
    update_payload = {
        "conversation_id": "conv-1",
        "dialog_id": "dialog-1",
        "is_new": False,
        "name": "rename",
    }
    _set_request_json(monkeypatch, module, update_payload)
    monkeypatch.setattr(module.ConversationService, "update_by_id", lambda *_args, **_kwargs: False)
    res = _run(module.set_conversation())
    assert "Conversation not found" in res["message"]
    # Update succeeded but re-fetch fails -> "Fail to update".
    _set_request_json(monkeypatch, module, update_payload)
    monkeypatch.setattr(module.ConversationService, "update_by_id", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (False, None))
    res = _run(module.set_conversation())
    assert "Fail to update" in res["message"]
    # Full happy update path.
    _set_request_json(monkeypatch, module, update_payload)
    monkeypatch.setattr(module.ConversationService, "update_by_id", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (True, _DummyConversation(conv_id="conv-1")))
    res = _run(module.set_conversation())
    assert res["code"] == 0
    assert res["data"]["id"] == "conv-1"
    # update_by_id raising -> EXCEPTION_ERROR with the original message.
    _set_request_json(monkeypatch, module, update_payload)
    def _raise_update(*_args, **_kwargs):
        raise RuntimeError("update boom")
    monkeypatch.setattr(module.ConversationService, "update_by_id", _raise_update)
    res = _run(module.set_conversation())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "update boom" in res["message"]
    # Create path with a missing dialog -> "Dialog not found".
    missing_dialog_payload = {
        "conversation_id": "conv-2",
        "dialog_id": "dialog-missing",
        "is_new": True,
        "name": "create",
    }
    _set_request_json(monkeypatch, module, missing_dialog_payload)
    monkeypatch.setattr(module.DialogService, "get_by_id", lambda _id: (False, None))
    res = _run(module.set_conversation())
    assert res["message"] == "Dialog not found"
    # DialogService.get_by_id raising -> EXCEPTION_ERROR.
    _set_request_json(monkeypatch, module, missing_dialog_payload)
    def _raise_dialog(_id):
        raise RuntimeError("dialog boom")
    monkeypatch.setattr(module.DialogService, "get_by_id", _raise_dialog)
    res = _run(module.set_conversation())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "dialog boom" in res["message"]
@pytest.mark.p2
def test_get_and_getsse_authorization_and_reference_paths(monkeypatch):
    """get(): reference normalization, ownership checks, errors; getsse(): token auth matrix."""
    module = _load_conversation_module(monkeypatch)
    # Mixed reference list: a dict entry gets normalized via chunks_format, a list entry stays.
    conv = _DummyConversation(reference=[{"doc": "d"}, ["already-formatted"]])
    monkeypatch.setattr(module, "request", _DummyRequest(args={"conversation_id": "conv-1"}))
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (True, conv))
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [SimpleNamespace(icon="bot-avatar")])
    monkeypatch.setattr(module, "chunks_format", lambda _ref: [{"chunk": "normalized"}])
    res = _run(module.get())
    assert res["code"] == 0
    assert res["data"]["avatar"] == "bot-avatar"
    assert res["data"]["reference"][0]["chunks"] == [{"chunk": "normalized"}]
    # Conversation missing.
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (False, None))
    res = _run(module.get())
    assert res["message"] == "Conversation not found!"
    # Caller's tenants own no matching dialog -> OPERATING_ERROR.
    monkeypatch.setattr(module, "request", _DummyRequest(args={"conversation_id": "conv-1"}))
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (True, conv))
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [])
    res = _run(module.get())
    assert res["code"] == module.RetCode.OPERATING_ERROR
    assert "Only owner of conversation" in res["message"]
    # Lookup raising -> EXCEPTION_ERROR.
    def _raise_get(*_args, **_kwargs):
        raise RuntimeError("get boom")
    monkeypatch.setattr(module.ConversationService, "get_by_id", _raise_get)
    res = _run(module.get())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "get boom" in res["message"]
    # getsse: "Bearer" with no token -> invalid authorization.
    monkeypatch.setattr(module, "request", _DummyRequest(headers={"Authorization": "Bearer"}))
    res = module.getsse("dialog-1")
    assert "Authorization is not valid" in res["message"]
    # getsse: token not known to APIToken -> invalid API key.
    monkeypatch.setattr(module, "request", _DummyRequest(headers={"Authorization": "Bearer token-1"}))
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [])
    res = module.getsse("dialog-1")
    assert "API key is invalid" in res["message"]
    # getsse: valid token but dialog missing.
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [SimpleNamespace()])
    monkeypatch.setattr(module.DialogService, "get_by_id", lambda _id: (False, None))
    res = module.getsse("dialog-1")
    assert res["message"] == "Dialog not found!"
    # getsse success: "icon" is exposed as "avatar" and removed from the payload.
    monkeypatch.setattr(module.DialogService, "get_by_id", lambda _id: (True, _DummyDialog()))
    res = module.getsse("dialog-1")
    assert res["code"] == 0
    assert res["data"]["avatar"] == "avatar.png"
    assert "icon" not in res["data"]
    # getsse: DialogService raising -> EXCEPTION_ERROR.
    def _raise_getsse(_id):
        raise RuntimeError("getsse boom")
    monkeypatch.setattr(module.DialogService, "get_by_id", _raise_getsse)
    res = module.getsse("dialog-1")
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "getsse boom" in res["message"]
@pytest.mark.p2
def test_rm_and_list_conversation_guards(monkeypatch):
    """rm(): missing conversation, ownership guard, delete, exception; list_conversation(): same guards."""
    module = _load_conversation_module(monkeypatch)
    # rm: conversation id cannot be resolved.
    _set_request_json(monkeypatch, module, {"conversation_ids": ["conv-1"]})
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (False, None))
    res = _run(module.rm())
    assert "Conversation not found" in res["message"]
    # rm: conversation exists but none of the caller's tenants own its dialog.
    conv = _DummyConversation(conv_id="conv-1", dialog_id="dialog-1")
    _set_request_json(monkeypatch, module, {"conversation_ids": ["conv-1"]})
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (True, conv))
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [])
    res = _run(module.rm())
    assert res["code"] == module.RetCode.OPERATING_ERROR
    # rm success: every id in the payload gets deleted.
    deleted = []
    _set_request_json(monkeypatch, module, {"conversation_ids": ["conv-1"]})
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [SimpleNamespace(id="dialog-1")])
    monkeypatch.setattr(module.ConversationService, "delete_by_id", lambda cid: deleted.append(cid) or True)
    res = _run(module.rm())
    assert res["code"] == 0
    assert res["data"] is True
    assert deleted == ["conv-1"]
    # rm: lookup raising -> EXCEPTION_ERROR.
    _set_request_json(monkeypatch, module, {"conversation_ids": ["conv-1"]})
    def _raise_rm(*_args, **_kwargs):
        raise RuntimeError("rm boom")
    monkeypatch.setattr(module.ConversationService, "get_by_id", _raise_rm)
    res = _run(module.rm())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "rm boom" in res["message"]
    # list_conversation: caller does not own the dialog.
    monkeypatch.setattr(module, "request", _DummyRequest(args={"dialog_id": "dialog-1"}))
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [])
    res = _run(module.list_conversation())
    assert res["code"] == module.RetCode.OPERATING_ERROR
    assert "Only owner of dialog" in res["message"]
    # list_conversation success: conversations are returned in query order.
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [SimpleNamespace(id="dialog-1")])
    monkeypatch.setattr(module.ConversationService, "model", SimpleNamespace(create_time="create_time"))
    monkeypatch.setattr(module.ConversationService, "query", lambda **_kwargs: [_DummyConversation(conv_id="c1"), _DummyConversation(conv_id="c2")])
    res = _run(module.list_conversation())
    assert res["code"] == 0
    assert [x["id"] for x in res["data"]] == ["c1", "c2"]
    # list_conversation: query raising -> EXCEPTION_ERROR.
    def _raise_list(**_kwargs):
        raise RuntimeError("list boom")
    monkeypatch.setattr(module.ConversationService, "query", _raise_list)
    res = _run(module.list_conversation())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "list boom" in res["message"]
@pytest.mark.p2
def test_completion_stream_and_nonstream_branches(monkeypatch):
    """completion: SSE success/error, non-stream success, llm_id switching, and error guards."""
    module = _load_conversation_module(monkeypatch)
    conv = _DummyConversation(conv_id="conv-1", dialog_id="dialog-1", reference=[])
    dia = _DummyDialog(dialog_id="dialog-1", tenant_id="tenant-1")
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (True, conv))
    monkeypatch.setattr(module.DialogService, "get_by_id", lambda _id: (True, dia))
    monkeypatch.setattr(module, "structure_answer", lambda _conv, ans, message_id, conv_id: {"answer": ans["answer"], "id": message_id, "conversation_id": conv_id, "reference": []})
    updates = []
    monkeypatch.setattr(module.ConversationService, "update_by_id", lambda conv_id, payload: updates.append((conv_id, payload)) or True)
    # Streaming path: system and leading assistant messages are stripped before async_chat.
    stream_payload = {
        "conversation_id": "conv-1",
        "messages": [
            {"role": "system", "content": "ignored"},
            {"role": "assistant", "content": "ignored-first-assistant"},
            {"role": "user", "content": "hello", "id": "m-1"},
        ],
        "stream": True,
    }
    async def _stream_ok(_dia, sanitized, *_args, **_kwargs):
        # The route must pass only the user message through.
        assert [m["role"] for m in sanitized] == ["user"]
        yield {"answer": "sse-ok"}
    monkeypatch.setattr(module, "async_chat", _stream_ok)
    _set_request_json(monkeypatch, module, stream_payload)
    resp = _run(module.completion.__wrapped__())
    assert resp.headers["Content-Type"].startswith("text/event-stream")
    sse_text = _run(_read_sse_text(resp))
    assert "sse-ok" in sse_text
    assert '"data": true' in sse_text
    assert updates
    # Streaming path with async_chat raising: the error is emitted inside the SSE stream.
    async def _stream_error(_dia, _sanitized, *_args, **_kwargs):
        raise RuntimeError("stream explode")
        # Unreachable yield keeps this a proper async generator.
        if False:
            yield {"answer": "never"}
    monkeypatch.setattr(module, "async_chat", _stream_error)
    _set_request_json(monkeypatch, module, stream_payload)
    resp = _run(module.completion.__wrapped__())
    sse_text = _run(_read_sse_text(resp))
    assert "**ERROR**: stream explode" in sse_text
    # Non-streaming path returns a plain JSON payload.
    async def _non_stream(_dia, _sanitized, **_kwargs):
        yield {"answer": "plain-ok"}
    monkeypatch.setattr(module, "async_chat", _non_stream)
    _set_request_json(
        monkeypatch,
        module,
        {
            "conversation_id": "conv-1",
            "messages": [{"role": "user", "content": "plain", "id": "m-2"}],
            "stream": False,
        },
    )
    res = _run(module.completion.__wrapped__())
    assert res["code"] == 0
    assert res["data"]["answer"] == "plain-ok"
    # llm_id override rejected when no API key exists for the model.
    monkeypatch.setattr(module.TenantLLMService, "get_api_key", lambda **_kwargs: False)
    _set_request_json(
        monkeypatch,
        module,
        {
            "conversation_id": "conv-1",
            "messages": [{"role": "user", "content": "embed", "id": "m-3"}],
            "llm_id": "bad-model",
            "stream": False,
        },
    )
    res = _run(module.completion.__wrapped__())
    assert "Cannot use specified model bad-model" in res["message"]
    # llm_id override accepted: dialog picks up the model and llm_setting from the payload.
    monkeypatch.setattr(module.TenantLLMService, "get_api_key", lambda **_kwargs: "api-key")
    _set_request_json(
        monkeypatch,
        module,
        {
            "conversation_id": "conv-1",
            "messages": [{"role": "user", "content": "embed", "id": "m-4"}],
            "llm_id": "glm-4",
            "temperature": 0.7,
            "top_p": 0.2,
            "stream": False,
        },
    )
    res = _run(module.completion.__wrapped__())
    assert res["code"] == 0
    assert dia.llm_id == "glm-4"
    assert dia.llm_setting == {"temperature": 0.7, "top_p": 0.2}
    # Guard: conversation not found.
    _set_request_json(
        monkeypatch,
        module,
        {
            "conversation_id": "missing",
            "messages": [{"role": "user", "content": "x", "id": "m-5"}],
            "stream": False,
        },
    )
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (False, None))
    res = _run(module.completion.__wrapped__())
    assert res["message"] == "Conversation not found!"
    # Guard: dialog not found.
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (True, conv))
    monkeypatch.setattr(module.DialogService, "get_by_id", lambda _id: (False, None))
    _set_request_json(
        monkeypatch,
        module,
        {
            "conversation_id": "conv-1",
            "messages": [{"role": "user", "content": "x", "id": "m-6"}],
            "stream": False,
        },
    )
    res = _run(module.completion.__wrapped__())
    assert res["message"] == "Dialog not found!"
    # Guard: lookup raising -> EXCEPTION_ERROR.
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (_ for _ in ()).throw(RuntimeError("completion boom")))
    _set_request_json(
        monkeypatch,
        module,
        {
            "conversation_id": "conv-1",
            "messages": [{"role": "user", "content": "x", "id": "m-7"}],
            "stream": False,
        },
    )
    res = _run(module.completion.__wrapped__())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "completion boom" in res["message"]
@pytest.mark.p2
def test_sequence2txt_validation_and_transcription_paths(monkeypatch):
    """sequence2txt: input validation, tenant/ASR guards, sync transcription, and SSE streaming."""
    module = _load_conversation_module(monkeypatch)
    # No file part in the upload.
    monkeypatch.setattr(module, "request", _DummyRequest(form={"stream": "false"}, files={}))
    res = _run(module.sequence2txt())
    assert "Missing 'file'" in res["message"]
    # Wrong file extension.
    bad_file = _DummyUploadedFile("audio.txt")
    monkeypatch.setattr(module, "request", _DummyRequest(form={"stream": "false"}, files={"file": bad_file}))
    res = _run(module.sequence2txt())
    assert "Unsupported audio format" in res["message"]
    # No tenant for the current user.
    wav_file = _DummyUploadedFile("audio.wav")
    monkeypatch.setattr(module, "request", _DummyRequest(form={"stream": "false"}, files={"file": wav_file}))
    monkeypatch.setattr(module.TenantService, "get_info_by", lambda _uid: [])
    res = _run(module.sequence2txt())
    assert res["message"] == "Tenant not found!"
    # Tenant exists but has no default ASR model configured.
    wav_file = _DummyUploadedFile("audio.wav")
    monkeypatch.setattr(module, "request", _DummyRequest(form={"stream": "false"}, files={"file": wav_file}))
    monkeypatch.setattr(module.TenantService, "get_info_by", lambda _uid: [{"tenant_id": "tenant-1", "asr_id": ""}])
    res = _run(module.sequence2txt())
    assert res["message"] == "No default ASR model is set"
    # Non-streaming transcription succeeds even when temp-file cleanup raises.
    class _SyncAsr:
        def transcription(self, _path):
            return "transcribed text"
        def stream_transcription(self, _path):
            return []
    wav_file = _DummyUploadedFile("audio.wav")
    monkeypatch.setattr(module, "request", _DummyRequest(form={"stream": "false"}, files={"file": wav_file}))
    monkeypatch.setattr(module.TenantService, "get_info_by", lambda _uid: [{"tenant_id": "tenant-1", "asr_id": "asr-model"}])
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: _SyncAsr())
    monkeypatch.setattr(module.os, "remove", lambda _path: (_ for _ in ()).throw(RuntimeError("remove failed")))
    res = _run(module.sequence2txt())
    assert res["code"] == 0
    assert res["data"]["text"] == "transcribed text"
    # Streaming transcription: events are forwarded as SSE.
    class _StreamAsr:
        def transcription(self, _path):
            return ""
        def stream_transcription(self, _path):
            yield {"event": "partial", "text": "hello"}
    wav_file = _DummyUploadedFile("audio.wav")
    monkeypatch.setattr(module, "request", _DummyRequest(form={"stream": "true"}, files={"file": wav_file}))
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: _StreamAsr())
    resp = _run(module.sequence2txt())
    assert resp.headers["Content-Type"].startswith("text/event-stream")
    sse_text = _run(_read_sse_text(resp))
    assert '"event": "partial"' in sse_text
    # Streaming transcription raising: the error text is delivered inside the SSE stream.
    class _ErrorStreamAsr:
        def transcription(self, _path):
            return ""
        def stream_transcription(self, _path):
            raise RuntimeError("stream asr boom")
    wav_file = _DummyUploadedFile("audio.wav")
    monkeypatch.setattr(module, "request", _DummyRequest(form={"stream": "true"}, files={"file": wav_file}))
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: _ErrorStreamAsr())
    resp = _run(module.sequence2txt())
    sse_text = _run(_read_sse_text(resp))
    assert "stream asr boom" in sse_text
@pytest.mark.p2
def test_tts_request_parse_entry(monkeypatch):
    """tts: tenant/model guards, per-sentence audio streaming headers/chunks, and TTS errors."""
    module = _load_conversation_module(monkeypatch)
    _set_request_json(monkeypatch, module, {"text": "A。B"})
    # No tenant for the current user.
    monkeypatch.setattr(module.TenantService, "get_info_by", lambda _uid: [])
    res = _run(module.tts())
    assert res["message"] == "Tenant not found!"
    # Tenant exists but no default TTS model.
    monkeypatch.setattr(module.TenantService, "get_info_by", lambda _uid: [{"tenant_id": "tenant-1", "tts_id": ""}])
    res = _run(module.tts())
    assert res["message"] == "No default TTS model is set"
    # Success: the text is split on "。" and each piece is synthesized into the audio stream.
    class _TTSOk:
        def tts(self, txt):
            if not txt:
                return []
            yield f"chunk-{txt}".encode("utf-8")
    monkeypatch.setattr(module.TenantService, "get_info_by", lambda _uid: [{"tenant_id": "tenant-1", "tts_id": "tts-x"}])
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: _TTSOk())
    resp = _run(module.tts())
    assert resp.mimetype == "audio/mpeg"
    assert resp.headers.get("Cache-Control") == "no-cache"
    assert resp.headers.get("Connection") == "keep-alive"
    assert resp.headers.get("X-Accel-Buffering") == "no"
    stream_text = _run(_read_sse_text(resp))
    assert "chunk-A" in stream_text
    assert "chunk-B" in stream_text
    # TTS raising: the stream carries a code-500 error payload.
    class _TTSErr:
        def tts(self, _txt):
            raise RuntimeError("tts boom")
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: _TTSErr())
    resp = _run(module.tts())
    stream_text = _run(_read_sse_text(resp))
    assert '"code": 500' in stream_text
    assert "**ERROR**: tts boom" in stream_text
@pytest.mark.p2
def test_delete_msg_and_thumbup_matrix_unit(monkeypatch):
    """delete_msg removes a user/assistant pair plus its reference; thumbup sets/clears feedback."""
    module = _load_conversation_module(monkeypatch)
    updates = []
    monkeypatch.setattr(module.ConversationService, "update_by_id", lambda conv_id, payload: updates.append((conv_id, payload)) or True)
    # delete_msg: conversation missing.
    _set_request_json(monkeypatch, module, {"conversation_id": "missing", "message_id": "pair-1"})
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (False, None))
    res = _run(module.delete_msg.__wrapped__())
    assert res["message"] == "Conversation not found!"
    # delete_msg: both messages with the target id go, and the paired reference entry too.
    conv = _DummyConversation(
        conv_id="conv-del",
        message=[
            {"id": "other", "role": "user"},
            {"id": "pair-1", "role": "user"},
            {"id": "pair-1", "role": "assistant"},
        ],
        reference=[{"chunks": [{"id": "c1"}]}],
    )
    _set_request_json(monkeypatch, module, {"conversation_id": "conv-del", "message_id": "pair-1"})
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (True, conv))
    res = _run(module.delete_msg.__wrapped__())
    assert res["code"] == 0
    assert [m["id"] for m in res["data"]["message"]] == ["other"]
    assert res["data"]["reference"] == []
    assert updates[-1][0] == "conv-del"
    # thumbup: conversation missing.
    _set_request_json(monkeypatch, module, {"conversation_id": "missing", "message_id": "assistant-1", "thumbup": True})
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (False, None))
    res = _run(module.thumbup.__wrapped__())
    assert res["message"] == "Conversation not found!"
    # thumbup True: sets thumbup and clears any previous feedback text.
    conv_up = _DummyConversation(
        conv_id="conv-up",
        message=[{"id": "assistant-1", "role": "assistant", "feedback": "old"}],
    )
    _set_request_json(monkeypatch, module, {"conversation_id": "conv-up", "message_id": "assistant-1", "thumbup": True})
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (True, conv_up))
    res = _run(module.thumbup.__wrapped__())
    assert res["code"] == 0
    assert res["data"]["message"][0]["thumbup"] is True
    assert "feedback" not in res["data"]["message"][0]
    # thumbup False: records the negative feedback text alongside the flag.
    conv_down = _DummyConversation(conv_id="conv-down", message=[{"id": "assistant-2", "role": "assistant"}])
    _set_request_json(
        monkeypatch,
        module,
        {"conversation_id": "conv-down", "message_id": "assistant-2", "thumbup": False, "feedback": "needs sources"},
    )
    monkeypatch.setattr(module.ConversationService, "get_by_id", lambda _id: (True, conv_down))
    res = _run(module.thumbup.__wrapped__())
    assert res["code"] == 0
    assert res["data"]["message"][0]["thumbup"] is False
    assert res["data"]["message"][0]["feedback"] == "needs sources"
@pytest.mark.p2
def test_ask_about_stream_search_config_matrix_unit(monkeypatch):
    """ask_about streams async_ask output as SSE, forwards search_config, and embeds mid-stream errors."""
    module = _load_conversation_module(monkeypatch)
    _set_request_json(monkeypatch, module, {"question": "q", "kb_ids": ["kb-1"], "search_id": "search-1"})
    monkeypatch.setattr(module.SearchService, "get_detail", lambda _sid: {"search_config": {"mode": "test"}})
    captured = {}
    # Fake async_ask yields one answer then raises, exercising both stream branches.
    async def _fake_async_ask(question, kb_ids, uid, search_config=None):
        captured["question"] = question
        captured["kb_ids"] = kb_ids
        captured["uid"] = uid
        captured["search_config"] = search_config
        yield {"answer": "first"}
        raise RuntimeError("ask boom")
    monkeypatch.setattr(module, "async_ask", _fake_async_ask)
    resp = _run(module.ask_about.__wrapped__())
    assert resp.headers["Content-Type"] == "text/event-stream; charset=utf-8"
    sse_text = _run(_read_sse_text(resp))
    assert '"answer": "first"' in sse_text
    assert "**ERROR**: ask boom" in sse_text
    assert '"data": true' in sse_text.lower()
    # The route must forward the payload plus the resolved search_config verbatim.
    assert captured == {"question": "q", "kb_ids": ["kb-1"], "uid": "user-1", "search_config": {"mode": "test"}}
@pytest.mark.p2
def test_mindmap_and_related_questions_matrix_unit(monkeypatch):
    """mindmap merges kb_ids with the search config; related_questions parses the LLM's numbered list."""
    module = _load_conversation_module(monkeypatch)
    # Search detail supplies extra kb_ids, a chat model id, and llm settings.
    def _search_detail(_sid):
        return {
            "tenant_id": "tenant-x",
            "search_config": {
                "kb_ids": ["kb-2", "kb-3"],
                "chat_id": "chat-x",
                "llm_setting": {"temperature": 0.2, "parameter": {"k": "v"}},
            },
        }
    monkeypatch.setattr(module.SearchService, "get_detail", _search_detail)
    _set_request_json(monkeypatch, module, {"question": "mindmap-q", "kb_ids": ["kb-1", "kb-2"], "search_id": "search-1"})
    mindmap_calls = {}
    # gen_mindmap must receive the union of request and search-config kb_ids.
    async def _gen_ok(question, kb_ids, tenant_id, search_config):
        mindmap_calls["question"] = question
        mindmap_calls["kb_ids"] = set(kb_ids)
        mindmap_calls["tenant_id"] = tenant_id
        mindmap_calls["search_config"] = search_config
        return {"nodes": [question]}
    monkeypatch.setattr(module, "gen_mindmap", _gen_ok)
    res = _run(module.mindmap.__wrapped__())
    assert res["code"] == 0
    assert res["data"] == {"nodes": ["mindmap-q"]}
    assert mindmap_calls["kb_ids"] == {"kb-1", "kb-2", "kb-3"}
    assert mindmap_calls["tenant_id"] == "tenant-x"
    assert set(mindmap_calls["search_config"]["kb_ids"]) == {"kb-1", "kb-2", "kb-3"}
    # gen_mindmap returning an error dict surfaces the error message.
    async def _gen_error(*_args, **_kwargs):
        return {"error": "mindmap boom"}
    monkeypatch.setattr(module, "gen_mindmap", _gen_error)
    res = _run(module.mindmap.__wrapped__())
    assert "mindmap boom" in res["message"]
    # related_questions: only numbered lines of the LLM answer become suggestions,
    # and the "parameter" key must be stripped from the llm options.
    llm_calls = {}
    class _FakeChat:
        async def async_chat(self, prompt, messages, options):
            llm_calls["prompt"] = prompt
            llm_calls["messages"] = messages
            llm_calls["options"] = options
            return "1. Alpha\n2. Beta\nignored"
    def _fake_bundle(tenant_id, llm_type, chat_id):
        llm_calls["bundle"] = (tenant_id, llm_type, chat_id)
        return _FakeChat()
    monkeypatch.setattr(module, "LLMBundle", _fake_bundle)
    monkeypatch.setattr(module, "load_prompt", lambda name: f"prompt-{name}")
    _set_request_json(monkeypatch, module, {"question": "solar", "search_id": "search-1"})
    res = _run(module.related_questions.__wrapped__())
    assert res["code"] == 0
    assert res["data"] == ["Alpha", "Beta"]
    assert llm_calls["bundle"][0] == "user-1"
    assert llm_calls["bundle"][2] == "chat-x"
    assert llm_calls["options"] == {"temperature": 0.2}
    assert llm_calls["prompt"] == "prompt-related_question"
    assert "Keywords: solar" in llm_calls["messages"][0]["content"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_conversation_app/test_conversation_routes_unit.py",
"license": "Apache License 2.0",
"lines": 624,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_kb_app/test_kb_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import inspect
import json
import sys
from copy import deepcopy
from datetime import datetime
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
# Module-wide marker: ignore joblib's serial-mode UserWarning for every test in this file.
pytestmark = pytest.mark.filterwarnings("ignore:.*joblib will operate in serial mode.*:UserWarning")
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _DummyArgs(dict):
def getlist(self, key):
value = self.get(key)
if value is None:
return []
if isinstance(value, list):
return value
return [value]
class _DummyKB:
def __init__(self, *, kb_id="kb-1", name="old_kb", tenant_id="tenant-1", pagerank=0):
self.id = kb_id
self.name = name
self.tenant_id = tenant_id
self.pagerank = pagerank
self.parser_config = {}
def to_dict(self):
return {
"id": self.id,
"name": self.name,
"tenant_id": self.tenant_id,
"pagerank": self.pagerank,
"parser_config": deepcopy(self.parser_config),
}
class _DummyTask:
def __init__(self, task_id, progress):
self.id = task_id
self.progress = progress
def to_dict(self):
return {"id": self.id, "progress": self.progress}
def _run(coro):
return asyncio.run(coro)
def _unwrap_route(func):
route_func = inspect.unwrap(func)
visited = set()
while getattr(route_func, "__closure__", None) and route_func not in visited:
visited.add(route_func)
nested = None
for cell in route_func.__closure__:
candidate = cell.cell_contents
if inspect.isfunction(candidate) and candidate is not route_func:
nested = inspect.unwrap(candidate)
break
if nested is None:
break
route_func = nested
return route_func
def _load_kb_module(monkeypatch):
    """Load ``api/apps/kb_app.py`` in isolation with heavy dependencies stubbed.

    Before executing the module, ``sys.modules`` is seeded with inert stand-ins
    for the deepdoc parser stack, ``xgboost``, and the web layer
    (``api.apps.current_user`` / ``login_required``), so importing ``kb_app``
    pulls in no real document parsing or HTTP machinery.  The ordering matters:
    every stub must be registered before ``spec.loader.exec_module`` runs.
    Returns the freshly executed module object.
    """
    # testcases/test_web_api/test_kb_app/<this file> -> four levels up is the repo root.
    repo_root = Path(__file__).resolve().parents[4]
    # "common" points at the real repo package so kb_app's imports resolve.
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    # deepdoc and its parser submodules are replaced with empty stub classes.
    deepdoc_pkg = ModuleType("deepdoc")
    deepdoc_parser_pkg = ModuleType("deepdoc.parser")
    deepdoc_parser_pkg.__path__ = []
    class _StubPdfParser:
        pass
    class _StubExcelParser:
        pass
    class _StubDocxParser:
        pass
    deepdoc_parser_pkg.PdfParser = _StubPdfParser
    deepdoc_parser_pkg.ExcelParser = _StubExcelParser
    deepdoc_parser_pkg.DocxParser = _StubDocxParser
    deepdoc_pkg.parser = deepdoc_parser_pkg
    monkeypatch.setitem(sys.modules, "deepdoc", deepdoc_pkg)
    monkeypatch.setitem(sys.modules, "deepdoc.parser", deepdoc_parser_pkg)
    deepdoc_excel_module = ModuleType("deepdoc.parser.excel_parser")
    deepdoc_excel_module.RAGFlowExcelParser = _StubExcelParser
    monkeypatch.setitem(sys.modules, "deepdoc.parser.excel_parser", deepdoc_excel_module)
    deepdoc_parser_utils = ModuleType("deepdoc.parser.utils")
    deepdoc_parser_utils.get_text = lambda *_args, **_kwargs: ""
    monkeypatch.setitem(sys.modules, "deepdoc.parser.utils", deepdoc_parser_utils)
    monkeypatch.setitem(sys.modules, "xgboost", ModuleType("xgboost"))
    # Fake the web-app module: a fixed current_user and a no-op login_required.
    apps_mod = ModuleType("api.apps")
    apps_mod.current_user = SimpleNamespace(id="user-1")
    apps_mod.login_required = lambda func: func
    monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
    # Execute kb_app.py under a private module name with a no-op route manager.
    module_name = "test_kb_routes_unit_module"
    module_path = repo_root / "api" / "apps" / "kb_app.py"
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, module_name, module)
    spec.loader.exec_module(module)
    return module
def _set_request_json(monkeypatch, module, payload):
    """Patch the module's ``get_request_json`` to yield *payload*.

    A fresh deep copy is produced on every invocation so route handlers
    cannot leak mutations into later calls.
    """

    def _fake_get_request_json():
        return _AwaitableValue(deepcopy(payload))

    monkeypatch.setattr(module, "get_request_json", _fake_get_request_json)
def _set_request_args(monkeypatch, module, args):
    """Install a fake ``request`` object whose ``args`` behave like a MultiDict."""
    fake_request = SimpleNamespace(args=_DummyArgs(args))
    monkeypatch.setattr(module, "request", fake_request)
def _base_update_payload(**kwargs):
payload = {"kb_id": "kb-1", "name": "new_kb", "description": "", "parser_id": "naive"}
payload.update(kwargs)
return payload
@pytest.fixture(scope="session")
def auth():
    """Session-scoped stand-in for the auth token expected by shared fixtures."""
    return "unit-auth"
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    """No-op override of the global tenant-setup fixture; these tests never hit a server."""
    return None
@pytest.mark.p2
def test_create_branches(monkeypatch):
    """Cover create(): service-level rejection, save failure, success, and exception paths."""
    module = _load_kb_module(monkeypatch)
    # create_with_name returning (False, payload) short-circuits: payload comes back as-is.
    _set_request_json(monkeypatch, module, {"name": "early"})
    monkeypatch.setattr(module.KnowledgebaseService, "create_with_name", lambda **_kwargs: (False, {"code": 777, "message": "early"}))
    res = _run(inspect.unwrap(module.create)())
    assert res["code"] == 777, res
    # A failed save() maps to DATA_ERROR.
    _set_request_json(monkeypatch, module, {"name": "save-fail"})
    monkeypatch.setattr(module.KnowledgebaseService, "create_with_name", lambda **_kwargs: (True, {"id": "kb-1"}))
    monkeypatch.setattr(module.KnowledgebaseService, "save", lambda **_kwargs: False)
    res = _run(inspect.unwrap(module.create)())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    # Successful save returns the new kb_id.
    _set_request_json(monkeypatch, module, {"name": "save-ok"})
    monkeypatch.setattr(module.KnowledgebaseService, "save", lambda **_kwargs: True)
    res = _run(inspect.unwrap(module.create)())
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["kb_id"] == "kb-1", res
    # An exception raised inside save() surfaces as EXCEPTION_ERROR with its message.
    _set_request_json(monkeypatch, module, {"name": "save-ex"})
    def _raise_save(**_kwargs):
        raise RuntimeError("save boom")
    monkeypatch.setattr(module.KnowledgebaseService, "save", _raise_save)
    res = _run(inspect.unwrap(module.create)())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "save boom" in res["message"], res
@pytest.mark.p2
def test_update_branches(monkeypatch):
    """Walk every validation/error/success branch of the update() route.

    The same service attributes are re-patched between calls, so statement
    order is significant throughout.
    """
    module = _load_kb_module(monkeypatch)
    update_route = _unwrap_route(module.update)
    # Name validation: non-string, whitespace-only, and over-long names are rejected.
    _set_request_json(monkeypatch, module, _base_update_payload(name=1))
    res = _run(update_route())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "must be string" in res["message"], res
    _set_request_json(monkeypatch, module, _base_update_payload(name=" "))
    res = _run(update_route())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "can't be empty" in res["message"], res
    _set_request_json(monkeypatch, module, _base_update_payload(name="a" * 129))
    res = _run(update_route())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "large than" in res["message"], res
    # With the Infinity doc engine enabled, "tag" parser and pagerank are rejected.
    monkeypatch.setattr(module.settings, "DOC_ENGINE_INFINITY", True)
    _set_request_json(monkeypatch, module, _base_update_payload(parser_id="tag"))
    res = _run(update_route())
    assert res["code"] == module.RetCode.OPERATING_ERROR, res
    _set_request_json(monkeypatch, module, _base_update_payload(pagerank=50))
    res = _run(update_route())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "pagerank" in res["message"], res
    monkeypatch.setattr(module.settings, "DOC_ENGINE_INFINITY", False)
    # Permission and lookup failures.
    monkeypatch.setattr(module.KnowledgebaseService, "accessible4deletion", lambda *_args, **_kwargs: False)
    _set_request_json(monkeypatch, module, _base_update_payload())
    res = _run(update_route())
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    monkeypatch.setattr(module.KnowledgebaseService, "accessible4deletion", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [])
    _set_request_json(monkeypatch, module, _base_update_payload())
    res = _run(update_route())
    assert res["code"] == module.RetCode.OPERATING_ERROR, res
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **kwargs: [SimpleNamespace(id="kb-1")] if kwargs.get("created_by") else [])
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
    _set_request_json(monkeypatch, module, _base_update_payload())
    res = _run(update_route())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Can't find this dataset" in res["message"], res
    # Renaming to a name owned by another KB is rejected as a duplicate.
    kb = _DummyKB(kb_id="kb-1", name="old_name", pagerank=0)
    def _query_duplicate(**kwargs):
        if kwargs.get("created_by"):
            return [SimpleNamespace(id="kb-1")]
        if kwargs.get("name"):
            return [SimpleNamespace(id="dup")]
        return []
    monkeypatch.setattr(module.KnowledgebaseService, "query", _query_duplicate)
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, kb))
    monkeypatch.setattr(module.FileService, "filter_update", lambda *_args, **_kwargs: None)
    _set_request_json(monkeypatch, module, _base_update_payload(name="new_name"))
    res = _run(update_route())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Duplicated dataset name" in res["message"], res
    # update_by_id failing maps to DATA_ERROR.
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **kwargs: [SimpleNamespace(id="kb-1")] if kwargs.get("created_by") else [])
    monkeypatch.setattr(module.KnowledgebaseService, "update_by_id", lambda *_args, **_kwargs: False)
    _set_request_json(monkeypatch, module, _base_update_payload(name="new_name", connectors=["c1"]))
    res = _run(update_route())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    # Success path with a pagerank change (0 -> 50) and connector linking warnings.
    # get_by_id is consumed twice per call, so results are served via pop(0).
    async def _thread_pool_exec(func, *args, **kwargs):
        return func(*args, **kwargs)
    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_exec)
    monkeypatch.setattr(module.settings, "docStoreConn", SimpleNamespace(update=lambda *_args, **_kwargs: True))
    monkeypatch.setattr(module.search, "index_name", lambda _tenant: "idx")
    monkeypatch.setattr(module.KnowledgebaseService, "update_by_id", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.Connector2KbService, "link_connectors", lambda *_args, **_kwargs: ["warn"])
    monkeypatch.setattr(module.logging, "error", lambda *_args, **_kwargs: None)
    kb_first = _DummyKB(kb_id="kb-1", name="old_name", pagerank=0)
    kb_second = _DummyKB(kb_id="kb-1", name="new_kb", pagerank=50)
    get_by_id_results = [(True, kb_first), (True, kb_second)]
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: get_by_id_results.pop(0))
    _set_request_json(monkeypatch, module, _base_update_payload(name="new_kb", pagerank=50, connectors=["conn-1"]))
    res = _run(update_route())
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["connectors"] == ["conn-1"], res
    # Success path with pagerank being cleared (50 -> 0).
    kb_first = _DummyKB(kb_id="kb-1", name="old_name", pagerank=50)
    kb_second = _DummyKB(kb_id="kb-1", name="new_kb", pagerank=0)
    get_by_id_results = [(True, kb_first), (True, kb_second)]
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: get_by_id_results.pop(0))
    monkeypatch.setattr(module.Connector2KbService, "link_connectors", lambda *_args, **_kwargs: [])
    _set_request_json(monkeypatch, module, _base_update_payload(name="new_kb"))
    res = _run(update_route())
    assert res["code"] == module.RetCode.SUCCESS, res
    # Second get_by_id lookup failing after the update maps to a database error.
    kb_first = _DummyKB(kb_id="kb-1", name="old_name", pagerank=0)
    get_by_id_results = [(True, kb_first), (False, None)]
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: get_by_id_results.pop(0))
    _set_request_json(monkeypatch, module, _base_update_payload(name="new_kb"))
    res = _run(update_route())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Database error" in res["message"], res
    # Unexpected exception inside the route surfaces as EXCEPTION_ERROR.
    def _raise_query(**_kwargs):
        raise RuntimeError("update boom")
    monkeypatch.setattr(module.KnowledgebaseService, "query", _raise_query)
    _set_request_json(monkeypatch, module, _base_update_payload())
    res = _run(update_route())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "update boom" in res["message"], res
@pytest.mark.p2
def test_update_metadata_setting_not_found(monkeypatch):
    """update_metadata_setting() reports DATA_ERROR when the KB lookup fails."""
    kb_module = _load_kb_module(monkeypatch)
    _set_request_json(monkeypatch, kb_module, {"kb_id": "missing-kb", "metadata": {}})
    monkeypatch.setattr(kb_module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
    response = _run(inspect.unwrap(kb_module.update_metadata_setting)())
    assert response["code"] == kb_module.RetCode.DATA_ERROR, response
    assert "Database error" in response["message"], response
@pytest.mark.p2
def test_detail_branches(monkeypatch):
    """Cover detail(): not-owned KB, missing detail, success serialisation, and exception."""
    module = _load_kb_module(monkeypatch)
    # KB not in the user's tenants -> OPERATING_ERROR.
    _set_request_args(monkeypatch, module, {"kb_id": "kb-1"})
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [])
    res = inspect.unwrap(module.detail)()
    assert res["code"] == module.RetCode.OPERATING_ERROR, res
    # KB found but get_detail returns nothing -> DATA_ERROR.
    _set_request_args(monkeypatch, module, {"kb_id": "kb-1"})
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [SimpleNamespace(id="kb-1")])
    monkeypatch.setattr(module.KnowledgebaseService, "get_detail", lambda _kb_id: None)
    res = inspect.unwrap(module.detail)()
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Can't find this dataset" in res["message"], res
    # Success: size/connectors are attached, metadata is schema-converted,
    # and datetime task timestamps are rendered as strings.
    finish_at = datetime(2025, 1, 1, 12, 30, 0)
    kb_detail = {
        "id": "kb-1",
        "parser_config": {"metadata": {"x": "y"}},
        "graphrag_task_finish_at": finish_at,
        "raptor_task_finish_at": finish_at,
        "mindmap_task_finish_at": finish_at,
    }
    monkeypatch.setattr(module.KnowledgebaseService, "get_detail", lambda _kb_id: deepcopy(kb_detail))
    monkeypatch.setattr(module.DocumentService, "get_total_size_by_kb_id", lambda **_kwargs: 1024)
    monkeypatch.setattr(module.Connector2KbService, "list_connectors", lambda _kb_id: ["conn-1"])
    monkeypatch.setattr(module, "turn2jsonschema", lambda metadata: {"type": "object", "properties": metadata})
    res = inspect.unwrap(module.detail)()
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["size"] == 1024, res
    assert res["data"]["connectors"] == ["conn-1"], res
    assert isinstance(res["data"]["parser_config"]["metadata"], dict), res
    assert res["data"]["graphrag_task_finish_at"] == "2025-01-01 12:30:00", res
    # Exception in the tenant query surfaces as EXCEPTION_ERROR.
    def _raise_tenants(**_kwargs):
        raise RuntimeError("detail boom")
    monkeypatch.setattr(module.UserTenantService, "query", _raise_tenants)
    res = inspect.unwrap(module.detail)()
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "detail boom" in res["message"], res
@pytest.mark.p2
def test_list_kbs_owner_ids_and_desc(monkeypatch):
    """Cover list_kbs(): plain listing, owner_ids tenant filtering, and exception path."""
    module = _load_kb_module(monkeypatch)
    _set_request_args(monkeypatch, module, {"keywords": "", "page": "1", "page_size": "2", "parser_id": "naive", "orderby": "create_time", "desc": "false"})
    _set_request_json(monkeypatch, module, {})
    monkeypatch.setattr(module.TenantService, "get_joined_tenants_by_user_id", lambda _uid: [{"tenant_id": "tenant-1"}])
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_tenant_ids", lambda *_args, **_kwargs: ([{"id": "kb-1", "tenant_id": "tenant-1"}], 1))
    res = _run(inspect.unwrap(module.list_kbs)())
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["total"] == 1, res
    # With owner_ids the result is filtered to matching tenants and total is recomputed.
    _set_request_json(monkeypatch, module, {"owner_ids": ["tenant-1"]})
    monkeypatch.setattr(
        module.KnowledgebaseService,
        "get_by_tenant_ids",
        lambda *_args, **_kwargs: (
            [{"id": "kb-1", "tenant_id": "tenant-1"}, {"id": "kb-2", "tenant_id": "tenant-2"}],
            2,
        ),
    )
    res = _run(inspect.unwrap(module.list_kbs)())
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["total"] == 1, res
    assert all(kb["tenant_id"] == "tenant-1" for kb in res["data"]["kbs"]), res
    # A service exception surfaces as EXCEPTION_ERROR with its message.
    def _raise_kb_list(*_args, **_kwargs):
        raise RuntimeError("list boom")
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_tenant_ids", _raise_kb_list)
    res = _run(inspect.unwrap(module.list_kbs)())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "list boom" in res["message"], res
@pytest.mark.p2
def test_rm_and_rm_sync_branches(monkeypatch):
    """Cover rm(): auth failure, missing KB, document/KB removal failures, full success, and exception."""
    module = _load_kb_module(monkeypatch)
    _set_request_json(monkeypatch, module, {"kb_id": "kb-1"})
    monkeypatch.setattr(module.KnowledgebaseService, "accessible4deletion", lambda *_args, **_kwargs: False)
    res = _run(inspect.unwrap(module.rm)())
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    monkeypatch.setattr(module.KnowledgebaseService, "accessible4deletion", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [])
    res = _run(inspect.unwrap(module.rm)())
    assert res["code"] == module.RetCode.OPERATING_ERROR, res
    # thread_pool_exec is collapsed to a synchronous call for the remaining branches.
    async def _thread_pool_exec(func, *args, **kwargs):
        return func(*args, **kwargs)
    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_exec)
    kbs = [SimpleNamespace(id="kb-1", tenant_id="tenant-1", name="kb-1")]
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: kbs)
    monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [SimpleNamespace(id="doc-1")])
    monkeypatch.setattr(module.DocumentService, "remove_document", lambda *_args, **_kwargs: False)
    res = _run(inspect.unwrap(module.rm)())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Document removal" in res["message"], res
    # Document removal succeeds, but KB deletion fails; the doc store's delete()
    # raising is tolerated (best-effort index drop) while delete_by_id=False is fatal.
    monkeypatch.setattr(module.DocumentService, "remove_document", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.File2DocumentService, "get_by_document_id", lambda _doc_id: [SimpleNamespace(file_id="file-1")])
    monkeypatch.setattr(module.FileService, "filter_delete", lambda *_args, **_kwargs: None)
    monkeypatch.setattr(module.File2DocumentService, "delete_by_document_id", lambda _doc_id: None)
    class _DocStore:
        def delete(self, *_args, **_kwargs):
            raise RuntimeError("drop failed")
        def delete_idx(self, *_args, **_kwargs):
            return True
    monkeypatch.setattr(module.settings, "docStoreConn", _DocStore())
    monkeypatch.setattr(module.search, "index_name", lambda _tenant_id: "idx")
    monkeypatch.setattr(module.KnowledgebaseService, "delete_by_id", lambda _kb_id: False)
    res = _run(inspect.unwrap(module.rm)())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Knowledgebase removal" in res["message"], res
    # Full success path: storage bucket for the KB is removed.
    class _Storage:
        def __init__(self):
            self.removed = []
        def remove_bucket(self, kb_id):
            self.removed.append(kb_id)
    storage = _Storage()
    monkeypatch.setattr(module.settings, "STORAGE_IMPL", storage)
    class _GoodDocStore:
        def delete(self, *_args, **_kwargs):
            return True
        def delete_idx(self, *_args, **_kwargs):
            return True
    monkeypatch.setattr(module.settings, "docStoreConn", _GoodDocStore())
    monkeypatch.setattr(module.KnowledgebaseService, "delete_by_id", lambda _kb_id: True)
    res = _run(inspect.unwrap(module.rm)())
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"] is True, res
    assert storage.removed == ["kb-1"], storage.removed
    # Unexpected exception surfaces as EXCEPTION_ERROR.
    def _raise_rm(**_kwargs):
        raise RuntimeError("rm boom")
    monkeypatch.setattr(module.KnowledgebaseService, "query", _raise_rm)
    res = _run(inspect.unwrap(module.rm)())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "rm boom" in res["message"], res
@pytest.mark.p2
def test_tags_and_meta_branches(monkeypatch):
    """Cover the tag routes (list/rm/rename), get_meta, and get_basic_info branches."""
    module = _load_kb_module(monkeypatch)
    # list_tags: auth failure, then aggregation across the user's tenants.
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: False)
    res = inspect.unwrap(module.list_tags)("kb-1")
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.UserTenantService, "get_tenants_by_user_id", lambda _uid: [{"tenant_id": "tenant-1"}, {"tenant_id": "tenant-2"}])
    monkeypatch.setattr(module.settings, "retriever", SimpleNamespace(all_tags=lambda tenant_id, kb_ids: [f"{tenant_id}:{kb_ids[0]}"]))
    res = inspect.unwrap(module.list_tags)("kb-1")
    assert res["code"] == module.RetCode.SUCCESS, res
    assert len(res["data"]) == 2, res
    # list_tags_from_kbs: any inaccessible KB in the list fails the whole request.
    _set_request_args(monkeypatch, module, {"kb_ids": "kb-1,kb-2"})
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda kb_id, _uid: kb_id == "kb-1")
    res = inspect.unwrap(module.list_tags_from_kbs)()
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: True)
    res = inspect.unwrap(module.list_tags_from_kbs)()
    assert res["code"] == module.RetCode.SUCCESS, res
    assert isinstance(res["data"], list), res
    # rm_tags: auth failure, then success via the doc store update.
    _set_request_json(monkeypatch, module, {"tags": ["a", "b"]})
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: False)
    res = _run(inspect.unwrap(module.rm_tags)("kb-1"))
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _DummyKB(tenant_id="tenant-1")))
    monkeypatch.setattr(module.settings, "docStoreConn", SimpleNamespace(update=lambda *_args, **_kwargs: True))
    monkeypatch.setattr(module.search, "index_name", lambda _tenant_id: "idx")
    res = _run(inspect.unwrap(module.rm_tags)("kb-1"))
    assert res["code"] == module.RetCode.SUCCESS, res
    # rename_tags: auth failure, then success.
    _set_request_json(monkeypatch, module, {"from_tag": "a", "to_tag": "b"})
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: False)
    res = _run(inspect.unwrap(module.rename_tags)("kb-1"))
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: True)
    res = _run(inspect.unwrap(module.rename_tags)("kb-1"))
    assert res["code"] == module.RetCode.SUCCESS, res
    # get_meta: partial access fails; full access returns flattened metadata.
    _set_request_args(monkeypatch, module, {"kb_ids": "kb-1,kb-2"})
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda kb_id, _uid: kb_id == "kb-1")
    res = inspect.unwrap(module.get_meta)()
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.DocMetadataService, "get_flatted_meta_by_kbs", lambda _kb_ids: {"source": ["a"]})
    res = inspect.unwrap(module.get_meta)()
    assert res["code"] == module.RetCode.SUCCESS, res
    assert "source" in res["data"], res
    # get_basic_info: auth failure, then success with the service's counters.
    _set_request_args(monkeypatch, module, {"kb_id": "kb-1"})
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: False)
    res = inspect.unwrap(module.get_basic_info)()
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.DocumentService, "knowledgebase_basic_info", lambda _kb_id: {"finished": 1})
    res = inspect.unwrap(module.get_basic_info)()
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["finished"] == 1, res
@pytest.mark.p2
def test_knowledge_graph_branches(monkeypatch):
    """Cover knowledge_graph(): auth, missing index, empty/invalid/valid graph payloads,
    plus delete_knowledge_graph()."""
    module = _load_kb_module(monkeypatch)
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: False)
    res = _run(inspect.unwrap(module.knowledge_graph)("kb-1"))
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    # Missing search index -> empty graph/mind_map payload.
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _DummyKB(tenant_id="tenant-1")))
    monkeypatch.setattr(module.search, "index_name", lambda _tenant_id: "idx")
    monkeypatch.setattr(module.settings, "docStoreConn", SimpleNamespace(index_exist=lambda *_args, **_kwargs: False))
    res = _run(inspect.unwrap(module.knowledge_graph)("kb-1"))
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"] == {"graph": {}, "mind_map": {}}, res
    # Index exists but retrieval returns no hits -> still empty payload.
    monkeypatch.setattr(module.settings, "docStoreConn", SimpleNamespace(index_exist=lambda *_args, **_kwargs: True))
    class _EmptyRetriever:
        async def search(self, *_args, **_kwargs):
            return SimpleNamespace(ids=[], field={})
    monkeypatch.setattr(module.settings, "retriever", _EmptyRetriever())
    res = _run(inspect.unwrap(module.knowledge_graph)("kb-1"))
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"] == {"graph": {}, "mind_map": {}}, res
    # A graph document with malformed JSON is tolerated and yields an empty graph.
    graph_payload = {
        "nodes": [{"id": "n2", "pagerank": 2}, {"id": "n1", "pagerank": 3}],
        "edges": [
            {"source": "n1", "target": "n2", "weight": 2},
            {"source": "n1", "target": "n1", "weight": 3},
            {"source": "n1", "target": "n3", "weight": 4},
        ],
    }
    class _GraphRetriever:
        async def search(self, *_args, **_kwargs):
            return SimpleNamespace(
                ids=["bad"],
                field={
                    "bad": {"knowledge_graph_kwd": "graph", "content_with_weight": "{bad json"},
                },
            )
    monkeypatch.setattr(module.settings, "retriever", _GraphRetriever())
    res = _run(inspect.unwrap(module.knowledge_graph)("kb-1"))
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["graph"] == {}, res
    # Valid graph JSON: nodes are kept (2 here) and edges pruned to those whose
    # both endpoints survive, excluding self-loops (only n1->n2 remains).
    class _GraphRetrieverSuccess:
        async def search(self, *_args, **_kwargs):
            return SimpleNamespace(
                ids=["good"],
                field={
                    "good": {"knowledge_graph_kwd": "graph", "content_with_weight": json.dumps(graph_payload)},
                },
            )
    monkeypatch.setattr(module.settings, "retriever", _GraphRetrieverSuccess())
    res = _run(inspect.unwrap(module.knowledge_graph)("kb-1"))
    assert res["code"] == module.RetCode.SUCCESS, res
    assert len(res["data"]["graph"]["nodes"]) == 2, res
    assert len(res["data"]["graph"]["edges"]) == 1, res
    # delete_knowledge_graph: auth failure, then success via docStoreConn.delete.
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: False)
    res = inspect.unwrap(module.delete_knowledge_graph)("kb-1")
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.settings, "docStoreConn", SimpleNamespace(delete=lambda *_args, **_kwargs: True))
    res = inspect.unwrap(module.delete_knowledge_graph)("kb-1")
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"] is True, res
@pytest.mark.p2
def test_list_pipeline_logs_validation_branches(monkeypatch):
    """Cover list_pipeline_logs(): missing kb_id, a passing listing, and the date-filter error."""
    module = _load_kb_module(monkeypatch)
    # No kb_id -> ARGUMENT_ERROR.
    _set_request_args(monkeypatch, module, {})
    _set_request_json(monkeypatch, module, {})
    res = _run(inspect.unwrap(module.list_pipeline_logs)())
    assert res["code"] == module.RetCode.ARGUMENT_ERROR, res
    assert "KB ID" in res["message"], res
    # Full argument set with from > to passes through to the service here.
    _set_request_args(
        monkeypatch,
        module,
        {
            "kb_id": "kb-1",
            "keywords": "k",
            "page": "1",
            "page_size": "10",
            "orderby": "create_time",
            "desc": "false",
            "create_date_from": "2025-02-01",
            "create_date_to": "2025-01-01",
        },
    )
    _set_request_json(monkeypatch, module, {})
    monkeypatch.setattr(module.PipelineOperationLogService, "get_file_logs_by_kb_id", lambda *_args, **_kwargs: ([], 0))
    res = _run(inspect.unwrap(module.list_pipeline_logs)())
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["total"] == 0, res
    # This from/to combination is the one the route rejects as abnormal.
    _set_request_args(
        monkeypatch,
        module,
        {
            "kb_id": "kb-1",
            "create_date_from": "2025-01-01",
            "create_date_to": "2025-02-01",
        },
    )
    _set_request_json(monkeypatch, module, {})
    res = _run(inspect.unwrap(module.list_pipeline_logs)())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Create data filter is abnormal." in res["message"], res
@pytest.mark.p2
def test_list_pipeline_logs_filter_and_exception_branches(monkeypatch):
    """Cover list_pipeline_logs(): invalid status/type filters and the service-exception path."""
    module = _load_kb_module(monkeypatch)
    _set_request_args(
        monkeypatch,
        module,
        {
            "kb_id": "kb-1",
            "page": "1",
            "page_size": "10",
            "desc": "false",
            "create_date_from": "2025-02-01",
            "create_date_to": "2025-01-01",
        },
    )
    # Unknown operation_status value -> DATA_ERROR naming the field.
    _set_request_json(monkeypatch, module, {"operation_status": ["BAD_STATUS"]})
    res = _run(inspect.unwrap(module.list_pipeline_logs)())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "operation_status" in res["message"], res
    # Unknown document type -> generic invalid-filter DATA_ERROR.
    _set_request_json(monkeypatch, module, {"types": ["bad_type"]})
    res = _run(inspect.unwrap(module.list_pipeline_logs)())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Invalid filter conditions" in res["message"], res
    # Service exception surfaces as EXCEPTION_ERROR with its message.
    def _raise_file_logs(*_args, **_kwargs):
        raise RuntimeError("logs boom")
    _set_request_json(monkeypatch, module, {"suffix": [".txt"]})
    monkeypatch.setattr(module.PipelineOperationLogService, "get_file_logs_by_kb_id", _raise_file_logs)
    res = _run(inspect.unwrap(module.list_pipeline_logs)())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "logs boom" in res["message"], res
@pytest.mark.p2
def test_list_pipeline_dataset_logs_branches(monkeypatch):
    """Cover list_pipeline_dataset_logs(): argument/date/status validation, success, and exception."""
    module = _load_kb_module(monkeypatch)
    # No kb_id -> ARGUMENT_ERROR.
    _set_request_args(monkeypatch, module, {})
    _set_request_json(monkeypatch, module, {})
    res = _run(inspect.unwrap(module.list_pipeline_dataset_logs)())
    assert res["code"] == module.RetCode.ARGUMENT_ERROR, res
    assert "KB ID" in res["message"], res
    # This from/to combination is rejected as an abnormal date filter.
    _set_request_args(
        monkeypatch,
        module,
        {
            "kb_id": "kb-1",
            "desc": "false",
            "create_date_from": "2025-01-01",
            "create_date_to": "2025-02-01",
        },
    )
    _set_request_json(monkeypatch, module, {})
    res = _run(inspect.unwrap(module.list_pipeline_dataset_logs)())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Create data filter is abnormal." in res["message"], res
    # Unknown operation_status value -> DATA_ERROR naming the field.
    _set_request_args(
        monkeypatch,
        module,
        {
            "kb_id": "kb-1",
            "page": "1",
            "page_size": "10",
            "desc": "false",
            "create_date_from": "2025-02-01",
            "create_date_to": "2025-01-01",
        },
    )
    _set_request_json(monkeypatch, module, {"operation_status": ["NOT_A_STATUS"]})
    res = _run(inspect.unwrap(module.list_pipeline_dataset_logs)())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "operation_status" in res["message"], res
    # Success: the service's logs and total are passed through.
    _set_request_args(
        monkeypatch,
        module,
        {
            "kb_id": "kb-1",
            "page": "1",
            "page_size": "10",
            "desc": "true",
            "create_date_from": "2025-02-01",
            "create_date_to": "2025-01-01",
        },
    )
    _set_request_json(monkeypatch, module, {"operation_status": []})
    monkeypatch.setattr(
        module.PipelineOperationLogService,
        "get_dataset_logs_by_kb_id",
        lambda *_args, **_kwargs: ([{"id": "l1"}], 1),
    )
    res = _run(inspect.unwrap(module.list_pipeline_dataset_logs)())
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["total"] == 1, res
    assert res["data"]["logs"][0]["id"] == "l1", res
    # Service exception surfaces as EXCEPTION_ERROR with its message.
    def _raise_dataset_logs(*_args, **_kwargs):
        raise RuntimeError("dataset logs boom")
    monkeypatch.setattr(module.PipelineOperationLogService, "get_dataset_logs_by_kb_id", _raise_dataset_logs)
    res = _run(inspect.unwrap(module.list_pipeline_dataset_logs)())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "dataset logs boom" in res["message"], res
@pytest.mark.p2
def test_pipeline_log_detail_and_delete_routes_branches(monkeypatch):
    """Cover delete_pipeline_logs() (no-op vs explicit ids) and pipeline_log_detail()."""
    module = _load_kb_module(monkeypatch)
    # delete: missing kb_id -> ARGUMENT_ERROR.
    _set_request_args(monkeypatch, module, {})
    _set_request_json(monkeypatch, module, {})
    res = _run(inspect.unwrap(module.delete_pipeline_logs)())
    assert res["code"] == module.RetCode.ARGUMENT_ERROR, res
    assert "KB ID" in res["message"], res
    # With no log_ids the route succeeds without calling delete_by_ids with anything.
    deleted_ids = []
    def _delete_by_ids(log_ids):
        deleted_ids.extend(log_ids)
    monkeypatch.setattr(module.PipelineOperationLogService, "delete_by_ids", _delete_by_ids)
    _set_request_args(monkeypatch, module, {"kb_id": "kb-1"})
    _set_request_json(monkeypatch, module, {})
    res = _run(inspect.unwrap(module.delete_pipeline_logs)())
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"] is True, res
    assert deleted_ids == [], deleted_ids
    # Explicit log_ids are forwarded to the service.
    _set_request_json(monkeypatch, module, {"log_ids": ["l1", "l2"]})
    res = _run(inspect.unwrap(module.delete_pipeline_logs)())
    assert res["code"] == module.RetCode.SUCCESS, res
    assert deleted_ids == ["l1", "l2"], deleted_ids
    # detail: missing log_id, unknown log_id, then success with the log's dict.
    _set_request_args(monkeypatch, module, {})
    res = inspect.unwrap(module.pipeline_log_detail)()
    assert res["code"] == module.RetCode.ARGUMENT_ERROR, res
    assert "Pipeline log ID" in res["message"], res
    _set_request_args(monkeypatch, module, {"log_id": "missing"})
    monkeypatch.setattr(module.PipelineOperationLogService, "get_by_id", lambda _log_id: (False, None))
    res = inspect.unwrap(module.pipeline_log_detail)()
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Invalid pipeline log ID" in res["message"], res
    class _Log:
        def to_dict(self):
            return {"id": "log-1", "status": "ok"}
    monkeypatch.setattr(module.PipelineOperationLogService, "get_by_id", lambda _log_id: (True, _Log()))
    res = inspect.unwrap(module.pipeline_log_detail)()
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["id"] == "log-1", res
@pytest.mark.p2
@pytest.mark.parametrize(
    "route_name,task_attr,response_key,task_type",
    [
        ("run_graphrag", "graphrag_task_id", "graphrag_task_id", "graphrag"),
        ("run_raptor", "raptor_task_id", "raptor_task_id", "raptor"),
        ("run_mindmap", "mindmap_task_id", "mindmap_task_id", "mindmap"),
    ],
)
def test_run_pipeline_task_routes_branch_matrix(monkeypatch, route_name, task_attr, response_key, task_type):
    """Exercise the three run_* task routes through the same branch matrix:
    missing kb_id, unknown KB, already-running task, stale task id with no
    documents, and a successful enqueue."""
    module = _load_kb_module(monkeypatch)
    route = inspect.unwrap(getattr(module, route_name))
    def _make_kb(task_id):
        # KB stub where only the parametrised task-id attribute carries task_id.
        payload = {
            "id": "kb-1",
            "tenant_id": "tenant-1",
            "graphrag_task_id": "",
            "raptor_task_id": "",
            "mindmap_task_id": "",
        }
        payload[task_attr] = task_id
        return SimpleNamespace(**payload)
    warnings = []
    monkeypatch.setattr(module.logging, "warning", lambda msg, *_args, **_kwargs: warnings.append(msg))
    # Empty kb_id -> DATA_ERROR.
    _set_request_json(monkeypatch, module, {"kb_id": ""})
    res = _run(route())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "KB ID" in res["message"], res
    # Unknown KB -> DATA_ERROR.
    _set_request_json(monkeypatch, module, {"kb_id": "kb-1"})
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
    res = _run(route())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Invalid Knowledgebase ID" in res["message"], res
    # Existing task with progress 0 counts as still running.
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _make_kb("task-running")))
    monkeypatch.setattr(module.TaskService, "get_by_id", lambda _task_id: (True, SimpleNamespace(progress=0)))
    res = _run(route())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "already running" in res["message"], res
    # A task id the TaskService no longer knows is logged as a warning, and an
    # empty document list then blocks the run.
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _make_kb("task-stale")))
    monkeypatch.setattr(module.TaskService, "get_by_id", lambda _task_id: (False, None))
    monkeypatch.setattr(module.DocumentService, "get_by_kb_id", lambda **_kwargs: ([], 0))
    res = _run(route())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "No documents in Knowledgebase kb-1" in res["message"], res
    assert warnings, "Expected warning for stale task id"
    # Success: the queue stub captures the enqueue arguments and its id is returned,
    # even when persisting the new task id via update_by_id fails.
    queue_calls = {}
    def _queue_stub(**kwargs):
        queue_calls.update(kwargs)
        return "queued-task-id"
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _make_kb("")))
    monkeypatch.setattr(
        module.DocumentService,
        "get_by_kb_id",
        lambda **_kwargs: ([{"id": "doc-1"}, {"id": "doc-2"}], 2),
    )
    monkeypatch.setattr(module, "queue_raptor_o_graphrag_tasks", _queue_stub)
    monkeypatch.setattr(module.KnowledgebaseService, "update_by_id", lambda *_args, **_kwargs: False)
    res = _run(route())
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"][response_key] == "queued-task-id", res
    assert queue_calls["ty"] == task_type, queue_calls
    assert queue_calls["doc_ids"] == ["doc-1", "doc-2"], queue_calls
@pytest.mark.p2
@pytest.mark.parametrize(
    "route_name,task_attr,empty_on_missing_task,error_text",
    [
        ("trace_graphrag", "graphrag_task_id", True, ""),
        ("trace_raptor", "raptor_task_id", False, "RAPTOR Task Not Found or Error Occurred"),
        ("trace_mindmap", "mindmap_task_id", False, "Mindmap Task Not Found or Error Occurred"),
    ],
)
def test_trace_pipeline_task_routes_branch_matrix(monkeypatch, route_name, task_attr, empty_on_missing_task, error_text):
    """Exercise every branch of a trace_* pipeline route.

    Covers: empty kb_id, unknown KB, no task bound (empty payload), a bound
    task whose row is missing (empty payload for graphrag, an error for
    raptor/mindmap, per the parametrization), and a found task.
    """
    module = _load_kb_module(monkeypatch)
    # Unwrap route decorators so the plain view function can be called.
    route = inspect.unwrap(getattr(module, route_name))
    def _make_kb(task_id):
        # KB stub with only the parametrized task attribute populated.
        payload = {
            "id": "kb-1",
            "tenant_id": "tenant-1",
            "graphrag_task_id": "",
            "raptor_task_id": "",
            "mindmap_task_id": "",
        }
        payload[task_attr] = task_id
        return SimpleNamespace(**payload)
    # Branch 1: empty kb_id is rejected.
    _set_request_args(monkeypatch, module, {"kb_id": ""})
    res = route()
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "KB ID" in res["message"], res
    # Branch 2: KB lookup fails.
    _set_request_args(monkeypatch, module, {"kb_id": "kb-1"})
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
    res = route()
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Invalid Knowledgebase ID" in res["message"], res
    # Branch 3: no task bound to the KB -> success with an empty payload.
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _make_kb("")))
    res = route()
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"] == {}, res
    # Branch 4: task id set but the task row is gone; outcome depends on the
    # route (empty payload vs. error), as parametrized.
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _make_kb("task-1")))
    monkeypatch.setattr(module.TaskService, "get_by_id", lambda _task_id: (False, None))
    res = route()
    if empty_on_missing_task:
        assert res["code"] == module.RetCode.SUCCESS, res
        assert res["data"] == {}, res
    else:
        assert res["code"] == module.RetCode.DATA_ERROR, res
        assert error_text in res["message"], res
    # Branch 5: the task exists and its payload is returned.
    monkeypatch.setattr(module.TaskService, "get_by_id", lambda _task_id: (True, _DummyTask("task-1", 1)))
    res = route()
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["id"] == "task-1", res
@pytest.mark.p2
def test_unbind_task_branch_matrix(monkeypatch):
    """Exercise delete_kb_task across its branches.

    Covers: empty kb_id, missing KB (treated as success), invalid task type,
    successful unbinding of all three task kinds (cancel flag in Redis, doc
    store deletions, KB update payloads), a type that stops matching during
    dispatch, and a failed KB update surfacing via server_error_response.
    """
    module = _load_kb_module(monkeypatch)
    route = inspect.unwrap(module.delete_kb_task)
    # Branch 1: empty kb_id is rejected.
    _set_request_args(monkeypatch, module, {"kb_id": ""})
    res = route()
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "KB ID" in res["message"], res
    # Branch 2: unknown KB still reports success (nothing to unbind).
    _set_request_args(monkeypatch, module, {"kb_id": "missing", "pipeline_task_type": module.PipelineTaskType.GRAPH_RAG})
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
    res = route()
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"] is True, res
    # KB stub with all three task kinds bound, used by the remaining branches.
    kb = SimpleNamespace(
        id="kb-1",
        tenant_id="tenant-1",
        graphrag_task_id="graph-task",
        raptor_task_id="raptor-task",
        mindmap_task_id="mindmap-task",
    )
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, kb))
    # Branch 3: unrecognised task type is rejected.
    _set_request_args(monkeypatch, module, {"kb_id": "kb-1", "pipeline_task_type": "unknown"})
    res = route()
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Invalid task type" in res["message"], res
    # Record the side effects of unbinding: Redis cancel flags, doc-store
    # deletions, and the KB update payloads that clear the task ids.
    cancelled = []
    deleted = []
    update_payloads = []
    monkeypatch.setattr(module.REDIS_CONN, "set", lambda key, value: cancelled.append((key, value)))
    monkeypatch.setattr(module.search, "index_name", lambda _tenant_id: "idx")
    monkeypatch.setattr(module.settings, "docStoreConn", SimpleNamespace(delete=lambda *args, **_kwargs: deleted.append(args)))
    def _record_update(_kb_id, payload):
        update_payloads.append((_kb_id, payload))
        return True
    monkeypatch.setattr(module.KnowledgebaseService, "update_by_id", _record_update)
    # Branch 4: unbind each of the three task kinds in turn.
    _set_request_args(monkeypatch, module, {"kb_id": "kb-1", "pipeline_task_type": module.PipelineTaskType.GRAPH_RAG})
    res = route()
    assert res["code"] == module.RetCode.SUCCESS, res
    _set_request_args(monkeypatch, module, {"kb_id": "kb-1", "pipeline_task_type": module.PipelineTaskType.RAPTOR})
    res = route()
    assert res["code"] == module.RetCode.SUCCESS, res
    _set_request_args(monkeypatch, module, {"kb_id": "kb-1", "pipeline_task_type": module.PipelineTaskType.MINDMAP})
    res = route()
    assert res["code"] == module.RetCode.SUCCESS, res
    assert ("graph-task-cancel", "x") in cancelled, cancelled
    assert ("raptor-task-cancel", "x") in cancelled, cancelled
    assert ("mindmap-task-cancel", "x") in cancelled, cancelled
    # Only two of the three unbinds hit the doc store.
    assert len(deleted) == 2, deleted
    assert any(payload.get("graphrag_task_id") == "" for _, payload in update_payloads), update_payloads
    assert any(payload.get("raptor_task_id") == "" for _, payload in update_payloads), update_payloads
    assert any(payload.get("mindmap_task_id") == "" for _, payload in update_payloads), update_payloads
    class _FlakyPipelineType:
        # Equality succeeds only on the first comparison, so the route's
        # initial validation passes but the later dispatch finds no match.
        def __init__(self, target):
            self.target = target
            self.calls = 0
        def __eq__(self, other):
            self.calls += 1
            if self.calls == 1:
                return other == self.target
            return False
    # Branch 5: type validates initially but fails dispatch -> internal error.
    _set_request_args(
        monkeypatch,
        module,
        {"kb_id": "kb-1", "pipeline_task_type": _FlakyPipelineType(module.PipelineTaskType.GRAPH_RAG)},
    )
    res = route()
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Internal Error: Invalid task type" in res["message"], res
    # Branch 6: KB update failure is routed through server_error_response.
    monkeypatch.setattr(module.KnowledgebaseService, "update_by_id", lambda *_args, **_kwargs: False)
    monkeypatch.setattr(module, "server_error_response", lambda e: module.get_json_result(code=module.RetCode.EXCEPTION_ERROR, message=str(e)))
    _set_request_args(monkeypatch, module, {"kb_id": "kb-1", "pipeline_task_type": module.PipelineTaskType.GRAPH_RAG})
    res = route()
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "cannot delete task" in res["message"], res
@pytest.mark.p2
def test_check_embedding_similarity_threshold_matrix_unit(monkeypatch):
    """check_embedding samples chunks, re-embeds them and scores similarity.

    The low-similarity sample exercises one skip/score branch per chunk
    (missing vector, bad vector type, low cosine, text stripped to nothing,
    title+content mix) and yields NOT_EFFECTIVE; a second, high-similarity
    sample yields SUCCESS.
    """
    module = _load_kb_module(monkeypatch)
    route = inspect.unwrap(module.check_embedding)
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, SimpleNamespace(tenant_id="tenant-1")))
    monkeypatch.setattr(module.search, "index_name", lambda _tenant_id: "idx")
    class _FlipBool:
        # Truthy only on the first bool() check; used below to drive the
        # TRIGGER_NO_TEXT chunk into the route's "no text" branch.
        def __init__(self):
            self._calls = 0
        def __bool__(self):
            self._calls += 1
            return self._calls == 1
    # re.sub is stubbed so that only the TRIGGER_NO_TEXT chunk yields the
    # flip-flopping truthiness object instead of its own text.
    monkeypatch.setattr(
        module.re,
        "sub",
        lambda _pattern, _repl, text: _FlipBool() if "TRIGGER_NO_TEXT" in str(text) else text,
    )
    def _fixed_sample(population, k):
        # Deterministic sampling: always take the first k elements.
        return list(population)[:k]
    monkeypatch.setattr(module.random, "sample", _fixed_sample)
    class _DocStore:
        # Doc-store stub: a no-select-fields search means "total count";
        # otherwise it returns a page keyed by offset, backed by an
        # in-memory chunk dict.
        def __init__(self, total, ids_by_offset, docs):
            self.total = total
            self.ids_by_offset = ids_by_offset
            self.docs = docs
        def search(self, select_fields, **kwargs):
            if not select_fields:
                return {"kind": "total"}
            return {"kind": "sample", "offset": kwargs["offset"]}
        def get_total(self, _res):
            return self.total
        def get_doc_ids(self, res):
            return self.ids_by_offset.get(res.get("offset", -1), [])
        def get(self, cid, _index_name, _kb_ids):
            return self.docs.get(cid, {})
    class _EmbModel:
        # Embedding stub keyed on the document title: "Doc Mix" scores the
        # title+content pairing differently from content alone, "Doc High"
        # returns identical vectors (cosine 1.0), everything else orthogonal.
        def __init__(self):
            self.calls = []
        def encode(self, pair):
            title, _txt = pair
            self.calls.append(title)
            if title == "Doc Mix":
                # title+content mix wins over content only path.
                return [module.np.array([1.0, 0.0]), module.np.array([0.0, 1.0])], None
            if title == "Doc High":
                return [module.np.array([1.0, 0.0]), module.np.array([1.0, 0.0])], None
            return [module.np.array([0.0, 1.0]), module.np.array([0.0, 1.0])], None
    emb_model = _EmbModel()
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: emb_model)
    # Low-similarity sample: each chunk exercises one distinct branch.
    low_docs = {
        "chunk-no-vec": {
            "doc_id": "doc-no-vec",
            "docnm_kwd": "Doc No Vec",
            "content_with_weight": "body-no-vec",
            "page_num_int": 1,
            "position_int": 1,
            "top_int": 1,
        },
        "chunk-bad-type": {
            "doc_id": "doc-bad-type",
            "docnm_kwd": "Doc Bad Type",
            "content_with_weight": "body-bad-type",
            "question_kwd": [],
            "q_vec": {"bad": "type"},
            "page_num_int": 1,
            "position_int": 2,
            "top_int": 2,
        },
        "chunk-low-zero": {
            "doc_id": "doc-low-zero",
            "docnm_kwd": "Doc Low Zero",
            "content_with_weight": "body-low",
            "question_kwd": [],
            "q_vec": "0\t0",
            "page_num_int": 1,
            "position_int": 3,
            "top_int": 3,
        },
        "chunk-no-text": {
            "doc_id": "doc-no-text",
            "docnm_kwd": "Doc No Text",
            "content_with_weight": "TRIGGER_NO_TEXT",
            "q_vec": [1.0, 0.0],
            "page_num_int": 1,
            "position_int": 4,
            "top_int": 4,
        },
        "chunk-mix": {
            "doc_id": "doc-mix",
            "docnm_kwd": "Doc Mix",
            "content_with_weight": "body-mix",
            "q_vec": [1.0, 0.0],
            "page_num_int": 1,
            "position_int": 5,
            "top_int": 5,
        },
    }
    # Offset 0 yields no ids, so only 5 of the 6 reported chunks are sampled.
    monkeypatch.setattr(
        module.settings,
        "docStoreConn",
        _DocStore(
            total=6,
            ids_by_offset={
                0: [],
                1: ["chunk-no-vec"],
                2: ["chunk-bad-type"],
                3: ["chunk-low-zero"],
                4: ["chunk-no-text"],
                5: ["chunk-mix"],
            },
            docs=low_docs,
        ),
    )
    _set_request_json(monkeypatch, module, {"kb_id": "kb-1", "embd_id": "emb-1", "check_num": 6})
    res = _run(route())
    assert res["code"] == module.RetCode.NOT_EFFECTIVE, res
    assert "average similarity" in res["message"], res
    summary = res["data"]["summary"]
    assert summary["sampled"] == 5, summary
    assert summary["valid"] == 2, summary
    reasons = {item.get("reason") for item in res["data"]["results"] if "reason" in item}
    assert "no_stored_vector" in reasons, res
    assert "no_text" in reasons, res
    assert any(item.get("chunk_id") == "chunk-low-zero" and "cos_sim" in item for item in res["data"]["results"]), res
    assert summary["match_mode"] in {"content_only", "title+content"}, summary
    # High-similarity sample: a single chunk whose re-embedding matches.
    high_docs = {
        "chunk-high": {
            "doc_id": "doc-high",
            "docnm_kwd": "Doc High",
            "content_with_weight": "body-high",
            "q_vec": [1.0, 0.0],
            "page_num_int": 1,
            "position_int": 1,
            "top_int": 1,
        }
    }
    monkeypatch.setattr(
        module.settings,
        "docStoreConn",
        _DocStore(total=1, ids_by_offset={0: ["chunk-high"]}, docs=high_docs),
    )
    _set_request_json(monkeypatch, module, {"kb_id": "kb-1", "embd_id": "emb-1", "check_num": 1})
    res = _run(route())
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["summary"]["avg_cos_sim"] > 0.9, res
@pytest.mark.p2
def test_check_embedding_error_and_empty_sample_paths_unit(monkeypatch):
    """check_embedding error paths.

    An encode() failure is surfaced as a DATA_ERROR with the exception text;
    an empty store (nothing to sample) currently raises UnboundLocalError —
    the test pins that existing behavior.
    """
    module = _load_kb_module(monkeypatch)
    route = inspect.unwrap(module.check_embedding)
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, SimpleNamespace(tenant_id="tenant-1")))
    monkeypatch.setattr(module.search, "index_name", lambda _tenant_id: "idx")
    # Deterministic sampling: always take the first k elements.
    monkeypatch.setattr(module.random, "sample", lambda population, k: list(population)[:k])
    class _DocStore:
        # Doc-store stub: total count plus per-offset chunk-id pages backed
        # by an in-memory chunk dict.
        def __init__(self, total, ids_by_offset, docs):
            self.total = total
            self.ids_by_offset = ids_by_offset
            self.docs = docs
        def search(self, select_fields, **kwargs):
            if not select_fields:
                return {"kind": "total"}
            return {"kind": "sample", "offset": kwargs["offset"]}
        def get_total(self, _res):
            return self.total
        def get_doc_ids(self, res):
            return self.ids_by_offset.get(res.get("offset", -1), [])
        def get(self, cid, _index_name, _kb_ids):
            return self.docs.get(cid, {})
    class _BoomEmbModel:
        # Embedding model whose encode() always fails.
        def encode(self, _pair):
            raise RuntimeError("encode boom")
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: _BoomEmbModel())
    monkeypatch.setattr(
        module.settings,
        "docStoreConn",
        _DocStore(
            total=1,
            ids_by_offset={0: ["chunk-err"]},
            docs={
                "chunk-err": {
                    "doc_id": "doc-err",
                    "docnm_kwd": "Doc Err",
                    "content_with_weight": "body-err",
                    "q_vec": [1.0, 0.0],
                    "page_num_int": 1,
                    "position_int": 1,
                    "top_int": 1,
                }
            },
        ),
    )
    # Branch 1: the encode failure is reported with the exception message.
    _set_request_json(monkeypatch, module, {"kb_id": "kb-1", "embd_id": "emb-1", "check_num": 1})
    res = _run(route())
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Embedding failure." in res["message"], res
    assert "encode boom" in res["message"], res
    class _OkEmbModel:
        # Healthy embedding model; never reached because the store is empty.
        def encode(self, _pair):
            return [module.np.array([1.0, 0.0]), module.np.array([1.0, 0.0])], None
    monkeypatch.setattr(module, "LLMBundle", lambda *_args, **_kwargs: _OkEmbModel())
    monkeypatch.setattr(module.settings, "docStoreConn", _DocStore(total=0, ids_by_offset={}, docs={}))
    # Branch 2: empty knowledge base — the route currently trips an
    # UnboundLocalError when no chunk is ever sampled.
    _set_request_json(monkeypatch, module, {"kb_id": "kb-1", "embd_id": "emb-1", "check_num": 1})
    with pytest.raises(UnboundLocalError):
        _run(route())
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_kb_app/test_kb_routes_unit.py",
"license": "Apache License 2.0",
"lines": 1071,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_llm_app/test_llm_list_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import json
import sys
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _ExprField:
def __init__(self, name):
self.name = name
def __eq__(self, other):
return (self.name, other)
class _StrEnum(str):
@property
def value(self):
return str(self)
class _DummyTenantLLMModel:
    # Stand-in for the TenantLLM DB model: each column is an _ExprField, so
    # ``TenantLLM.tenant_id == x`` yields a ("tenant_id", x) filter tuple.
    tenant_id = _ExprField("tenant_id")
    llm_factory = _ExprField("llm_factory")
    llm_name = _ExprField("llm_name")
class _TenantLLMRow:
def __init__(
self,
*,
llm_name,
llm_factory,
model_type,
api_key="key",
status="1",
used_tokens=0,
api_base="",
max_tokens=8192,
):
self.llm_name = llm_name
self.llm_factory = llm_factory
self.model_type = model_type
self.api_key = api_key
self.status = status
self.used_tokens = used_tokens
self.api_base = api_base
self.max_tokens = max_tokens
def to_dict(self):
return {
"llm_name": self.llm_name,
"llm_factory": self.llm_factory,
"model_type": self.model_type,
"status": self.status,
"used_tokens": self.used_tokens,
"api_base": self.api_base,
"max_tokens": self.max_tokens,
}
class _LLMRow:
def __init__(self, *, llm_name, fid, model_type, status="1", max_tokens=2048):
self.llm_name = llm_name
self.fid = fid
self.model_type = model_type
self.status = status
self.max_tokens = max_tokens
def to_dict(self):
return {
"llm_name": self.llm_name,
"fid": self.fid,
"model_type": self.model_type,
"status": self.status,
"max_tokens": self.max_tokens,
}
def _run(coro):
return asyncio.run(coro)
def _set_request_json(monkeypatch, module, payload):
    """Stub ``module.get_request_json`` to return a shallow copy of *payload*."""
    async def _fake_request_json():
        # Copy so route-side mutation cannot leak back into the test payload.
        return dict(payload)
    monkeypatch.setattr(module, "get_request_json", _fake_request_json)
def _load_llm_app(monkeypatch):
    """Load api/apps/llm_app.py in isolation, stubbing every import it needs.

    Installs lightweight stand-ins for quart, the service layer, api_utils,
    constants, DB models and the rag.llm model registries into sys.modules,
    then executes the module from source with a no-op route manager.
    """
    repo_root = Path(__file__).resolve().parents[4]
    # Minimal quart substitute exposing only ``request``.
    quart_mod = ModuleType("quart")
    quart_mod.request = SimpleNamespace(args={})
    monkeypatch.setitem(sys.modules, "quart", quart_mod)
    # api.apps package stub: login_required is a no-op, current_user is fixed.
    apps_mod = ModuleType("api.apps")
    apps_mod.__path__ = [str(repo_root / "api" / "apps")]
    apps_mod.login_required = lambda fn: fn
    apps_mod.current_user = SimpleNamespace(id="tenant-1")
    monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
    # Service-layer stubs; individual tests monkeypatch these further.
    tenant_llm_mod = ModuleType("api.db.services.tenant_llm_service")
    class _StubLLMFactoriesService:
        @staticmethod
        def query(**_kwargs):
            return []
    class _StubTenantLLMService:
        @staticmethod
        def ensure_mineru_from_env(_tenant_id):
            return None
        @staticmethod
        def query(**_kwargs):
            return []
        @staticmethod
        def get_my_llms(_tenant_id):
            return []
        @staticmethod
        def save(**_kwargs):
            return True
        @staticmethod
        def filter_delete(_filters):
            return True
        @staticmethod
        def filter_update(_filters, _payload):
            return True
    tenant_llm_mod.LLMFactoriesService = _StubLLMFactoriesService
    tenant_llm_mod.TenantLLMService = _StubTenantLLMService
    monkeypatch.setitem(sys.modules, "api.db.services.tenant_llm_service", tenant_llm_mod)
    llm_service_mod = ModuleType("api.db.services.llm_service")
    class _StubLLMService:
        @staticmethod
        def get_all():
            return []
        @staticmethod
        def query(**_kwargs):
            return []
    llm_service_mod.LLMService = _StubLLMService
    monkeypatch.setitem(sys.modules, "api.db.services.llm_service", llm_service_mod)
    # api_utils stub returning plain dicts instead of HTTP responses, so
    # tests can assert on ``res["code"]``/``res["message"]`` directly.
    api_utils_mod = ModuleType("api.utils.api_utils")
    api_utils_mod.get_allowed_llm_factories = lambda: []
    api_utils_mod.get_data_error_result = lambda message="", code=400, data=None: {
        "code": code,
        "message": message,
        "data": data,
    }
    api_utils_mod.get_json_result = lambda data=None, message="", code=0: {
        "code": code,
        "message": message,
        "data": data,
    }
    async def _get_request_json():
        return {}
    api_utils_mod.get_request_json = _get_request_json
    api_utils_mod.server_error_response = lambda exc: {"code": 500, "message": str(exc), "data": None}
    api_utils_mod.validate_request = lambda *_args, **_kwargs: (lambda fn: fn)
    monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)
    # Constants stub mirroring the StatusEnum/LLMType members the app reads.
    constants_mod = ModuleType("common.constants")
    constants_mod.StatusEnum = SimpleNamespace(VALID=SimpleNamespace(value="1"), INVALID=SimpleNamespace(value="0"))
    constants_mod.LLMType = SimpleNamespace(
        CHAT=_StrEnum("chat"),
        EMBEDDING=_StrEnum("embedding"),
        SPEECH2TEXT=_StrEnum("speech2text"),
        IMAGE2TEXT=_StrEnum("image2text"),
        RERANK=_StrEnum("rerank"),
        TTS=_StrEnum("tts"),
        OCR=_StrEnum("ocr"),
    )
    monkeypatch.setitem(sys.modules, "common.constants", constants_mod)
    db_models_mod = ModuleType("api.db.db_models")
    db_models_mod.TenantLLM = _DummyTenantLLMModel
    monkeypatch.setitem(sys.modules, "api.db.db_models", db_models_mod)
    base64_mod = ModuleType("rag.utils.base64_image")
    base64_mod.test_image = b"image-bytes"
    monkeypatch.setitem(sys.modules, "rag.utils.base64_image", base64_mod)
    # Empty model registries; tests install concrete model classes per case.
    rag_llm_mod = ModuleType("rag.llm")
    rag_llm_mod.EmbeddingModel = {}
    rag_llm_mod.ChatModel = {}
    rag_llm_mod.RerankModel = {}
    rag_llm_mod.CvModel = {}
    rag_llm_mod.TTSModel = {}
    rag_llm_mod.OcrModel = {}
    rag_llm_mod.Seq2txtModel = {}
    monkeypatch.setitem(sys.modules, "rag.llm", rag_llm_mod)
    # Execute llm_app.py from source under a private module name with the
    # no-op route manager pre-installed.
    module_path = repo_root / "api" / "apps" / "llm_app.py"
    spec = importlib.util.spec_from_file_location("test_llm_list_unit_module", module_path)
    module = importlib.util.module_from_spec(spec)
    module.manager = _DummyManager()
    spec.loader.exec_module(module)
    return module
@pytest.mark.p2
def test_list_app_grouping_availability_and_merge(monkeypatch):
    """list_app groups models by factory and merges tenant-only models in.

    Also verifies ensure_mineru_from_env is invoked for the current tenant
    and that models end up flagged available: the TEI builtin (via env vars),
    a tenant-configured catalogue model, and a tenant-only factory model.
    """
    module = _load_llm_app(monkeypatch)
    ensure_calls = []
    monkeypatch.setattr(module.TenantLLMService, "ensure_mineru_from_env", lambda tenant_id: ensure_calls.append(tenant_id))
    # Tenant-configured models: one matching the catalogue, one tenant-only.
    tenant_rows = [
        _TenantLLMRow(llm_name="fast-emb", llm_factory="FastEmbed", model_type="embedding", api_key="k1", status="1"),
        _TenantLLMRow(llm_name="tenant-only", llm_factory="CustomFactory", model_type="chat", api_key="k2", status="1"),
    ]
    monkeypatch.setattr(module.TenantLLMService, "query", lambda **_kwargs: tenant_rows)
    all_llms = [
        _LLMRow(llm_name="tei-embed", fid="Builtin", model_type="embedding", status="1"),
        _LLMRow(llm_name="fast-emb", fid="FastEmbed", model_type="embedding", status="1"),
        _LLMRow(llm_name="not-in-status", fid="Other", model_type="chat", status="1"),
    ]
    monkeypatch.setattr(module.LLMService, "get_all", lambda: all_llms)
    monkeypatch.setattr(module, "request", SimpleNamespace(args={}))
    # TEI profile env vars mark the builtin embedding model as available.
    monkeypatch.setenv("COMPOSE_PROFILES", "tei-cpu")
    monkeypatch.setenv("TEI_MODEL", "tei-embed")
    res = _run(module.list_app())
    assert res["code"] == 0
    assert ensure_calls == ["tenant-1"]
    data = res["data"]
    assert {"Builtin", "FastEmbed", "CustomFactory"}.issubset(set(data.keys()))
    builtin = data["Builtin"][0]
    assert builtin["llm_name"] == "tei-embed"
    assert builtin["available"] is True
    fastembed = data["FastEmbed"][0]
    assert fastembed["llm_name"] == "fast-emb"
    assert fastembed["available"] is True
    tenant_only = data["CustomFactory"][0]
    assert tenant_only["llm_name"] == "tenant-only"
    assert tenant_only["available"] is True
@pytest.mark.p2
def test_list_app_model_type_filter(monkeypatch):
    """A ``model_type=chat`` query keeps only the chat models in the response."""
    module = _load_llm_app(monkeypatch)
    monkeypatch.setattr(module.TenantLLMService, "ensure_mineru_from_env", lambda _tenant_id: None)
    tenant_rows = [
        _TenantLLMRow(llm_name="fast-emb", llm_factory="FastEmbed", model_type="embedding", api_key="k1", status="1"),
        _TenantLLMRow(llm_name="tenant-only", llm_factory="CustomFactory", model_type="chat", api_key="k2", status="1"),
    ]
    catalogue = [
        _LLMRow(llm_name="tei-embed", fid="Builtin", model_type="embedding", status="1"),
        _LLMRow(llm_name="fast-emb", fid="FastEmbed", model_type="embedding", status="1"),
    ]
    monkeypatch.setattr(module.TenantLLMService, "query", lambda **_kwargs: tenant_rows)
    monkeypatch.setattr(module.LLMService, "get_all", lambda: catalogue)
    monkeypatch.setattr(module, "request", SimpleNamespace(args={"model_type": "chat"}))
    res = _run(module.list_app())
    assert res["code"] == 0
    assert list(res["data"].keys()) == ["CustomFactory"]
    assert res["data"]["CustomFactory"][0]["model_type"] == "chat"
@pytest.mark.p2
def test_list_app_exception_path(monkeypatch):
    """A service-layer exception inside list_app becomes a 500 response."""
    module = _load_llm_app(monkeypatch)
    monkeypatch.setattr(module, "request", SimpleNamespace(args={}))
    monkeypatch.setattr(module.TenantLLMService, "ensure_mineru_from_env", lambda _tenant_id: None)
    def _boom(**_kwargs):
        raise RuntimeError("query boom")
    monkeypatch.setattr(module.TenantLLMService, "query", _boom)
    res = _run(module.list_app())
    assert res["code"] == 500
    assert "query boom" in res["message"]
@pytest.mark.p2
def test_factories_route_success_and_exception_unit(monkeypatch):
    """factories() filters out internal factories, aggregates model types from
    valid catalogue rows only, and turns exceptions into a 500 result."""
    module = _load_llm_app(monkeypatch)
    def _factory(name):
        # Factory stub whose to_dict late-binds the name via a default arg.
        return SimpleNamespace(name=name, to_dict=lambda n=name: {"name": n})
    monkeypatch.setattr(
        module,
        "get_allowed_llm_factories",
        lambda: [
            _factory("OpenAI"),
            _factory("CustomFactory"),
            _factory("FastEmbed"),
            _factory("Builtin"),
        ],
    )
    monkeypatch.setattr(
        module.LLMService,
        "get_all",
        lambda: [
            _LLMRow(llm_name="m1", fid="OpenAI", model_type="chat", status="1"),
            _LLMRow(llm_name="m2", fid="OpenAI", model_type="embedding", status="1"),
            _LLMRow(llm_name="m3", fid="OpenAI", model_type="rerank", status="0"),
        ],
    )
    res = module.factories()
    assert res["code"] == 0
    names = [item["name"] for item in res["data"]]
    # FastEmbed and Builtin are excluded from the public factory list.
    assert "FastEmbed" not in names
    assert "Builtin" not in names
    assert {"OpenAI", "CustomFactory"} == set(names)
    openai = next(item for item in res["data"] if item["name"] == "OpenAI")
    # The invalid (status="0") rerank row contributes no model type.
    assert {"chat", "embedding"} == set(openai["model_types"])
    # An exception from get_allowed_llm_factories yields a 500 response.
    monkeypatch.setattr(module, "get_allowed_llm_factories", lambda: (_ for _ in ()).throw(RuntimeError("factories boom")))
    res = module.factories()
    assert res["code"] == 500
    assert "factories boom" in res["message"]
@pytest.mark.p2
def test_set_api_key_model_probe_matrix_unit(monkeypatch):
    """set_api_key probes embedding/chat/rerank models before persisting.

    With verify=True probe failures are reported in data.message while the
    request still returns code 0; without verify the embedding failure fails
    the whole request; on a passing probe the key is persisted via
    filter_update and, when that returns False, via the save() fallback.
    """
    module = _load_llm_app(monkeypatch)
    # Run the asyncio helpers inline so probes execute synchronously.
    async def _wait_for(coro, *_args, **_kwargs):
        return await coro
    async def _to_thread(fn, *args, **kwargs):
        return fn(*args, **kwargs)
    monkeypatch.setattr(module.asyncio, "wait_for", _wait_for)
    monkeypatch.setattr(module.asyncio, "to_thread", _to_thread)
    # Failing probes: empty embedding vector, **ERROR** chat reply, empty
    # rerank similarity; the passing embedding returns a non-empty vector.
    class _EmbeddingFail:
        def __init__(self, *_args, **_kwargs):
            pass
        def encode(self, _texts):
            return [[]], 1
    class _EmbeddingPass:
        def __init__(self, *_args, **_kwargs):
            pass
        def encode(self, _texts):
            return [[0.1]], 1
    class _ChatFail:
        def __init__(self, *_args, **_kwargs):
            pass
        async def async_chat(self, *_args, **_kwargs):
            return "**ERROR** chat fail", 1
    class _RerankFail:
        def __init__(self, *_args, **_kwargs):
            pass
        def similarity(self, *_args, **_kwargs):
            return [], 0
    factory = "FactoryA"
    monkeypatch.setattr(
        module.LLMService,
        "query",
        lambda **_kwargs: [
            _LLMRow(llm_name="emb", fid=factory, model_type=module.LLMType.EMBEDDING.value, max_tokens=321),
            _LLMRow(llm_name="chat", fid=factory, model_type=module.LLMType.CHAT.value, max_tokens=654),
            _LLMRow(llm_name="rerank", fid=factory, model_type=module.LLMType.RERANK.value, max_tokens=987),
        ],
    )
    monkeypatch.setattr(module, "EmbeddingModel", {factory: _EmbeddingFail})
    monkeypatch.setattr(module, "ChatModel", {factory: _ChatFail})
    monkeypatch.setattr(module, "RerankModel", {factory: _RerankFail})
    # Case 1: verify=True — failures are reported, request still succeeds.
    req = {"llm_factory": factory, "api_key": "k", "base_url": "http://x", "verify": True}
    _set_request_json(monkeypatch, module, req)
    res = _run(module.set_api_key())
    assert res["code"] == 0
    assert res["data"]["success"] is False
    assert "Fail to access embedding model(emb)" in res["data"]["message"]
    assert "Fail to access model(FactoryA/chat)" in res["data"]["message"]
    assert "Fail to access model(FactoryA/rerank)" in res["data"]["message"]
    # Case 2: verify=False — the embedding failure fails the request (400).
    req["verify"] = False
    _set_request_json(monkeypatch, module, req)
    res = _run(module.set_api_key())
    assert res["code"] == 400
    assert "Fail to access embedding model(emb)" in res["message"]
    # Case 3: passing probe — record persistence calls; filter_update
    # returning False forces the save() fallback path.
    calls = {"filter_update": [], "save": []}
    def _filter_update(filters, payload):
        calls["filter_update"].append((filters, dict(payload)))
        return False
    def _save(**kwargs):
        calls["save"].append(kwargs)
        return True
    monkeypatch.setattr(module, "EmbeddingModel", {factory: _EmbeddingPass})
    monkeypatch.setattr(module.LLMService, "query", lambda **_kwargs: [_LLMRow(llm_name="emb-pass", fid=factory, model_type=module.LLMType.EMBEDDING.value, max_tokens=2049)])
    monkeypatch.setattr(module.TenantLLMService, "filter_update", _filter_update)
    monkeypatch.setattr(module.TenantLLMService, "save", _save)
    success_req = {
        "llm_factory": factory,
        "api_key": "k2",
        "base_url": "http://y",
        "model_type": "chat",
        "llm_name": "manual-model",
    }
    _set_request_json(monkeypatch, module, success_req)
    res = _run(module.set_api_key())
    assert res["code"] == 0
    assert res["data"] is True
    assert calls["filter_update"]
    # The request's explicit model_type/llm_name reach filter_update, with
    # the catalogue row's max_tokens...
    assert calls["filter_update"][0][1]["model_type"] == "chat"
    assert calls["filter_update"][0][1]["llm_name"] == "manual-model"
    assert calls["filter_update"][0][1]["max_tokens"] == 2049
    # ...while the save() fallback carries the catalogue row's name.
    assert calls["save"][0]["max_tokens"] == 2049
    assert calls["save"][0]["llm_name"] == "emb-pass"
@pytest.mark.p2
def test_add_llm_factory_specific_key_assembly_unit(monkeypatch):
    """add_llm assembles the stored api_key differently per factory.

    Covers: rejection of a factory outside the allowed list; JSON key bundles
    for VolcEngine, Bedrock, XunFei Spark (TTS), BaiduYiyan, Fish Audio,
    Google Cloud, Azure-OpenAI, OpenRouter, MinerU and PaddleOCR; the
    ``model___Factory`` llm_name suffixes for self-hosted factories; the
    plain-string Spark chat password; and the Tencent Cloud delegation to
    set_api_key with a JSON-encoded key.
    """
    module = _load_llm_app(monkeypatch)
    # Run the asyncio helpers inline so probes execute synchronously.
    async def _wait_for(coro, *_args, **_kwargs):
        return await coro
    async def _to_thread(fn, *args, **kwargs):
        return fn(*args, **kwargs)
    monkeypatch.setattr(module.asyncio, "wait_for", _wait_for)
    monkeypatch.setattr(module.asyncio, "to_thread", _to_thread)
    allowed = [
        "VolcEngine",
        "Tencent Cloud",
        "Bedrock",
        "LocalAI",
        "HuggingFace",
        "OpenAI-API-Compatible",
        "VLLM",
        "XunFei Spark",
        "BaiduYiyan",
        "Fish Audio",
        "Google Cloud",
        "Azure-OpenAI",
        "OpenRouter",
        "MinerU",
        "PaddleOCR",
    ]
    monkeypatch.setattr(module, "get_allowed_llm_factories", lambda: [SimpleNamespace(name=name) for name in allowed])
    # Capture model constructor args and the persisted filter_update payloads.
    captured = {"chat": [], "tts": [], "filter_payloads": []}
    class _ChatOK:
        def __init__(self, key, model_name, base_url="", **_kwargs):
            captured["chat"].append((key, model_name, base_url))
        async def async_chat(self, *_args, **_kwargs):
            return "ok", 1
    class _TTSOK:
        def __init__(self, key, model_name, base_url="", **_kwargs):
            captured["tts"].append((key, model_name, base_url))
        def tts(self, _text):
            yield b"ok"
    monkeypatch.setattr(module, "ChatModel", {name: _ChatOK for name in allowed})
    monkeypatch.setattr(module, "TTSModel", {"XunFei Spark": _TTSOK})
    monkeypatch.setattr(module.TenantLLMService, "filter_update", lambda _filters, payload: captured["filter_payloads"].append(dict(payload)) or True)
    # A factory outside the allowed list is rejected outright.
    reject_req = {"llm_factory": "NotAllowed", "llm_name": "x", "model_type": module.LLMType.CHAT.value}
    _set_request_json(monkeypatch, module, reject_req)
    res = _run(module.add_llm())
    assert res["code"] == 400
    assert "is not allowed" in res["message"]
    def _run_case(factory, *, model_type=module.LLMType.CHAT.value, extra=None):
        # Submit one add_llm request for *factory* and return the payload
        # that reached filter_update.
        req = {"llm_factory": factory, "llm_name": "model", "model_type": model_type, "api_key": "k", "api_base": "http://api"}
        if extra:
            req.update(extra)
        _set_request_json(monkeypatch, module, req)
        out = _run(module.add_llm())
        assert out["code"] == 0
        assert out["data"] is True
        return captured["filter_payloads"][-1]
    volc = _run_case("VolcEngine", extra={"ark_api_key": "ak", "endpoint_id": "eid"})
    assert json.loads(volc["api_key"]) == {"ark_api_key": "ak", "endpoint_id": "eid"}
    bedrock = _run_case(
        "Bedrock",
        extra={"auth_mode": "iam", "bedrock_ak": "ak", "bedrock_sk": "sk", "bedrock_region": "r", "aws_role_arn": "arn"},
    )
    assert json.loads(bedrock["api_key"]) == {
        "auth_mode": "iam",
        "bedrock_ak": "ak",
        "bedrock_sk": "sk",
        "bedrock_region": "r",
        "aws_role_arn": "arn",
    }
    # Self-hosted factories get a ``___Factory`` suffix appended to llm_name.
    localai = _run_case("LocalAI")
    assert localai["llm_name"] == "model___LocalAI"
    huggingface = _run_case("HuggingFace")
    assert huggingface["llm_name"] == "model___HuggingFace"
    openapi = _run_case("OpenAI-API-Compatible")
    assert openapi["llm_name"] == "model___OpenAI-API"
    vllm = _run_case("VLLM")
    assert vllm["llm_name"] == "model___VLLM"
    # Spark chat stores the plain password; Spark TTS stores a JSON bundle.
    spark_chat = _run_case("XunFei Spark", extra={"spark_api_password": "spark-pass"})
    assert spark_chat["api_key"] == "spark-pass"
    spark_tts = _run_case(
        "XunFei Spark",
        model_type=module.LLMType.TTS.value,
        extra={"spark_app_id": "app", "spark_api_secret": "secret", "spark_api_key": "key"},
    )
    assert json.loads(spark_tts["api_key"]) == {
        "spark_app_id": "app",
        "spark_api_secret": "secret",
        "spark_api_key": "key",
    }
    baidu = _run_case("BaiduYiyan", extra={"yiyan_ak": "ak", "yiyan_sk": "sk"})
    assert json.loads(baidu["api_key"]) == {"yiyan_ak": "ak", "yiyan_sk": "sk"}
    fish = _run_case("Fish Audio", extra={"fish_audio_ak": "ak", "fish_audio_refid": "rid"})
    assert json.loads(fish["api_key"]) == {"fish_audio_ak": "ak", "fish_audio_refid": "rid"}
    google = _run_case(
        "Google Cloud",
        extra={"google_project_id": "pid", "google_region": "us", "google_service_account_key": "sak"},
    )
    assert json.loads(google["api_key"]) == {
        "google_project_id": "pid",
        "google_region": "us",
        "google_service_account_key": "sak",
    }
    azure = _run_case("Azure-OpenAI", extra={"api_key": "real-key", "api_version": "2024-01-01"})
    assert json.loads(azure["api_key"]) == {"api_key": "real-key", "api_version": "2024-01-01"}
    openrouter = _run_case("OpenRouter", extra={"api_key": "or-key", "provider_order": "a,b"})
    assert json.loads(openrouter["api_key"]) == {"api_key": "or-key", "provider_order": "a,b"}
    mineru = _run_case("MinerU", extra={"api_key": "m-key", "provider_order": "p1"})
    assert json.loads(mineru["api_key"]) == {"api_key": "m-key", "provider_order": "p1"}
    paddle = _run_case("PaddleOCR", extra={"api_key": "p-key", "provider_order": "p2"})
    assert json.loads(paddle["api_key"]) == {"api_key": "p-key", "provider_order": "p2"}
    # Tencent Cloud delegates to set_api_key after building a JSON key.
    tencent_req = {
        "llm_factory": "Tencent Cloud",
        "llm_name": "model",
        "model_type": module.LLMType.CHAT.value,
        "tencent_cloud_sid": "sid",
        "tencent_cloud_sk": "sk",
    }
    async def _tencent_request_json():
        return tencent_req
    monkeypatch.setattr(module, "get_request_json", _tencent_request_json)
    delegated = {}
    async def _fake_set_api_key():
        # Capture the api_key that add_llm injected into the shared request.
        delegated["api_key"] = tencent_req.get("api_key")
        return {"code": 0, "data": "delegated"}
    monkeypatch.setattr(module, "set_api_key", _fake_set_api_key)
    res = _run(module.add_llm())
    assert res["code"] == 0
    assert res["data"] == "delegated"
    assert json.loads(delegated["api_key"]) == {"tencent_cloud_sid": "sid", "tencent_cloud_sk": "sk"}
@pytest.mark.p2
def test_add_llm_model_type_probe_and_persistence_matrix_unit(monkeypatch):
    """Drive add_llm() through every model-type connectivity probe and the save path.

    Each fake factory name maps to a stub model class whose probe deterministically
    passes or fails, so the assertions pin the exact error-message / response-code
    contract per model type, plus the persistence fallback (filter_update miss ->
    save) for a successful chat model.
    """
    module = _load_llm_app(monkeypatch)
    # Run probes inline: neutralize timeouts and thread offloading.
    async def _wait_for(coro, *_args, **_kwargs):
        return await coro
    async def _to_thread(fn, *args, **kwargs):
        return fn(*args, **kwargs)
    monkeypatch.setattr(module.asyncio, "wait_for", _wait_for)
    monkeypatch.setattr(module.asyncio, "to_thread", _to_thread)
    # Whitelist one fake factory per probe scenario.
    monkeypatch.setattr(
        module,
        "get_allowed_llm_factories",
        lambda: [
            SimpleNamespace(name=name)
            for name in [
                "FEmbFail",
                "FEmbPass",
                "FChatFail",
                "FChatPass",
                "FRKey",
                "FRFail",
                "FImgFail",
                "FTTSFail",
                "FOcrFail",
                "FSttFail",
                "FUnknown",
            ]
        ],
    )
    # Stub model classes: "Fail" variants return empty results, **ERROR** markers,
    # or raise; "Pass" variants return healthy probe values.
    class _EmbeddingFail:
        def __init__(self, *_args, **_kwargs):
            pass
        def encode(self, _texts):
            # Empty vector: the embedding probe treats this as a failure (see asserts below).
            return [[]], 1
    class _EmbeddingPass:
        def __init__(self, *_args, **_kwargs):
            pass
        def encode(self, _texts):
            return [[0.5]], 1
    class _ChatFail:
        def __init__(self, *_args, **_kwargs):
            pass
        async def async_chat(self, *_args, **_kwargs):
            return "**ERROR**: chat failed", 0
    class _ChatPass:
        def __init__(self, *_args, **_kwargs):
            pass
        async def async_chat(self, *_args, **_kwargs):
            return "ok", 1
    class _RerankFail:
        def __init__(self, *_args, **_kwargs):
            pass
        def similarity(self, *_args, **_kwargs):
            return [], 1
    class _CvFail:
        def __init__(self, *_args, **_kwargs):
            pass
        def describe(self, _image_data):
            return "**ERROR**: image failed", 0
    class _TTSFail:
        def __init__(self, *_args, **_kwargs):
            pass
        def tts(self, _text):
            # The trailing yield makes this a generator: calling tts() succeeds,
            # iterating it raises.
            raise RuntimeError("tts fail")
            yield b"x"
    class _OcrFail:
        def __init__(self, *_args, **_kwargs):
            pass
        def check_available(self):
            return False, "ocr unavailable"
    class _SttFail:
        def __init__(self, *_args, **_kwargs):
            raise RuntimeError("stt fail")
    class _RerankKeyMap(dict):
        # Claims to contain "FRKey" but raises on access, simulating a factory
        # whose rerank entry is unusable.
        def __contains__(self, key):
            if key == "FRKey":
                return True
            return super().__contains__(key)
        def __getitem__(self, key):
            if key == "FRKey":
                raise KeyError("rerank key fail")
            return super().__getitem__(key)
    monkeypatch.setattr(module, "EmbeddingModel", {"FEmbFail": _EmbeddingFail, "FEmbPass": _EmbeddingPass})
    monkeypatch.setattr(module, "ChatModel", {"FChatFail": _ChatFail, "FChatPass": _ChatPass})
    monkeypatch.setattr(module, "RerankModel", _RerankKeyMap({"FRFail": _RerankFail}))
    monkeypatch.setattr(module, "CvModel", {"FImgFail": _CvFail})
    monkeypatch.setattr(module, "TTSModel", {"FTTSFail": _TTSFail})
    monkeypatch.setattr(module, "OcrModel", {"FOcrFail": _OcrFail})
    monkeypatch.setattr(module, "Seq2txtModel", {"FSttFail": _SttFail})
    def _call(req):
        # Feed the payload to the route and run it to completion.
        _set_request_json(monkeypatch, module, req)
        return _run(module.add_llm())
    # verify=True: probe failures are reported inside data while code stays 0.
    res = _call({"llm_factory": "FEmbFail", "llm_name": "m", "model_type": module.LLMType.EMBEDDING.value, "verify": True})
    assert res["code"] == 0
    assert res["data"]["success"] is False
    assert "Fail to access embedding model(m)." in res["data"]["message"]
    # Without verify, the same embedding failure becomes a 400 response.
    res = _call({"llm_factory": "FEmbFail", "llm_name": "m", "model_type": module.LLMType.EMBEDDING.value})
    assert res["code"] == 400
    assert "Fail to access embedding model(m)." in res["message"]
    res = _call({"llm_factory": "FChatFail", "llm_name": "m", "model_type": module.LLMType.CHAT.value, "verify": True})
    assert res["code"] == 0
    assert "Fail to access model(FChatFail/m)." in res["data"]["message"]
    # "dose not support" matches the route's message verbatim (typo included).
    res = _call({"llm_factory": "FRKey", "llm_name": "m", "model_type": module.LLMType.RERANK.value, "verify": True})
    assert res["code"] == 0
    assert "dose not support this model(FRKey/m)" in res["data"]["message"]
    res = _call({"llm_factory": "FRFail", "llm_name": "m", "model_type": module.LLMType.RERANK.value, "verify": True})
    assert res["code"] == 0
    assert "Fail to access model(FRFail/m)." in res["data"]["message"]
    res = _call({"llm_factory": "FImgFail", "llm_name": "m", "model_type": module.LLMType.IMAGE2TEXT.value, "verify": True})
    assert res["code"] == 0
    assert "Fail to access model(FImgFail/m)." in res["data"]["message"]
    res = _call({"llm_factory": "FTTSFail", "llm_name": "m", "model_type": module.LLMType.TTS.value, "verify": True})
    assert res["code"] == 0
    assert "Fail to access model(FTTSFail/m)." in res["data"]["message"]
    res = _call({"llm_factory": "FOcrFail", "llm_name": "m", "model_type": module.LLMType.OCR.value, "verify": True})
    assert res["code"] == 0
    assert "Fail to access model(FOcrFail/m)." in res["data"]["message"]
    res = _call({"llm_factory": "FSttFail", "llm_name": "m", "model_type": module.LLMType.SPEECH2TEXT.value, "verify": True})
    assert res["code"] == 0
    assert "Fail to access model(FSttFail/m)." in res["data"]["message"]
    # An unrecognized model type escapes as a RuntimeError rather than a response.
    _set_request_json(monkeypatch, module, {"llm_factory": "FUnknown", "llm_name": "m", "model_type": "unknown"})
    with pytest.raises(RuntimeError, match="Unknown model type: unknown"):
        _run(module.add_llm())
    # Persistence: when filter_update misses, the route falls back to save().
    saved = []
    monkeypatch.setattr(module.TenantLLMService, "filter_update", lambda _filters, _payload: False)
    monkeypatch.setattr(module.TenantLLMService, "save", lambda **kwargs: saved.append(kwargs) or True)
    res = _call({"llm_factory": "FChatPass", "llm_name": "m", "model_type": module.LLMType.CHAT.value, "api_key": "k"})
    assert res["code"] == 0
    assert res["data"] is True
    assert saved
    assert saved[0]["llm_factory"] == "FChatPass"
@pytest.mark.p2
def test_llm_mutation_routes_unit(monkeypatch):
    """delete_llm, enable_llm and delete_factory succeed and hit the service layer."""
    module = _load_llm_app(monkeypatch)
    deleted_filters = []
    updated_calls = []

    def _record_delete(filters):
        deleted_filters.append(filters)
        return True

    def _record_update(filters, payload):
        updated_calls.append((filters, payload))
        return True

    monkeypatch.setattr(module.TenantLLMService, "filter_delete", _record_delete)
    monkeypatch.setattr(module.TenantLLMService, "filter_update", _record_update)

    _set_request_json(monkeypatch, module, {"llm_factory": "OpenAI", "llm_name": "gpt"})
    delete_res = _run(module.delete_llm())
    assert delete_res["code"] == 0
    assert delete_res["data"] is True

    _set_request_json(monkeypatch, module, {"llm_factory": "OpenAI", "llm_name": "gpt", "status": 0})
    enable_res = _run(module.enable_llm())
    assert enable_res["code"] == 0
    assert enable_res["data"] is True
    # The integer status from the request is persisted as a string.
    assert updated_calls[0][1]["status"] == "0"

    _set_request_json(monkeypatch, module, {"llm_factory": "OpenAI"})
    factory_res = _run(module.delete_factory())
    assert factory_res["code"] == 0
    assert factory_res["data"] is True
    # One delete from delete_llm plus one from delete_factory.
    assert len(deleted_filters) == 2
@pytest.mark.p2
def test_my_llms_include_details_and_exception_unit(monkeypatch):
    """my_llms() returns per-factory details with include_details=true and maps errors to 500."""
    module = _load_llm_app(monkeypatch)
    monkeypatch.setattr(module, "request", SimpleNamespace(args={"include_details": "true"}))
    ensure_calls = []
    monkeypatch.setattr(module.TenantLLMService, "ensure_mineru_from_env", lambda tenant_id: ensure_calls.append(tenant_id))
    row_fields = dict(
        llm_name="chat-model",
        llm_factory="FactoryX",
        model_type="chat",
        used_tokens=42,
        api_base="",
        max_tokens=4096,
        status="1",
    )
    monkeypatch.setattr(module.TenantLLMService, "query", lambda **_kwargs: [_TenantLLMRow(**row_fields)])
    monkeypatch.setattr(module.LLMFactoriesService, "query", lambda **_kwargs: [SimpleNamespace(name="FactoryX", tags=["tag-a"])])

    res = module.my_llms()
    assert res["code"] == 0
    assert ensure_calls == ["tenant-1"]
    assert "FactoryX" in res["data"]
    factory_block = res["data"]["FactoryX"]
    assert factory_block["tags"] == ["tag-a"]
    first_llm = factory_block["llm"][0]
    assert first_llm["used_token"] == 42
    assert first_llm["max_tokens"] == 4096

    def _boom(_tenant_id):
        raise RuntimeError("my llms boom")

    monkeypatch.setattr(module.TenantLLMService, "ensure_mineru_from_env", _boom)
    res = module.my_llms()
    assert res["code"] == 500
    assert "my llms boom" in res["message"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_llm_app/test_llm_list_unit.py",
"license": "Apache License 2.0",
"lines": 692,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_mcp_server_app/test_mcp_server_app_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import inspect
import json
import sys
from functools import wraps
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _Field:
def __init__(self, name):
self.name = name
def __eq__(self, other):
return (self.name, other)
class _DummyMCPServer:
    """Lightweight MCPServer model double."""

    # Class-level _Field sentinels emulate column expressions (e.g. MCPServer.id == x);
    # per-instance attributes assigned in __init__ shadow them.
    id = _Field("id")
    tenant_id = _Field("tenant_id")

    def __init__(self, **kwargs):
        # Fresh default containers per instance so mutations never leak across tests.
        defaults = {
            "id": "",
            "name": "",
            "url": "",
            "server_type": "sse",
            "tenant_id": "tenant_1",
            "variables": {},
            "headers": {},
        }
        for attr, fallback in defaults.items():
            setattr(self, attr, kwargs.get(attr, fallback))

    def to_dict(self):
        # Serialize the same seven attributes the constructor populates.
        keys = ("id", "name", "url", "server_type", "tenant_id", "variables", "headers")
        return {key: getattr(self, key) for key in keys}
class _DummyMCPServerService:
@staticmethod
def get_servers(*_args, **_kwargs):
return []
@staticmethod
def get_or_none(*_args, **_kwargs):
return None
@staticmethod
def get_by_id(*_args, **_kwargs):
return False, None
@staticmethod
def get_by_name_and_tenant(*_args, **_kwargs):
return False, None
@staticmethod
def insert(**_kwargs):
return True
@staticmethod
def filter_update(*_args, **_kwargs):
return True
@staticmethod
def delete_by_ids(*_args, **_kwargs):
return True
class _DummyTenantService:
@staticmethod
def get_by_id(*_args, **_kwargs):
return True, SimpleNamespace(id="tenant_1")
class _DummyTool:
def __init__(self, name):
self._name = name
def model_dump(self):
return {"name": self._name}
class _DummyMCPToolCallSession:
    """Tool-call session double serving two canned tools; calls always return 'ok'."""

    def __init__(self, _mcp_server, _variables):
        self._tools = [_DummyTool(tool_name) for tool_name in ("tool_a", "tool_b")]

    def get_tools(self, _timeout):
        return self._tools

    def tool_call(self, _name, _arguments, _timeout):
        return "ok"
def _run(coro):
return asyncio.run(coro)
def _set_request_json(monkeypatch, module, payload):
async def _request_json():
return payload
monkeypatch.setattr(module, "get_request_json", _request_json)
@pytest.fixture(scope="session")
def auth():
    """Session-scoped stand-in for the auth token expected by shared conftest hooks."""
    return "unit-auth"
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    """No-op override of the shared tenant-setup fixture; these unit tests need no tenant."""
    return None
def _load_mcp_server_app(monkeypatch):
    """Import api/apps/mcp_server_app.py with all of its project imports stubbed.

    Every module the route file imports (api.apps, db models, services, the MCP
    tool-call connector, api/web utils) is replaced in sys.modules with an
    in-memory double before the file is executed, so route handlers can be
    called directly without Flask, a database, or a live MCP server.
    """
    repo_root = Path(__file__).resolve().parents[4]
    # "common" must be importable as a package so its submodules can be stubbed.
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    # Flask app surface: a fixed current_user and a pass-through login_required.
    apps_mod = ModuleType("api.apps")
    apps_mod.current_user = SimpleNamespace(id="tenant_1")
    apps_mod.login_required = lambda func: func
    monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
    db_models_mod = ModuleType("api.db.db_models")
    db_models_mod.MCPServer = _DummyMCPServer
    monkeypatch.setitem(sys.modules, "api.db.db_models", db_models_mod)
    mcp_service_mod = ModuleType("api.db.services.mcp_server_service")
    mcp_service_mod.MCPServerService = _DummyMCPServerService
    monkeypatch.setitem(sys.modules, "api.db.services.mcp_server_service", mcp_service_mod)
    user_service_mod = ModuleType("api.db.services.user_service")
    user_service_mod.TenantService = _DummyTenantService
    monkeypatch.setitem(sys.modules, "api.db.services.user_service", user_service_mod)
    mcp_conn_mod = ModuleType("common.mcp_tool_call_conn")
    mcp_conn_mod.MCPToolCallSession = _DummyMCPToolCallSession
    mcp_conn_mod.close_multiple_mcp_toolcall_sessions = lambda _sessions: None
    monkeypatch.setitem(sys.modules, "common.mcp_tool_call_conn", mcp_conn_mod)
    # Minimal re-implementations of the api_utils helpers the routes rely on.
    api_utils_mod = ModuleType("api.utils.api_utils")
    async def _default_request_json():
        return {}
    def _get_json_result(code=0, message="success", data=None):
        return {"code": code, "message": message, "data": data}
    def _get_data_error_result(code=102, message="Sorry! Data missing!"):
        return {"code": code, "message": message}
    def _server_error_response(error):
        return {"code": 100, "message": repr(error)}
    async def _get_mcp_tools(*_args, **_kwargs):
        return {}
    def _validate_request(*_args, **_kwargs):
        # No-op validator that still awaits coroutine handlers correctly.
        def _decorator(func):
            @wraps(func)
            async def _wrapped(*func_args, **func_kwargs):
                if inspect.iscoroutinefunction(func):
                    return await func(*func_args, **func_kwargs)
                return func(*func_args, **func_kwargs)
            return _wrapped
        return _decorator
    api_utils_mod.get_request_json = _default_request_json
    api_utils_mod.get_json_result = _get_json_result
    api_utils_mod.get_data_error_result = _get_data_error_result
    api_utils_mod.server_error_response = _server_error_response
    api_utils_mod.validate_request = _validate_request
    api_utils_mod.get_mcp_tools = _get_mcp_tools
    monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)
    web_utils_mod = ModuleType("api.utils.web_utils")
    def _get_float(data, key, default):
        try:
            return float(data.get(key, default))
        except (TypeError, ValueError):
            return default
    def _safe_json_parse(value):
        # dict/list pass through; None/"" become {}; otherwise best-effort JSON.
        if isinstance(value, (dict, list)):
            return value
        if value in (None, ""):
            return {}
        try:
            return json.loads(value)
        except (TypeError, ValueError):
            return {}
    web_utils_mod.get_float = _get_float
    web_utils_mod.safe_json_parse = _safe_json_parse
    monkeypatch.setitem(sys.modules, "api.utils.web_utils", web_utils_mod)
    # Load the real route module from source with all stubs in place.
    module_name = "test_mcp_server_app_unit_module"
    module_path = repo_root / "api" / "apps" / "mcp_server_app.py"
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    # Inject the blueprint manager before exec so @manager.route decorators resolve.
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, module_name, module)
    spec.loader.exec_module(module)
    return module
@pytest.mark.p2
def test_list_mcp_desc_pagination_and_exception(monkeypatch):
    """list_mcp paginates ascending results and converts service errors to code 100."""
    module = _load_mcp_server_app(monkeypatch)
    query_args = {"keywords": "k", "page": "2", "page_size": "1", "orderby": "create_time", "desc": "false"}
    monkeypatch.setattr(module, "request", SimpleNamespace(args=query_args))
    _set_request_json(monkeypatch, module, {"mcp_ids": []})
    monkeypatch.setattr(module.MCPServerService, "get_servers", lambda *_args, **_kwargs: [{"id": "a"}, {"id": "b"}])
    page_res = _run(module.list_mcp())
    assert page_res["code"] == 0
    # Page 2 at page_size 1 over two servers leaves exactly the second record.
    assert page_res["data"]["total"] == 2
    assert page_res["data"]["mcp_servers"] == [{"id": "b"}]

    monkeypatch.setattr(module, "request", SimpleNamespace(args={}))
    _set_request_json(monkeypatch, module, {"mcp_ids": []})

    def _explode(*_args, **_kwargs):
        raise RuntimeError("list explode")

    monkeypatch.setattr(module.MCPServerService, "get_servers", _explode)
    error_res = _run(module.list_mcp())
    assert error_res["code"] == 100
    assert "list explode" in error_res["message"]
@pytest.mark.p2
def test_detail_not_found_success_and_exception(monkeypatch):
    """detail() yields NOT_FOUND, the record dict, or code 100 on service failure."""
    module = _load_mcp_server_app(monkeypatch)
    monkeypatch.setattr(module, "request", SimpleNamespace(args={"mcp_id": "mcp-1"}))

    # Missing record -> NOT_FOUND.
    monkeypatch.setattr(module.MCPServerService, "get_or_none", lambda **_kwargs: None)
    missing_res = module.detail()
    assert missing_res["code"] == module.RetCode.NOT_FOUND

    # Record owned by the current tenant -> success payload.
    found = _DummyMCPServer(id="mcp-1", name="srv", url="http://a", server_type="sse", tenant_id="tenant_1")
    monkeypatch.setattr(module.MCPServerService, "get_or_none", lambda **_kwargs: found)
    ok_res = module.detail()
    assert ok_res["code"] == 0
    assert ok_res["data"]["id"] == "mcp-1"

    # Service raising -> generic error envelope.
    def _explode(**_kwargs):
        raise RuntimeError("detail explode")

    monkeypatch.setattr(module.MCPServerService, "get_or_none", _explode)
    err_res = module.detail()
    assert err_res["code"] == 100
    assert "detail explode" in err_res["message"]
@pytest.mark.p2
def test_create_validation_guards(monkeypatch):
    """create() rejects bad server types, blank names, duplicate names and empty urls."""
    module = _load_mcp_server_app(monkeypatch)

    def _expect(payload, fragment):
        # Run create() with *payload* and require *fragment* in the error message.
        _set_request_json(monkeypatch, module, payload)
        res = _run(module.create.__wrapped__())
        assert fragment in res["message"]

    monkeypatch.setattr(module.MCPServerService, "get_by_name_and_tenant", lambda **_kwargs: (False, None))
    _expect({"name": "srv", "url": "http://a", "server_type": "invalid"}, "Unsupported MCP server type")
    _expect({"name": "", "url": "http://a", "server_type": "sse"}, "Invalid MCP name")

    # A name already taken for the tenant is rejected as a duplicate.
    monkeypatch.setattr(module.MCPServerService, "get_by_name_and_tenant", lambda **_kwargs: (True, object()))
    _expect({"name": "srv", "url": "http://a", "server_type": "sse"}, "Duplicated MCP server name")

    monkeypatch.setattr(module.MCPServerService, "get_by_name_and_tenant", lambda **_kwargs: (False, None))
    _expect({"name": "srv", "url": "", "server_type": "sse"}, "Invalid url")
@pytest.mark.p2
def test_create_service_paths(monkeypatch):
    """Walk create() through tenant-missing, tool-fetch failure, insert failure,
    success, and an exception escaping the tool fetch."""
    module = _load_mcp_server_app(monkeypatch)
    base_payload = {
        "name": "srv",
        "url": "http://server",
        "server_type": "sse",
        "headers": '{"Authorization": "x"}',
        "variables": '{"tools": {"old": 1}, "token": "abc"}',
        "timeout": "2.5",
    }
    monkeypatch.setattr(module, "get_uuid", lambda: "uuid-create")
    monkeypatch.setattr(module.MCPServerService, "get_by_name_and_tenant", lambda **_kwargs: (False, None))
    # Tenant lookup fails -> early rejection.
    _set_request_json(monkeypatch, module, dict(base_payload))
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda *_args, **_kwargs: (False, None))
    res = _run(module.create.__wrapped__())
    assert "Tenant not found" in res["message"]
    # Tool listing fails -> the error string from thread_pool_exec becomes the code.
    _set_request_json(monkeypatch, module, dict(base_payload))
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda *_args, **_kwargs: (True, object()))
    async def _thread_pool_tools_error(_func, _servers, _timeout):
        return None, "tools error"
    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_tools_error)
    res = _run(module.create.__wrapped__())
    assert res["code"] == "tools error"
    assert "Sorry! Data missing!" in res["message"]
    # Insert failing -> create-failure message; malformed tool entries (no "name")
    # are dropped from the stored tool map.
    _set_request_json(monkeypatch, module, dict(base_payload))
    async def _thread_pool_ok(_func, servers, _timeout):
        return {servers[0].name: [{"name": "tool_a"}, {"invalid": True}]}, None
    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_ok)
    monkeypatch.setattr(module.MCPServerService, "insert", lambda **_kwargs: False)
    res = _run(module.create.__wrapped__())
    assert res["code"] == "Failed to create MCP server."
    assert "Sorry! Data missing!" in res["message"]
    # Happy path: record echoed back with the generated id and tenant.
    _set_request_json(monkeypatch, module, dict(base_payload))
    monkeypatch.setattr(module.MCPServerService, "insert", lambda **_kwargs: True)
    res = _run(module.create.__wrapped__())
    assert res["code"] == 0
    assert res["data"]["id"] == "uuid-create"
    assert res["data"]["tenant_id"] == "tenant_1"
    assert res["data"]["variables"]["tools"] == {"tool_a": {"name": "tool_a"}}
    # thread_pool_exec raising -> generic code-100 envelope.
    _set_request_json(monkeypatch, module, dict(base_payload))
    async def _thread_pool_raises(_func, _servers, _timeout):
        raise RuntimeError("create explode")
    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_raises)
    res = _run(module.create.__wrapped__())
    assert res["code"] == 100
    assert "create explode" in res["message"]
@pytest.mark.p2
def test_update_validation_guards(monkeypatch):
    """update() hides missing/foreign servers and validates type, name and url."""
    module = _load_mcp_server_app(monkeypatch)
    existing = _DummyMCPServer(id="mcp-1", name="srv", url="http://server", server_type="sse", tenant_id="tenant_1", variables={}, headers={})

    def _expect(payload, fragment):
        # Run update() with *payload* and require *fragment* in the error message.
        _set_request_json(monkeypatch, module, payload)
        res = _run(module.update.__wrapped__())
        assert fragment in res["message"]

    # No such record.
    monkeypatch.setattr(module.MCPServerService, "get_by_id", lambda _mcp_id: (False, None))
    _expect({"mcp_id": "mcp-1"}, "Cannot find MCP server")

    # Record owned by another tenant is reported identically to a missing one.
    foreign = _DummyMCPServer(id="mcp-1", name="srv", url="http://server", server_type="sse", tenant_id="other", variables={}, headers={})
    monkeypatch.setattr(module.MCPServerService, "get_by_id", lambda _mcp_id: (True, foreign))
    _expect({"mcp_id": "mcp-1"}, "Cannot find MCP server")

    # Field validation against an owned record.
    monkeypatch.setattr(module.MCPServerService, "get_by_id", lambda _mcp_id: (True, existing))
    _expect({"mcp_id": "mcp-1", "server_type": "invalid"}, "Unsupported MCP server type")
    _expect({"mcp_id": "mcp-1", "name": "a" * 256}, "Invalid MCP name")
    _expect({"mcp_id": "mcp-1", "url": ""}, "Invalid url")
@pytest.mark.p2
def test_update_service_paths(monkeypatch):
    """Walk update() through tool-fetch failure, update failure, re-fetch failure,
    success, and an exception escaping the tool refresh."""
    module = _load_mcp_server_app(monkeypatch)
    # Record currently stored for mcp-1, owned by the current tenant.
    existing = _DummyMCPServer(
        id="mcp-1",
        name="srv",
        url="http://server",
        server_type="sse",
        tenant_id="tenant_1",
        variables={"tools": {"old": {"enabled": True}}, "token": "abc"},
        headers={"Authorization": "old"},
    )
    # Record returned by the re-fetch after a successful update.
    updated = _DummyMCPServer(
        id="mcp-1",
        name="srv-new",
        url="http://server-new",
        server_type="sse",
        tenant_id="tenant_1",
        variables={"tools": {"tool_a": {"name": "tool_a"}}},
        headers={"Authorization": "new"},
    )
    base_payload = {
        "mcp_id": "mcp-1",
        "name": "srv-new",
        "url": "http://server-new",
        "server_type": "sse",
        "headers": '{"Authorization": "new"}',
        "variables": '{"tools": {"ignore": 1}, "token": "new"}',
        "timeout": "3.0",
    }
    # Tool listing fails -> the error string from thread_pool_exec becomes the code.
    _set_request_json(monkeypatch, module, dict(base_payload))
    monkeypatch.setattr(module.MCPServerService, "get_by_id", lambda _mcp_id: (True, existing))
    async def _thread_pool_tools_error(_func, _servers, _timeout):
        return None, "update tools error"
    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_tools_error)
    res = _run(module.update.__wrapped__())
    assert res["code"] == "update tools error"
    assert "Sorry! Data missing!" in res["message"]
    # filter_update returning False -> update-failure message.
    _set_request_json(monkeypatch, module, dict(base_payload))
    async def _thread_pool_ok(_func, servers, _timeout):
        return {servers[0].name: [{"name": "tool_a"}, {"bad": True}]}, None
    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_ok)
    monkeypatch.setattr(module.MCPServerService, "filter_update", lambda *_args, **_kwargs: False)
    res = _run(module.update.__wrapped__())
    assert "Failed to updated MCP server" in res["message"]
    # First get_by_id (lookup) succeeds, second (post-update re-fetch) fails.
    _set_request_json(monkeypatch, module, dict(base_payload))
    monkeypatch.setattr(module.MCPServerService, "filter_update", lambda *_args, **_kwargs: True)
    def _get_by_id_fetch_fail(_mcp_id):
        if _get_by_id_fetch_fail.calls == 0:
            _get_by_id_fetch_fail.calls += 1
            return True, existing
        return False, None
    _get_by_id_fetch_fail.calls = 0
    monkeypatch.setattr(module.MCPServerService, "get_by_id", _get_by_id_fetch_fail)
    res = _run(module.update.__wrapped__())
    assert "Failed to fetch updated MCP server" in res["message"]
    # Both lookups succeed -> the updated record is returned.
    _set_request_json(monkeypatch, module, dict(base_payload))
    def _get_by_id_success(_mcp_id):
        if _get_by_id_success.calls == 0:
            _get_by_id_success.calls += 1
            return True, existing
        return True, updated
    _get_by_id_success.calls = 0
    monkeypatch.setattr(module.MCPServerService, "get_by_id", _get_by_id_success)
    res = _run(module.update.__wrapped__())
    assert res["code"] == 0
    assert res["data"]["id"] == "mcp-1"
    # thread_pool_exec raising -> generic code-100 envelope.
    _set_request_json(monkeypatch, module, dict(base_payload))
    monkeypatch.setattr(module.MCPServerService, "get_by_id", lambda _mcp_id: (True, existing))
    async def _thread_pool_raises(_func, _servers, _timeout):
        raise RuntimeError("update explode")
    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_raises)
    res = _run(module.update.__wrapped__())
    assert res["code"] == 100
    assert "update explode" in res["message"]
@pytest.mark.p2
def test_rm_failure_success_and_exception(monkeypatch):
    """rm() reports delete failures, successes, and wraps service exceptions."""
    module = _load_mcp_server_app(monkeypatch)
    payload = {"mcp_ids": ["a", "b"]}

    _set_request_json(monkeypatch, module, dict(payload))
    monkeypatch.setattr(module.MCPServerService, "delete_by_ids", lambda _ids: False)
    fail_res = _run(module.rm.__wrapped__())
    assert "Failed to delete MCP servers" in fail_res["message"]

    _set_request_json(monkeypatch, module, dict(payload))
    monkeypatch.setattr(module.MCPServerService, "delete_by_ids", lambda _ids: True)
    ok_res = _run(module.rm.__wrapped__())
    assert ok_res["code"] == 0
    assert ok_res["data"] is True

    _set_request_json(monkeypatch, module, dict(payload))

    def _explode(_ids):
        raise RuntimeError("rm explode")

    monkeypatch.setattr(module.MCPServerService, "delete_by_ids", _explode)
    err_res = _run(module.rm.__wrapped__())
    assert err_res["code"] == 100
    assert "rm explode" in err_res["message"]
@pytest.mark.p2
def test_import_multiple_missing_servers_and_exception(monkeypatch):
    """import_multiple rejects an empty server map and wraps lookup errors."""
    module = _load_mcp_server_app(monkeypatch)

    _set_request_json(monkeypatch, module, {"mcpServers": {}})
    empty_res = _run(module.import_multiple.__wrapped__())
    assert "No MCP servers provided" in empty_res["message"]

    _set_request_json(monkeypatch, module, {"mcpServers": {"srv": {"type": "sse", "url": "http://x"}}, "timeout": "1"})

    def _explode(**_kwargs):
        raise RuntimeError("import explode")

    monkeypatch.setattr(module.MCPServerService, "get_by_name_and_tenant", _explode)
    err_res = _run(module.import_multiple.__wrapped__())
    assert err_res["code"] == 100
    assert "import explode" in err_res["message"]
@pytest.mark.p2
def test_import_multiple_mixed_results(monkeypatch):
    """import_multiple reports a per-server verdict: missing fields, bad name,
    duplicate renaming, tool-fetch failure, and insert failure."""
    module = _load_mcp_server_app(monkeypatch)
    payload = {
        "mcpServers": {
            "missing_fields": {"type": "sse"},
            "": {"type": "sse", "url": "http://empty"},
            "dup": {"type": "sse", "url": "http://dup", "authorization_token": "dup-token"},
            "tool_err": {"type": "sse", "url": "http://err"},
            "insert_fail": {"type": "sse", "url": "http://fail"},
        },
        "timeout": "3",
    }
    _set_request_json(monkeypatch, module, payload)
    monkeypatch.setattr(module, "get_uuid", lambda: "uuid-import")
    def _get_by_name_and_tenant(name, tenant_id):
        # Report "dup" as taken only on its first lookup; the retry with the
        # suffixed name then succeeds (see the "dup_0" assertions below).
        if name == "dup" and not _get_by_name_and_tenant.first_dup_seen:
            _get_by_name_and_tenant.first_dup_seen = True
            return True, object()
        return False, None
    _get_by_name_and_tenant.first_dup_seen = False
    monkeypatch.setattr(module.MCPServerService, "get_by_name_and_tenant", _get_by_name_and_tenant)
    async def _thread_pool_exec(func, servers, _timeout):
        # "tool_err" fails its tool fetch; every other server yields one valid
        # tool plus one malformed entry (no "name" key).
        mcp_server = servers[0]
        if mcp_server.name == "tool_err":
            return None, "tool call failed"
        return {mcp_server.name: [{"name": "tool_a"}, {"invalid": True}]}, None
    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_exec)
    def _insert(**kwargs):
        return kwargs["name"] != "insert_fail"
    monkeypatch.setattr(module.MCPServerService, "insert", _insert)
    res = _run(module.import_multiple.__wrapped__())
    assert res["code"] == 0
    # Index the per-server result entries by server name for the assertions.
    results = {item["server"]: item for item in res["data"]["results"]}
    assert results["missing_fields"]["success"] is False
    assert "Missing required fields" in results["missing_fields"]["message"]
    assert results[""]["success"] is False
    assert "Invalid MCP name" in results[""]["message"]
    assert results["tool_err"]["success"] is False
    assert "tool call failed" in results["tool_err"]["message"]
    assert results["insert_fail"]["success"] is False
    assert "Failed to create MCP server" in results["insert_fail"]["message"]
    assert results["dup"]["success"] is True
    assert results["dup"]["new_name"] == "dup_0"
    assert "Renamed from 'dup' to 'dup_0' avoid duplication" == results["dup"]["message"]
@pytest.mark.p2
def test_export_multiple_missing_ids_success_and_exception(monkeypatch):
    """export_multiple rejects an empty id list, exports only the caller's
    servers, and wraps service errors as code 100."""
    module = _load_mcp_server_app(monkeypatch)
    _set_request_json(monkeypatch, module, {"mcp_ids": []})
    res = _run(module.export_multiple.__wrapped__())
    assert "No MCP server IDs provided" in res["message"]
    # id1 belongs to the caller, id2 to another tenant, id3 does not exist.
    _set_request_json(monkeypatch, module, {"mcp_ids": ["id1", "id2", "id3"]})
    def _get_by_id(mcp_id):
        if mcp_id == "id1":
            return True, _DummyMCPServer(
                id="id1",
                name="srv-one",
                url="http://one",
                server_type="sse",
                tenant_id="tenant_1",
                variables={"authorization_token": "tok", "tools": {"tool_a": {"enabled": True}}},
            )
        if mcp_id == "id2":
            return True, _DummyMCPServer(
                id="id2",
                name="srv-two",
                url="http://two",
                server_type="sse",
                tenant_id="other",
                variables={},
            )
        return False, None
    monkeypatch.setattr(module.MCPServerService, "get_by_id", _get_by_id)
    res = _run(module.export_multiple.__wrapped__())
    assert res["code"] == 0
    # Only the caller-owned server makes it into the export.
    assert list(res["data"]["mcpServers"].keys()) == ["srv-one"]
    _set_request_json(monkeypatch, module, {"mcp_ids": ["id1"]})
    def _raise_export(_mcp_id):
        raise RuntimeError("export explode")
    monkeypatch.setattr(module.MCPServerService, "get_by_id", _raise_export)
    res = _run(module.export_multiple.__wrapped__())
    assert res["code"] == 100
    assert "export explode" in res["message"]
@pytest.mark.p2
def test_list_tools_missing_ids_success_inner_error_outer_error_and_finally_cleanup(monkeypatch):
    """list_tools validates ids, merges cached enabled-flags into fetched tools,
    maps inner/outer failures to codes 102/100, and always closes its sessions."""
    module = _load_mcp_server_app(monkeypatch)
    _set_request_json(monkeypatch, module, {"mcp_ids": []})
    res = _run(module.list_tools.__wrapped__())
    assert "No MCP server IDs provided" in res["message"]
    # Server whose cached variables mark tool_a as disabled.
    server = _DummyMCPServer(
        id="id1",
        name="srv-tools",
        url="http://tools",
        server_type="sse",
        tenant_id="tenant_1",
        variables={"tools": {"tool_a": {"enabled": False}}},
    )
    _set_request_json(monkeypatch, module, {"mcp_ids": ["id1"], "timeout": "2.0"})
    monkeypatch.setattr(module.MCPServerService, "get_by_id", lambda _mcp_id: (True, server))
    close_calls = []
    async def _thread_pool_exec_success(func, *args):
        # Intercept the cleanup call to prove the finally-block closes sessions.
        if func is module.close_multiple_mcp_toolcall_sessions:
            close_calls.append(args[0])
            return None
        return func(*args)
    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_exec_success)
    res = _run(module.list_tools.__wrapped__())
    assert res["code"] == 0
    # Cached enabled=False is preserved for tool_a; unseen tool_b defaults to True.
    assert res["data"]["id1"][0]["name"] == "tool_a"
    assert res["data"]["id1"][0]["enabled"] is False
    assert res["data"]["id1"][1]["enabled"] is True
    assert close_calls and len(close_calls[-1]) == 1
    # Tool fetch raising inside the loop -> code 102, session still closed.
    _set_request_json(monkeypatch, module, {"mcp_ids": ["id1"], "timeout": "2.0"})
    close_calls_inner = []
    async def _thread_pool_exec_inner_error(func, *args):
        if func is module.close_multiple_mcp_toolcall_sessions:
            close_calls_inner.append(args[0])
            return None
        raise RuntimeError("inner tools explode")
    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_exec_inner_error)
    res = _run(module.list_tools.__wrapped__())
    assert res["code"] == 102
    assert "MCP list tools error" in res["message"]
    assert close_calls_inner and len(close_calls_inner[-1]) == 1
    # get_by_id raising -> code 100; the cleanup call is still made.
    _set_request_json(monkeypatch, module, {"mcp_ids": ["id1"], "timeout": "2.0"})
    close_calls_outer = []
    def _raise_get_by_id(_mcp_id):
        raise RuntimeError("outer explode")
    monkeypatch.setattr(module.MCPServerService, "get_by_id", _raise_get_by_id)
    async def _thread_pool_exec_outer(func, *args):
        if func is module.close_multiple_mcp_toolcall_sessions:
            close_calls_outer.append(args[0])
            return None
        return func(*args)
    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_exec_outer)
    res = _run(module.list_tools.__wrapped__())
    assert res["code"] == 100
    assert "outer explode" in res["message"]
    assert close_calls_outer
@pytest.mark.p2
def test_test_tool_missing_mcp_id(monkeypatch):
    """test_tool() refuses a request whose mcp_id is blank."""
    module = _load_mcp_server_app(monkeypatch)
    payload = {"mcp_id": "", "tool_name": "tool_a", "arguments": {"x": 1}}
    _set_request_json(monkeypatch, module, payload)
    result = _run(module.test_tool.__wrapped__())
    assert "No MCP server ID provided" in result["message"]
@pytest.mark.p2
def test_test_tool_route_matrix_unit(monkeypatch):
    """test_tool validates id/name/arguments, enforces tenant ownership, returns
    the tool result on success, and wraps call errors as code 100."""
    module = _load_mcp_server_app(monkeypatch)
    _set_request_json(monkeypatch, module, {"mcp_id": "", "tool_name": "tool_a", "arguments": {"x": 1}})
    res = _run(module.test_tool.__wrapped__())
    assert "No MCP server ID provided" in res["message"]
    _set_request_json(monkeypatch, module, {"mcp_id": "id1", "tool_name": "", "arguments": {"x": 1}})
    res = _run(module.test_tool.__wrapped__())
    assert "Require provide tool name and arguments" in res["message"]
    _set_request_json(monkeypatch, module, {"mcp_id": "id1", "tool_name": "tool_a", "arguments": {}})
    res = _run(module.test_tool.__wrapped__())
    assert "Require provide tool name and arguments" in res["message"]
    # Unknown id and foreign-tenant server produce the same not-found message.
    _set_request_json(monkeypatch, module, {"mcp_id": "id1", "tool_name": "tool_a", "arguments": {"x": 1}})
    monkeypatch.setattr(module.MCPServerService, "get_by_id", lambda _mcp_id: (False, None))
    res = _run(module.test_tool.__wrapped__())
    assert "Cannot find MCP server id1 for user tenant_1" in res["message"]
    server_other = _DummyMCPServer(id="id1", name="srv", url="http://a", server_type="sse", tenant_id="other", variables={})
    monkeypatch.setattr(module.MCPServerService, "get_by_id", lambda _mcp_id: (True, server_other))
    res = _run(module.test_tool.__wrapped__())
    assert "Cannot find MCP server id1 for user tenant_1" in res["message"]
    # Owned server: the dummy session's tool_call result ("ok") is returned and
    # the session is closed afterwards.
    server_ok = _DummyMCPServer(id="id1", name="srv", url="http://a", server_type="sse", tenant_id="tenant_1", variables={})
    monkeypatch.setattr(module.MCPServerService, "get_by_id", lambda _mcp_id: (True, server_ok))
    close_calls = []
    async def _thread_pool_exec_success(func, *args):
        if func is module.close_multiple_mcp_toolcall_sessions:
            close_calls.append(args[0])
            return None
        return func(*args)
    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_exec_success)
    res = _run(module.test_tool.__wrapped__())
    assert res["code"] == 0
    assert res["data"] == "ok"
    assert close_calls and len(close_calls[-1]) == 1
    # Tool call raising -> generic code-100 envelope (cleanup call still succeeds).
    async def _thread_pool_exec_raise(func, *args):
        if func is module.close_multiple_mcp_toolcall_sessions:
            return None
        raise RuntimeError("tool call explode")
    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_exec_raise)
    res = _run(module.test_tool.__wrapped__())
    assert res["code"] == 100
    assert "tool call explode" in res["message"]
@pytest.mark.p2
def test_cache_tool_route_matrix_unit(monkeypatch):
    """Walk cache_tool through validation, tenant check, update failure and success."""
    module = _load_mcp_server_app(monkeypatch)
    # Missing mcp_id.
    _set_request_json(monkeypatch, module, {"mcp_id": "", "tools": [{"name": "tool_a"}]})
    res = _run(module.cache_tool.__wrapped__())
    assert "No MCP server ID provided" in res["message"]
    # Server id not found.
    _set_request_json(monkeypatch, module, {"mcp_id": "id1", "tools": [{"name": "tool_a"}]})
    monkeypatch.setattr(module.MCPServerService, "get_by_id", lambda _mcp_id: (False, None))
    res = _run(module.cache_tool.__wrapped__())
    assert "Cannot find MCP server id1 for user tenant_1" in res["message"]
    # Server owned by a different tenant — also reported as not found.
    server_other = _DummyMCPServer(id="id1", name="srv", url="http://a", server_type="sse", tenant_id="other", variables={})
    monkeypatch.setattr(module.MCPServerService, "get_by_id", lambda _mcp_id: (True, server_other))
    res = _run(module.cache_tool.__wrapped__())
    assert "Cannot find MCP server id1 for user tenant_1" in res["message"]
    # Persisting the update fails.
    server_fail = _DummyMCPServer(id="id1", name="srv", url="http://a", server_type="sse", tenant_id="tenant_1", variables={})
    monkeypatch.setattr(module.MCPServerService, "get_by_id", lambda _mcp_id: (True, server_fail))
    monkeypatch.setattr(module.MCPServerService, "filter_update", lambda *_args, **_kwargs: False)
    res = _run(module.cache_tool.__wrapped__())
    assert "Failed to updated MCP server" in res["message"]
    # Success: well-formed tool dicts are cached; malformed entries are skipped.
    server_ok = _DummyMCPServer(
        id="id1",
        name="srv",
        url="http://a",
        server_type="sse",
        tenant_id="tenant_1",
        variables={"tools": {"old_tool": {"name": "old_tool"}}},
    )
    monkeypatch.setattr(module.MCPServerService, "get_by_id", lambda _mcp_id: (True, server_ok))
    monkeypatch.setattr(module.MCPServerService, "filter_update", lambda *_args, **_kwargs: True)
    _set_request_json(
        monkeypatch,
        module,
        {
            "mcp_id": "id1",
            "tools": [{"name": "tool_a", "enabled": True}, {"bad": 1}, "x", {"name": "tool_b", "enabled": False}],
        },
    )
    res = _run(module.cache_tool.__wrapped__())
    assert res["code"] == 0
    assert sorted(res["data"].keys()) == ["tool_a", "tool_b"]
    # The per-tool enabled flag must be preserved in the stored variables.
    assert server_ok.variables["tools"]["tool_b"]["enabled"] is False
@pytest.mark.p2
def test_test_mcp_route_matrix_unit(monkeypatch):
    """Exercise test_mcp: bad url, bad type, get_tools error, success, session failure."""
    module = _load_mcp_server_app(monkeypatch)
    # Empty url.
    _set_request_json(monkeypatch, module, {"url": "", "server_type": "sse"})
    res = _run(module.test_mcp.__wrapped__())
    assert "Invalid MCP url" in res["message"]
    # Unknown server type.
    _set_request_json(monkeypatch, module, {"url": "http://a", "server_type": "invalid"})
    res = _run(module.test_mcp.__wrapped__())
    assert "Unsupported MCP server type" in res["message"]
    # get_tools raising is reported (code 102) and the session is still closed.
    close_calls = []

    async def _thread_pool_exec_inner_error(func, *args):
        if func is module.close_multiple_mcp_toolcall_sessions:
            close_calls.append(args[0])
            return None
        if getattr(func, "__name__", "") == "get_tools":
            raise RuntimeError("get tools explode")
        return func(*args)

    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_exec_inner_error)
    _set_request_json(monkeypatch, module, {"url": "http://a", "server_type": "sse"})
    res = _run(module.test_mcp.__wrapped__())
    assert res["code"] == 102
    assert "Test MCP error: get tools explode" in res["message"]
    assert close_calls and len(close_calls[-1]) == 1
    # Success: tools listed, each marked enabled, and the session closed.
    close_calls_success = []

    async def _thread_pool_exec_success(func, *args):
        if func is module.close_multiple_mcp_toolcall_sessions:
            close_calls_success.append(args[0])
            return None
        return func(*args)

    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_exec_success)
    _set_request_json(monkeypatch, module, {"url": "http://a", "server_type": "sse"})
    res = _run(module.test_mcp.__wrapped__())
    assert res["code"] == 0
    assert res["data"][0]["name"] == "tool_a"
    assert all(tool["enabled"] is True for tool in res["data"])
    assert close_calls_success and len(close_calls_success[-1]) == 1

    # Session construction failure surfaces as a generic error (code 100).
    def _raise_session(*_args, **_kwargs):
        raise RuntimeError("session explode")

    monkeypatch.setattr(module, "MCPToolCallSession", _raise_session)
    _set_request_json(monkeypatch, module, {"url": "http://a", "server_type": "sse"})
    res = _run(module.test_mcp.__wrapped__())
    assert res["code"] == 100
    assert "session explode" in res["message"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_mcp_server_app/test_mcp_server_app_unit.py",
"license": "Apache License 2.0",
"lines": 691,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_message_app/test_message_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import inspect
import sys
from copy import deepcopy
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _DummyArgs(dict):
def getlist(self, key):
value = self.get(key)
if value is None:
return []
if isinstance(value, list):
return value
return [value]
class _DummyMemoryApiService:
async def add_message(self, *_args, **_kwargs):
return True, "ok"
async def get_messages(self, *_args, **_kwargs):
return []
def _run(coro):
    # Drive an async coroutine to completion from synchronous test code.
    return asyncio.run(coro)
def _load_memory_routes_module(monkeypatch):
    """Import api/apps/restful_apis/memory_api.py with stubbed dependencies.

    The stand-in modules must be installed in sys.modules *before* the file
    is executed so its imports resolve to the stubs instead of the real
    Quart/DB wiring. Returns the freshly executed module.
    """
    repo_root = Path(__file__).resolve().parents[4]
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    apps_mod = ModuleType("api.apps")
    apps_mod.__path__ = [str(repo_root / "api" / "apps")]
    apps_mod.current_user = SimpleNamespace(id="user-1")
    apps_mod.login_required = lambda func: func  # bypass auth in unit tests
    monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
    services_mod = ModuleType("api.apps.services")
    services_mod.memory_api_service = _DummyMemoryApiService()
    monkeypatch.setitem(sys.modules, "api.apps.services", services_mod)
    module_name = "test_message_routes_unit_module"
    module_path = repo_root / "api" / "apps" / "restful_apis" / "memory_api.py"
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    # The target module references a blueprint `manager` at import time;
    # inject a no-op one before exec so route decorators do nothing.
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, module_name, module)
    spec.loader.exec_module(module)
    return module
def _set_request_json(monkeypatch, module, payload):
    # Replace get_request_json with an awaitable returning a fresh copy of
    # `payload`, so handlers may mutate it without leaking between calls.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue(deepcopy(payload)))
@pytest.mark.p2
def test_add_message_partial_failure_branch(monkeypatch):
    """add_message reports SERVER_ERROR when the service rejects a message."""
    module = _load_memory_routes_module(monkeypatch)
    _set_request_json(
        monkeypatch,
        module,
        {
            "memory_id": ["memory-1"],
            "agent_id": "agent-1",
            "session_id": "session-1",
            "user_input": "hello",
            "agent_response": "world",
        },
    )

    async def _add_message(_memory_ids, _message_dict):
        # Simulate the service failing to enqueue the message.
        return False, "cannot enqueue"

    monkeypatch.setattr(module.memory_api_service, "add_message", _add_message)
    # inspect.unwrap strips the decorator chain so the handler runs directly.
    res = _run(inspect.unwrap(module.add_message)())
    assert res["code"] == module.RetCode.SERVER_ERROR, res
    assert "Some messages failed to add" in res["message"], res
@pytest.mark.p2
def test_get_messages_csv_and_missing_memory_ids(monkeypatch):
    """get_messages requires memory_ids and parses comma-separated values."""
    module = _load_memory_routes_module(monkeypatch)
    # No memory_id query parameter at all -> argument error.
    monkeypatch.setattr(module, "request", SimpleNamespace(args=_DummyArgs({})))
    res = _run(inspect.unwrap(module.get_messages)())
    assert res["code"] == module.RetCode.ARGUMENT_ERROR, res
    assert "memory_ids is required." in res["message"], res
    # CSV memory_id plus filters: the handler must split/convert before
    # forwarding to the service (checked by the stub's own asserts).
    monkeypatch.setattr(
        module,
        "request",
        SimpleNamespace(args=_DummyArgs({"memory_id": "m1,m2", "agent_id": "a1", "session_id": "s1", "limit": "5"})),
    )

    async def _get_messages(memory_ids, agent_id, session_id, limit):
        assert memory_ids == ["m1", "m2"]
        assert agent_id == "a1"
        assert session_id == "s1"
        assert limit == 5
        return [{"message_id": 1}]

    monkeypatch.setattr(module.memory_api_service, "get_messages", _get_messages)
    res = _run(inspect.unwrap(module.get_messages)())
    assert res["code"] == module.RetCode.SUCCESS, res
    assert isinstance(res["data"], list), res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_message_app/test_message_routes_unit.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_system_app/test_apps_init_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import logging
import sys
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
from werkzeug.exceptions import Unauthorized as WerkzeugUnauthorized
class _DummyAPIToken:
@staticmethod
def query(**_kwargs):
return []
class _DummyUserService:
@staticmethod
def query(**_kwargs):
return []
def _run(coro):
    # Drive an async coroutine to completion from synchronous test code.
    return asyncio.run(coro)
def _load_apps_module(monkeypatch):
    """Execute api/apps/__init__.py in isolation with stubbed dependencies.

    Settings, DB models, services, CLI commands and api_utils are replaced
    in sys.modules before execution, and blueprint discovery is disabled by
    making Path.glob return nothing. Returns (quart_app, module).
    """
    repo_root = Path(__file__).resolve().parents[4]
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    settings_mod = ModuleType("common.settings")
    settings_mod.SECRET_KEY = "test-secret-key"
    settings_mod.init_settings = lambda: None
    settings_mod.decrypt_database_config = lambda name=None: {}
    monkeypatch.setitem(sys.modules, "common.settings", settings_mod)
    common_pkg.settings = settings_mod
    db_models_mod = ModuleType("api.db.db_models")
    db_models_mod.APIToken = _DummyAPIToken
    db_models_mod.close_connection = lambda: None
    monkeypatch.setitem(sys.modules, "api.db.db_models", db_models_mod)
    services_mod = ModuleType("api.db.services")
    services_mod.UserService = _DummyUserService
    monkeypatch.setitem(sys.modules, "api.db.services", services_mod)
    commands_mod = ModuleType("api.utils.commands")
    commands_mod.register_commands = lambda _app: None
    monkeypatch.setitem(sys.modules, "api.utils.commands", commands_mod)
    api_utils_mod = ModuleType("api.utils.api_utils")

    def _get_json_result(code=0, message="success", data=None):
        return {"code": code, "message": message, "data": data}

    def _server_error_response(error):
        return {"code": 100, "message": repr(error)}

    api_utils_mod.get_json_result = _get_json_result
    api_utils_mod.server_error_response = _server_error_response
    monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)
    module_name = "test_apps_init_unit_module"
    module_path = repo_root / "api" / "apps" / "__init__.py"
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    monkeypatch.setitem(sys.modules, module_name, module)
    # Prevent the module from registering page blueprints during import.
    monkeypatch.setattr(Path, "glob", lambda self, _pattern: [])
    spec.loader.exec_module(module)
    return module.app, module
@pytest.mark.p2
def test_module_init_and_unauthorized_message_variants(monkeypatch):
    """_unauthorized_message falls back to the canned text unless repr looks like a 401."""
    _quart_app, apps_module = _load_apps_module(monkeypatch)
    assert apps_module.client_urls_prefix == []

    class _BrokenRepr:
        # repr() raising must not propagate out of _unauthorized_message.
        def __repr__(self):
            raise RuntimeError("repr explode")

    class _ExactUnauthorizedRepr:
        def __repr__(self):
            return apps_module.UNAUTHORIZED_MESSAGE

    class _Unauthorized401Repr:
        def __repr__(self):
            return "Unauthorized 401 from upstream"

    class _OtherRepr:
        def __repr__(self):
            return "Forbidden 403"

    assert apps_module._unauthorized_message(None) == apps_module.UNAUTHORIZED_MESSAGE
    assert apps_module._unauthorized_message(_BrokenRepr()) == apps_module.UNAUTHORIZED_MESSAGE
    assert apps_module._unauthorized_message(_ExactUnauthorizedRepr()) == apps_module.UNAUTHORIZED_MESSAGE
    # A repr that already reads as an upstream 401 message is passed through verbatim.
    assert apps_module._unauthorized_message(_Unauthorized401Repr()) == "Unauthorized 401 from upstream"
    assert apps_module._unauthorized_message(_OtherRepr()) == apps_module.UNAUTHORIZED_MESSAGE
@pytest.mark.p2
def test_load_user_token_edge_cases(monkeypatch):
    """_load_user rejects empty, too-short, and stale-access-token credentials."""
    quart_app, apps_module = _load_apps_module(monkeypatch)
    user_with_empty_token = SimpleNamespace(email="empty@example.com", access_token="")

    async def _case():
        # Serializer decodes the header to an empty token.
        async with quart_app.test_request_context("/", headers={"Authorization": "token"}):
            monkeypatch.setattr(apps_module.Serializer, "loads", lambda _self, _auth: "")
            assert apps_module._load_user() is None
        # Decoded token is too short to be valid.
        async with quart_app.test_request_context("/", headers={"Authorization": "token"}):
            monkeypatch.setattr(apps_module.Serializer, "loads", lambda _self, _auth: "short-token")
            assert apps_module._load_user() is None
        # Valid-length token, but the matched user has an empty access_token.
        async with quart_app.test_request_context("/", headers={"Authorization": "token"}):
            monkeypatch.setattr(apps_module.Serializer, "loads", lambda _self, _auth: "a" * 32)
            monkeypatch.setattr(apps_module.UserService, "query", lambda **_kwargs: [user_with_empty_token])
            assert apps_module._load_user() is None

    _run(_case())
@pytest.mark.p2
def test_load_user_api_token_fallback_and_fallback_exception(monkeypatch, caplog):
    """When token decoding fails, _load_user falls back to API-token lookup."""
    quart_app, apps_module = _load_apps_module(monkeypatch)

    def _raise_decode(_self, _auth):
        raise RuntimeError("decode failed")

    monkeypatch.setattr(apps_module.Serializer, "loads", _raise_decode)
    fallback_user_empty_token = SimpleNamespace(email="fallback@example.com", access_token="")

    async def _case():
        # Fallback finds a tenant, but the user has no usable access token.
        monkeypatch.setattr(apps_module.APIToken, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
        monkeypatch.setattr(apps_module.UserService, "query", lambda **_kwargs: [fallback_user_empty_token])
        async with quart_app.test_request_context("/", headers={"Authorization": "Bearer api-token"}):
            assert apps_module._load_user() is None

        def _raise_api_token(**_kwargs):
            raise RuntimeError("api token fallback failed")

        # A failing fallback is logged as a warning, not raised to the caller.
        monkeypatch.setattr(apps_module.APIToken, "query", _raise_api_token)
        async with quart_app.test_request_context("/", headers={"Authorization": "Bearer api-token"}):
            with caplog.at_level(logging.WARNING):
                assert apps_module._load_user() is None

    _run(_case())
    assert "api token fallback failed" in caplog.text
@pytest.mark.p2
def test_login_required_timing_and_login_user_inactive(monkeypatch, caplog):
    """login_required logs timing when RAGFLOW_API_TIMING is set; inactive users cannot log in."""
    quart_app, apps_module = _load_apps_module(monkeypatch)
    monkeypatch.setenv("RAGFLOW_API_TIMING", "1")
    monkeypatch.setattr(apps_module, "current_user", SimpleNamespace(id="tenant-1"))

    @apps_module.login_required
    async def _timed_handler():
        return {"ok": True}

    async def _case():
        async with quart_app.test_request_context("/timed"):
            with caplog.at_level(logging.INFO):
                assert await _timed_handler() == {"ok": True}
        # login_user refuses users flagged inactive.
        inactive_user = SimpleNamespace(id="user-1", is_active=False)
        assert apps_module.login_user(inactive_user) is False

    _run(_case())
    assert "api_timing login_required" in caplog.text
@pytest.mark.p2
def test_logout_user_not_found_and_unauthorized_handlers(monkeypatch):
    """logout_user clears session state; 404 and 401 handlers produce JSON payloads."""
    quart_app, apps_module = _load_apps_module(monkeypatch)

    async def _case():
        # logout_user removes identity keys and schedules remember-cookie clearing.
        async with quart_app.test_request_context("/logout", headers={"Cookie": "remember_token=abc"}):
            from quart import session
            session["_user_id"] = "user-1"
            session["_fresh"] = True
            session["_id"] = "session-id"
            session["_remember_seconds"] = 5
            assert apps_module.logout_user() is True
            assert "_user_id" not in session
            assert "_fresh" not in session
            assert "_id" not in session
            assert session.get("_remember") == "clear"
            assert "_remember_seconds" not in session
        # 404 handler returns a JSON body with code/error/message fields.
        async with quart_app.test_request_context("/missing/path"):
            not_found_resp, status = await apps_module.not_found(RuntimeError("missing"))
            assert status == apps_module.RetCode.NOT_FOUND
            payload = await not_found_resp.get_json()
            assert payload["code"] == apps_module.RetCode.NOT_FOUND
            assert payload["error"] == "Not Found"
            assert "Not Found:" in payload["message"]
        # Both unauthorized handlers (quart-auth and werkzeug) return 401 payloads.
        async with quart_app.test_request_context("/protected"):
            @apps_module.login_required
            async def _protected():
                return {"ok": True}
            monkeypatch.setattr(apps_module, "current_user", None)
            with pytest.raises(apps_module.QuartAuthUnauthorized) as exc_info:
                await _protected()
            quart_payload, quart_status = await apps_module.unauthorized_quart_auth(exc_info.value)
            assert quart_status == apps_module.RetCode.UNAUTHORIZED
            assert quart_payload["code"] == apps_module.RetCode.UNAUTHORIZED
            werk_payload, werk_status = await apps_module.unauthorized_werkzeug(WerkzeugUnauthorized("Unauthorized 401"))
            assert werk_status == apps_module.RetCode.UNAUTHORIZED
            assert werk_payload["code"] == apps_module.RetCode.UNAUTHORIZED

    _run(_case())
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_system_app/test_apps_init_unit.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/unit_test/utils/test_api_file_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for api.utils.file_utils (filename_type, thumbnail_img, sanitize_path, read_potential_broken_pdf)."""
import pytest
from api.db import FileType
from api.utils.file_utils import (
MAX_BLOB_SIZE_PDF,
MAX_BLOB_SIZE_THUMBNAIL,
GHOSTSCRIPT_TIMEOUT_SEC,
filename_type,
thumbnail_img,
thumbnail,
sanitize_path,
read_potential_broken_pdf,
repair_pdf_with_ghostscript,
)
class TestFilenameType:
    """Edge cases and robustness for filename_type."""

    @pytest.mark.parametrize("filename,expected", [
        ("doc.pdf", FileType.PDF.value),
        ("a.PDF", FileType.PDF.value),          # extension match is case-insensitive
        ("x.png", FileType.VISUAL.value),
        ("file.docx", FileType.DOC.value),
        ("a/b/c.pdf", FileType.PDF.value),      # paths are handled, not just basenames
        ("path/to/file.txt", FileType.DOC.value),
    ])
    def test_valid_filenames(self, filename, expected):
        assert filename_type(filename) == expected

    @pytest.mark.parametrize("filename", [
        None,
        "",
        " ",
        123,
        [],
    ])
    def test_invalid_or_empty_returns_other(self, filename):
        # Non-string or blank input falls back to FileType.OTHER.
        assert filename_type(filename) == FileType.OTHER.value

    def test_path_with_basename_uses_extension(self):
        assert filename_type("folder/subfolder/document.pdf") == FileType.PDF.value
class TestSanitizePath:
    """Edge cases for sanitize_path."""

    @pytest.mark.parametrize("raw,expected", [
        (None, ""),            # non-string/blank inputs collapse to ""
        ("", ""),
        (" ", ""),
        (42, ""),
        ("a/b", "a/b"),
        ("a/../b", "a/b"),     # parent-dir components are removed
        ("/leading/", "leading"),
        ("\\mixed\\path", "mixed/path"),  # backslashes normalised to "/"
    ])
    def test_sanitize_cases(self, raw, expected):
        assert sanitize_path(raw) == expected
class TestReadPotentialBrokenPdf:
    """read_potential_broken_pdf must degrade gracefully on bad input."""

    def test_none_returns_empty_bytes(self):
        assert b"" == read_potential_broken_pdf(None)

    def test_empty_bytes_returns_as_is(self):
        assert b"" == read_potential_broken_pdf(b"")

    def test_non_len_raises_or_returns_empty(self):
        class _Opaque:
            """An object with no __len__ and no buffer protocol."""

        assert read_potential_broken_pdf(_Opaque()) == b""
class TestThumbnailImg:
    """Edge cases for thumbnail_img."""

    def test_none_blob_returns_none(self):
        assert thumbnail_img("x.pdf", None) is None

    def test_none_filename_returns_none(self):
        assert thumbnail_img(None, b"fake pdf content") is None

    def test_empty_blob_returns_none(self):
        assert thumbnail_img("x.pdf", b"") is None

    def test_empty_filename_returns_none(self):
        assert thumbnail_img("", b"x") is None

    def test_oversized_blob_returns_none(self):
        # Blobs past the size cap are rejected instead of rendered.
        huge = b"x" * (MAX_BLOB_SIZE_THUMBNAIL + 1)
        assert thumbnail_img("x.pdf", huge) is None
class TestThumbnail:
    """thumbnail() wraps thumbnail_img and returns base64 or empty string."""

    def test_none_img_returns_empty_string(self):
        # Unknown extension + garbage bytes -> no image -> "".
        assert thumbnail("x.xyz", b"garbage") == ""

    def test_valid_img_returns_base64_prefix(self):
        from api.constants import IMG_BASE64_PREFIX
        # A minimal valid 1x1 PNG; either a prefixed data URI or "" is accepted.
        result = thumbnail("x.png", b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00\x90wS\xde\x00\x00\x00\x0cIDATx\x9cc\xf8\x0f\x00\x00\x01\x01\x00\x05\x18\xd8N\x00\x00\x00\x00IEND\xaeB`\x82")
        assert result.startswith(IMG_BASE64_PREFIX) or result == ""
class TestRepairPdfWithGhostscript:
    """repair_pdf_with_ghostscript edge cases."""

    def test_none_returns_empty_bytes(self):
        assert repair_pdf_with_ghostscript(None) == b""

    def test_empty_bytes_returns_empty(self):
        assert repair_pdf_with_ghostscript(b"") == b""

    def test_oversized_returns_original_without_calling_gs(self):
        # Past the PDF size cap the blob is returned untouched.
        huge = b"%" * (MAX_BLOB_SIZE_PDF + 1)
        result = repair_pdf_with_ghostscript(huge)
        assert result == huge
class TestConstants:
    """Every resource-limit constant must be strictly positive."""

    def test_thumbnail_limit_positive(self):
        assert 0 < MAX_BLOB_SIZE_THUMBNAIL

    def test_pdf_limit_positive(self):
        assert 0 < MAX_BLOB_SIZE_PDF

    def test_gs_timeout_positive(self):
        assert 0 < GHOSTSCRIPT_TIMEOUT_SEC
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/unit_test/utils/test_api_file_utils.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/unit_test/utils/test_health_utils_minio.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for MinIO health check (check_minio_alive) and scheme/verify helpers.
Covers SSL/HTTPS and certificate verification (issues #13158, #13159).
"""
from unittest.mock import patch, Mock
class TestMinioSchemeAndVerify:
    """Test _minio_scheme_and_verify helper."""

    @patch("api.utils.health_utils.settings")
    def test_scheme_http_when_secure_false(self, mock_settings):
        mock_settings.MINIO = {"host": "minio:9000", "secure": False}
        from api.utils.health_utils import _minio_scheme_and_verify
        scheme, verify = _minio_scheme_and_verify()
        assert scheme == "http"
        assert verify is True

    @patch("api.utils.health_utils.settings")
    def test_scheme_https_when_secure_true(self, mock_settings):
        mock_settings.MINIO = {"host": "minio:9000", "secure": True}
        from api.utils.health_utils import _minio_scheme_and_verify
        scheme, verify = _minio_scheme_and_verify()
        assert scheme == "https"
        assert verify is True

    @patch("api.utils.health_utils.settings")
    def test_scheme_https_when_secure_string_true(self, mock_settings):
        # String truthy "secure" values must also select https.
        mock_settings.MINIO = {"host": "minio:9000", "secure": "true"}
        from api.utils.health_utils import _minio_scheme_and_verify
        scheme, verify = _minio_scheme_and_verify()
        assert scheme == "https"

    @patch("api.utils.health_utils.settings")
    def test_verify_false_for_self_signed(self, mock_settings):
        # verify=False supports self-signed certificates (issue #13159).
        mock_settings.MINIO = {"host": "minio:9000", "secure": True, "verify": False}
        from api.utils.health_utils import _minio_scheme_and_verify
        scheme, verify = _minio_scheme_and_verify()
        assert scheme == "https"
        assert verify is False

    @patch("api.utils.health_utils.settings")
    def test_verify_string_false(self, mock_settings):
        mock_settings.MINIO = {"host": "minio:9000", "verify": "false"}
        from api.utils.health_utils import _minio_scheme_and_verify
        _, verify = _minio_scheme_and_verify()
        assert verify is False

    @patch("api.utils.health_utils.settings")
    def test_default_verify_true_when_key_missing(self, mock_settings):
        # Verification defaults to on when "verify" is absent.
        mock_settings.MINIO = {"host": "minio:9000"}
        from api.utils.health_utils import _minio_scheme_and_verify
        _, verify = _minio_scheme_and_verify()
        assert verify is True
class TestCheckMinioAlive:
    """Test check_minio_alive with mocked requests and settings."""
    # NOTE: @patch decorators apply bottom-up, so the parameter order is
    # (self, mock_settings, mock_get).

    @patch("api.utils.health_utils.requests.get")
    @patch("api.utils.health_utils.settings")
    def test_returns_alive_when_http_200(self, mock_settings, mock_get):
        mock_settings.MINIO = {"host": "minio:9000", "secure": False}
        mock_response = Mock()
        mock_response.status_code = 200
        mock_get.return_value = mock_response
        from api.utils.health_utils import check_minio_alive
        result = check_minio_alive()
        assert result["status"] == "alive"
        assert "elapsed" in result["message"]
        mock_get.assert_called_once()
        # The MinIO liveness endpoint is probed with default verification.
        call_args = mock_get.call_args
        assert call_args[0][0] == "http://minio:9000/minio/health/live"
        assert call_args[1]["verify"] is True

    @patch("api.utils.health_utils.requests.get")
    @patch("api.utils.health_utils.settings")
    def test_uses_https_when_secure_true(self, mock_settings, mock_get):
        mock_settings.MINIO = {"host": "minio:9000", "secure": True}
        mock_response = Mock()
        mock_response.status_code = 200
        mock_get.return_value = mock_response
        from api.utils.health_utils import check_minio_alive
        check_minio_alive()
        call_args = mock_get.call_args
        assert call_args[0][0] == "https://minio:9000/minio/health/live"

    @patch("api.utils.health_utils.requests.get")
    @patch("api.utils.health_utils.settings")
    def test_passes_verify_false_for_self_signed(self, mock_settings, mock_get):
        # Self-signed deployments must propagate verify=False to requests.
        mock_settings.MINIO = {"host": "minio:9000", "secure": True, "verify": False}
        mock_response = Mock()
        mock_response.status_code = 200
        mock_get.return_value = mock_response
        from api.utils.health_utils import check_minio_alive
        check_minio_alive()
        call_args = mock_get.call_args
        assert call_args[1]["verify"] is False

    @patch("api.utils.health_utils.requests.get")
    @patch("api.utils.health_utils.settings")
    def test_returns_timeout_on_non_200(self, mock_settings, mock_get):
        mock_settings.MINIO = {"host": "minio:9000"}
        mock_response = Mock()
        mock_response.status_code = 503
        mock_get.return_value = mock_response
        from api.utils.health_utils import check_minio_alive
        result = check_minio_alive()
        assert result["status"] == "timeout"

    @patch("api.utils.health_utils.requests.get")
    @patch("api.utils.health_utils.settings")
    def test_returns_timeout_on_request_exception(self, mock_settings, mock_get):
        # Network failures are reported as "timeout" with an error message.
        mock_settings.MINIO = {"host": "minio:9000"}
        mock_get.side_effect = ConnectionError("Connection refused")
        from api.utils.health_utils import check_minio_alive
        result = check_minio_alive()
        assert result["status"] == "timeout"
        assert "error" in result["message"]

    @patch("api.utils.health_utils.requests.get")
    @patch("api.utils.health_utils.settings")
    def test_request_uses_timeout(self, mock_settings, mock_get):
        mock_settings.MINIO = {"host": "minio:9000"}
        mock_response = Mock()
        mock_response.status_code = 200
        mock_get.return_value = mock_response
        from api.utils.health_utils import check_minio_alive
        check_minio_alive()
        # A 10-second timeout bounds the health probe.
        call_args = mock_get.call_args
        assert call_args[1]["timeout"] == 10
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/unit_test/utils/test_health_utils_minio.py",
"license": "Apache License 2.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/unit_test/utils/test_minio_conn_ssl.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for MinIO client SSL/secure configuration (_build_minio_http_client).
Covers issue #13158.
"""
import ssl
from unittest.mock import patch
class TestBuildMinioHttpClient:
    """Test _build_minio_http_client helper."""

    @patch("rag.utils.minio_conn.settings")
    def test_returns_none_when_verify_true(self, mock_settings):
        # verify=True -> None, i.e. the default client with cert checks on.
        mock_settings.MINIO = {"verify": True}
        from rag.utils.minio_conn import _build_minio_http_client
        client = _build_minio_http_client()
        assert client is None

    @patch("rag.utils.minio_conn.settings")
    def test_returns_none_when_verify_missing(self, mock_settings):
        mock_settings.MINIO = {}
        from rag.utils.minio_conn import _build_minio_http_client
        client = _build_minio_http_client()
        assert client is None

    @patch("rag.utils.minio_conn.settings")
    def test_returns_pool_manager_when_verify_false(self, mock_settings):
        # verify=False -> custom pool manager with certificate checks disabled.
        mock_settings.MINIO = {"verify": False}
        from rag.utils.minio_conn import _build_minio_http_client
        client = _build_minio_http_client()
        assert client is not None
        assert hasattr(client, "connection_pool_kw")
        assert client.connection_pool_kw.get("cert_reqs") == ssl.CERT_NONE

    @patch("rag.utils.minio_conn.settings")
    def test_returns_pool_manager_when_verify_string_false(self, mock_settings):
        # The string "false" is treated the same as boolean False.
        mock_settings.MINIO = {"verify": "false"}
        from rag.utils.minio_conn import _build_minio_http_client
        client = _build_minio_http_client()
        assert client is not None
        assert client.connection_pool_kw.get("cert_reqs") == ssl.CERT_NONE

    @patch("rag.utils.minio_conn.settings")
    def test_returns_none_when_verify_string_1(self, mock_settings):
        # "1" counts as truthy -> default verified client.
        mock_settings.MINIO = {"verify": "1"}
        from rag.utils.minio_conn import _build_minio_http_client
        client = _build_minio_http_client()
        assert client is None
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/unit_test/utils/test_minio_conn_ssl.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:api/apps/restful_apis/memory_api.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import time
from quart import request
from common.constants import RetCode
from common.exceptions import ArgumentException, NotFoundException
from api.apps import login_required
from api.utils.api_utils import validate_request, get_request_json, get_error_argument_result, get_json_result
from api.apps.services import memory_api_service
@manager.route("/memories", methods=["POST"]) # noqa: F821
@login_required
@validate_request("name", "memory_type", "embd_id", "llm_id")
async def create_memory():
    # Create a memory from the JSON body. Required fields ("name",
    # "memory_type", "embd_id", "llm_id") are enforced by @validate_request.
    # Per-phase latency logging is opt-in via the RAGFLOW_API_TIMING env var;
    # when disabled, no perf_counter() calls are made at all.
    timing_enabled = os.getenv("RAGFLOW_API_TIMING")
    t_start = time.perf_counter() if timing_enabled else None
    req = await get_request_json()
    t_parsed = time.perf_counter() if timing_enabled else None
    try:
        memory_info = {
            "name": req["name"],
            "memory_type": req["memory_type"],
            "embd_id": req["embd_id"],
            "llm_id": req["llm_id"]
        }
        success, res = await memory_api_service.create_memory(memory_info)
        if timing_enabled:
            logging.info(
                "api_timing create_memory parse_ms=%.2f validate_and_db_ms=%.2f total_ms=%.2f path=%s",
                (t_parsed - t_start) * 1000,
                (time.perf_counter() - t_parsed) * 1000,
                (time.perf_counter() - t_start) * 1000,
                request.path,
            )
        if success:
            return get_json_result(message=True, data=res)
        else:
            return get_json_result(message=res, code=RetCode.SERVER_ERROR)
    except ArgumentException as arg_error:
        # Validation failures from the service layer map to an argument-error payload.
        logging.error(arg_error)
        if timing_enabled:
            logging.info(
                "api_timing create_memory error=%s parse_ms=%.2f total_ms=%.2f path=%s",
                str(arg_error),
                (t_parsed - t_start) * 1000,
                (time.perf_counter() - t_start) * 1000,
                request.path,
            )
        return get_error_argument_result(str(arg_error))
    except Exception as e:
        # Unexpected failures are logged but not leaked to the client.
        logging.error(e)
        if timing_enabled:
            logging.info(
                "api_timing create_memory error=%s parse_ms=%.2f total_ms=%.2f path=%s",
                str(e),
                (t_parsed - t_start) * 1000,
                (time.perf_counter() - t_start) * 1000,
                request.path,
            )
        return get_json_result(code=RetCode.SERVER_ERROR, message="Internal server error")
@manager.route("/memories/<memory_id>", methods=["PUT"]) # noqa: F821
@login_required
async def update_memory(memory_id):
    """Partially update a memory; only whitelisted fields are forwarded."""
    req = await get_request_json()
    allowed_fields = [
        "name", "permissions", "llm_id", "embd_id", "memory_type", "memory_size", "forgetting_policy", "temperature",
        "avatar", "description", "system_prompt", "user_prompt"
    ]
    new_settings = {field: req[field] for field in allowed_fields if field in req}
    try:
        success, res = await memory_api_service.update_memory(memory_id, new_settings)
        if success:
            return get_json_result(message=True, data=res)
        return get_json_result(message=res, code=RetCode.SERVER_ERROR)
    except NotFoundException as missing:
        logging.error(missing)
        return get_json_result(code=RetCode.NOT_FOUND, message=str(missing))
    except ArgumentException as bad_argument:
        logging.error(bad_argument)
        return get_error_argument_result(str(bad_argument))
    except Exception as unexpected:
        logging.error(unexpected)
        return get_json_result(code=RetCode.SERVER_ERROR, message="Internal server error")
@manager.route("/memories/<memory_id>", methods=["DELETE"]) # noqa: F821
@login_required
async def delete_memory(memory_id):
    """Delete a memory by id."""
    try:
        await memory_api_service.delete_memory(memory_id)
        return get_json_result(message=True)
    except NotFoundException as missing:
        logging.error(missing)
        return get_json_result(code=RetCode.NOT_FOUND, message=str(missing))
    except Exception as unexpected:
        logging.error(unexpected)
        return get_json_result(code=RetCode.SERVER_ERROR, message="Internal server error")
@manager.route("/memories", methods=["GET"]) # noqa: F821
@login_required
async def list_memory():
    """List memories with optional type/tenant/storage filters and paging."""
    wanted = ["memory_type", "tenant_id", "storage_type"]
    filter_params = {key: request.args.get(key) for key in wanted if key in request.args}
    keywords = request.args.get("keywords")
    page = int(request.args.get("page", 1))
    page_size = int(request.args.get("page_size", 50))
    try:
        data = await memory_api_service.list_memory(filter_params, keywords, page, page_size)
        return get_json_result(message=True, data=data)
    except Exception as unexpected:
        logging.error(unexpected)
        return get_json_result(code=RetCode.SERVER_ERROR, message="Internal server error")
@manager.route("/memories/<memory_id>/config", methods=["GET"]) # noqa: F821
@login_required
async def get_memory_config(memory_id):
    """Return the configuration of a single memory."""
    try:
        config = await memory_api_service.get_memory_config(memory_id)
        return get_json_result(message=True, data=config)
    except NotFoundException as missing:
        logging.error(missing)
        return get_json_result(code=RetCode.NOT_FOUND, message=str(missing))
    except Exception as unexpected:
        logging.error(unexpected)
        return get_json_result(code=RetCode.SERVER_ERROR, message="Internal server error")
@manager.route("/memories/<memory_id>", methods=["GET"]) # noqa: F821
@login_required
async def get_memory_messages(memory_id):
    """Page through a memory's messages, optionally filtered by agent/keywords."""
    args = request.args
    agent_ids = args.getlist("agent_id")
    # Accept both repeated query params and a single comma-separated value.
    if len(agent_ids) == 1 and ',' in agent_ids[0]:
        agent_ids = agent_ids[0].split(',')
    keywords = args.get("keywords", "").strip()
    page = int(args.get("page", 1))
    page_size = int(args.get("page_size", 50))
    try:
        data = await memory_api_service.get_memory_messages(
            memory_id, agent_ids, keywords, page, page_size
        )
        return get_json_result(message=True, data=data)
    except NotFoundException as missing:
        logging.error(missing)
        return get_json_result(code=RetCode.NOT_FOUND, message=str(missing))
    except Exception as unexpected:
        logging.error(unexpected)
        return get_json_result(code=RetCode.SERVER_ERROR, message="Internal server error")
@manager.route("/messages", methods=["POST"]) # noqa: F821
@login_required
@validate_request("memory_id", "agent_id", "session_id", "user_input", "agent_response")
async def add_message():
    # Queue a user/agent exchange for storage in one or more memories.
    # Required body fields are enforced by @validate_request.
    req = await get_request_json()
    # NOTE(review): forwarded to the service as "memory_ids" — presumably a
    # list of memory ids; confirm whether a single-id string is also accepted.
    memory_ids = req["memory_id"]
    message_dict = {
        "user_id": req.get("user_id"),  # optional; absent -> None
        "agent_id": req["agent_id"],
        "session_id": req["session_id"],
        "user_input": req["user_input"],
        "agent_response": req["agent_response"],
    }
    res, msg = await memory_api_service.add_message(memory_ids, message_dict)
    if res:
        return get_json_result(message=msg)
    # Partial failure: surface the detail from the service layer.
    return get_json_result(message="Some messages failed to add. Detail:" + msg, code=RetCode.SERVER_ERROR)
@manager.route("/messages/<memory_id>:<message_id>", methods=["DELETE"]) # noqa: F821
@login_required
async def forget_message(memory_id: str, message_id: int):
    """Soft-delete (forget) a single message within a memory."""
    try:
        outcome = await memory_api_service.forget_message(memory_id, message_id)
        return get_json_result(message=outcome)
    except NotFoundException as missing:
        logging.error(missing)
        return get_json_result(code=RetCode.NOT_FOUND, message=str(missing))
    except Exception as unexpected:
        logging.error(unexpected)
        return get_json_result(code=RetCode.SERVER_ERROR, message="Internal server error")
@manager.route("/messages/<memory_id>:<message_id>", methods=["PUT"]) # noqa: F821
@login_required
@validate_request("status")
async def update_message(memory_id: str, message_id: int):
    """Set a message's boolean status flag."""
    req = await get_request_json()
    status = req["status"]
    if not isinstance(status, bool):
        return get_error_argument_result("Status must be a boolean.")
    try:
        ok = await memory_api_service.update_message_status(memory_id, message_id, status)
        if ok:
            return get_json_result(message=ok)
        return get_json_result(code=RetCode.SERVER_ERROR, message=f"Failed to set status for message '{message_id}' in memory '{memory_id}'.")
    except NotFoundException as missing:
        logging.error(missing)
        return get_json_result(code=RetCode.NOT_FOUND, message=str(missing))
    except Exception as unexpected:
        logging.error(unexpected)
        return get_json_result(code=RetCode.SERVER_ERROR, message="Internal server error")
@manager.route("/messages/search", methods=["GET"]) # noqa: F821
@login_required
async def search_message():
    """Similarity search over messages in one or more memories.

    Query params: memory_id (repeatable or comma-separated), query (required),
    similarity_threshold, keywords_similarity_weight, top_n, agent_id,
    session_id. Returns matched messages, SERVER_ERROR on failure.
    """
    args = request.args
    memory_ids = args.getlist("memory_id")
    # Accept both repeated params and a single comma-separated value.
    if len(memory_ids) == 1 and ',' in memory_ids[0]:
        memory_ids = memory_ids[0].split(',')
    query = args.get("query")
    if not query:
        # Previously a missing query fell through to the service layer and
        # surfaced as an opaque failure; reject it explicitly instead.
        return get_error_argument_result("query is required.")
    try:
        similarity_threshold = float(args.get("similarity_threshold", 0.2))
        keywords_similarity_weight = float(args.get("keywords_similarity_weight", 0.7))
        top_n = int(args.get("top_n", 5))
    except ValueError:
        return get_error_argument_result("similarity_threshold, keywords_similarity_weight and top_n must be numeric.")
    agent_id = args.get("agent_id", "")
    session_id = args.get("session_id", "")
    filter_dict = {
        "memory_id": memory_ids,
        "agent_id": agent_id,
        "session_id": session_id
    }
    params = {
        "query": query,
        "similarity_threshold": similarity_threshold,
        "keywords_similarity_weight": keywords_similarity_weight,
        "top_n": top_n
    }
    try:
        res = await memory_api_service.search_message(filter_dict, params)
        return get_json_result(message=True, data=res)
    except Exception as e:
        # Match the error-handling convention of the sibling endpoints.
        logging.error(e)
        return get_json_result(code=RetCode.SERVER_ERROR, message="Internal server error")
@manager.route("/messages", methods=["GET"]) # noqa: F821
@login_required
async def get_messages():
    """Fetch recent messages across one or more memories."""
    args = request.args
    memory_ids = args.getlist("memory_id")
    # Accept both repeated params and a single comma-separated value.
    if len(memory_ids) == 1 and ',' in memory_ids[0]:
        memory_ids = memory_ids[0].split(',')
    agent_id = args.get("agent_id", "")
    session_id = args.get("session_id", "")
    limit = int(args.get("limit", 10))
    if not memory_ids:
        return get_error_argument_result("memory_ids is required.")
    try:
        data = await memory_api_service.get_messages(memory_ids, agent_id, session_id, limit)
        return get_json_result(message=True, data=data)
    except Exception as unexpected:
        logging.error(unexpected)
        return get_json_result(code=RetCode.SERVER_ERROR, message="Internal server error")
@manager.route("/messages/<memory_id>:<message_id>/content", methods=["GET"]) # noqa: F821
@login_required
async def get_message_content(memory_id: str, message_id: int):
    """Return the content of a single message."""
    try:
        content = await memory_api_service.get_message_content(memory_id, message_id)
        return get_json_result(message=True, data=content)
    except NotFoundException as missing:
        logging.error(missing)
        return get_json_result(code=RetCode.NOT_FOUND, message=str(missing))
    except Exception as unexpected:
        logging.error(unexpected)
        return get_json_result(code=RetCode.SERVER_ERROR, message="Internal server error")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/apps/restful_apis/memory_api.py",
"license": "Apache License 2.0",
"lines": 263,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:api/apps/services/memory_api_service.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from api.apps import current_user
from api.db import TenantPermission
from api.db.services.memory_service import MemoryService
from api.db.services.user_service import UserTenantService
from api.db.services.canvas_service import UserCanvasService
from api.db.services.task_service import TaskService
from api.db.joint_services.memory_message_service import get_memory_size_cache, judge_system_prompt_is_default, queue_save_to_memory_task, query_message
from api.utils.memory_utils import format_ret_data_from_memory, get_memory_type_human
from api.constants import MEMORY_NAME_LIMIT, MEMORY_SIZE_LIMIT
from memory.services.messages import MessageService
from memory.utils.prompt_util import PromptAssembler
from common.constants import MemoryType, ForgettingPolicy
from common.exceptions import ArgumentException, NotFoundException
from common.time_utils import current_timestamp, timestamp_to_date
async def create_memory(memory_info: dict):
    """Validate the payload and create a memory owned by the current user.

    :param memory_info: {
        "name": str,
        "memory_type": list[str],
        "embd_id": str,
        "llm_id": str
    }
    :return: (True, formatted memory) on success, (False, error detail) otherwise.
    :raises ArgumentException: for an empty/over-long name or unknown memory type.
    """
    # Name: required, trimmed, bounded length.
    memory_name = memory_info["name"].strip()
    if not memory_name:
        raise ArgumentException("Memory name cannot be empty or whitespace.")
    if len(memory_name) > MEMORY_NAME_LIMIT:
        raise ArgumentException(f"Memory name '{memory_name}' exceeds limit of {MEMORY_NAME_LIMIT}.")
    # Memory types: must be a list of known MemoryType names (deduplicated).
    if not isinstance(memory_info["memory_type"], list):
        raise ArgumentException("Memory type must be a list.")
    requested_types = set(memory_info["memory_type"])
    invalid_type = requested_types - {member.name.lower() for member in MemoryType}
    if invalid_type:
        raise ArgumentException(f"Memory type '{invalid_type}' is not supported.")
    success, res = MemoryService.create_memory(
        tenant_id=current_user.id,
        name=memory_name,
        memory_type=list(requested_types),
        embd_id=memory_info["embd_id"],
        llm_id=memory_info["llm_id"]
    )
    if not success:
        return False, res
    return True, format_ret_data_from_memory(res)
async def update_memory(memory_id: str, new_memory_setting: dict):
    """
    Validate and apply a partial settings update to a memory.

    :param memory_id: str
    :param new_memory_setting: {
        "name": str,
        "permissions": str,
        "llm_id": str,
        "embd_id": str,
        "memory_type": list[str],
        "memory_size": int,
        "forgetting_policy": str,
        "temperature": float,
        "avatar": str,
        "description": str,
        "system_prompt": str,
        "user_prompt": str
    }
    :return: (True, memory dict) — current state if nothing changed,
        the freshly updated record otherwise.
    :raises ArgumentException: on invalid values, or when changing
        embd_id/memory_type while the memory still holds data.
    :raises NotFoundException: when the memory does not exist.
    """
    update_dict = {}
    # check name length
    if "name" in new_memory_setting:
        name = new_memory_setting["name"]
        memory_name = name.strip()
        if len(memory_name) == 0:
            raise ArgumentException("Memory name cannot be empty or whitespace.")
        if len(memory_name) > MEMORY_NAME_LIMIT:
            raise ArgumentException(f"Memory name '{memory_name}' exceeds limit of {MEMORY_NAME_LIMIT}.")
        update_dict["name"] = memory_name
    # check permissions valid
    if new_memory_setting.get("permissions"):
        if new_memory_setting["permissions"] not in [e.value for e in TenantPermission]:
            raise ArgumentException(f"Unknown permission '{new_memory_setting['permissions']}'.")
        update_dict["permissions"] = new_memory_setting["permissions"]
    if new_memory_setting.get("llm_id"):
        update_dict["llm_id"] = new_memory_setting["llm_id"]
    if new_memory_setting.get("embd_id"):
        update_dict["embd_id"] = new_memory_setting["embd_id"]
    if new_memory_setting.get("memory_type"):
        memory_type = set(new_memory_setting["memory_type"])
        invalid_type = memory_type - {e.name.lower() for e in MemoryType}
        if invalid_type:
            raise ArgumentException(f"Memory type '{invalid_type}' is not supported.")
        update_dict["memory_type"] = list(memory_type)
    # check memory_size valid
    if new_memory_setting.get("memory_size"):
        if not 0 < int(new_memory_setting["memory_size"]) <= MEMORY_SIZE_LIMIT:
            raise ArgumentException(f"Memory size should be in range (0, {MEMORY_SIZE_LIMIT}] Bytes.")
        update_dict["memory_size"] = new_memory_setting["memory_size"]
    # check forgetting_policy valid
    if new_memory_setting.get("forgetting_policy"):
        if new_memory_setting["forgetting_policy"] not in [e.value for e in ForgettingPolicy]:
            raise ArgumentException(f"Forgetting policy '{new_memory_setting['forgetting_policy']}' is not supported.")
        update_dict["forgetting_policy"] = new_memory_setting["forgetting_policy"]
    # check temperature valid
    if "temperature" in new_memory_setting:
        temperature = float(new_memory_setting["temperature"])
        if not 0 <= temperature <= 1:
            raise ArgumentException("Temperature should be in range [0, 1].")
        update_dict["temperature"] = temperature
    # allow update to empty fields
    for field in ["avatar", "description", "system_prompt", "user_prompt"]:
        if field in new_memory_setting:
            update_dict[field] = new_memory_setting[field]
    current_memory = MemoryService.get_by_memory_id(memory_id)
    if not current_memory:
        raise NotFoundException(f"Memory '{memory_id}' not found.")
    memory_dict = current_memory.to_dict()
    memory_dict.update({"memory_type": get_memory_type_human(current_memory.memory_type)})
    # Diff requested values against the current record, keeping only real changes.
    to_update = {}
    for k, v in update_dict.items():
        if isinstance(v, list):
            # Bug fix: list-valued fields are order-insensitive. Previously a
            # set-equal list still fell through to the `elif` plain list
            # comparison, so a mere reordering counted as a change.
            if set(memory_dict[k]) != set(v):
                to_update[k] = v
        elif memory_dict[k] != v:
            to_update[k] = v
    if not to_update:
        return True, memory_dict
    # check memory empty when update embd_id, memory_type
    memory_size = get_memory_size_cache(memory_id, current_memory.tenant_id)
    not_allowed_update = [f for f in ["embd_id", "memory_type"] if f in to_update and memory_size > 0]
    if not_allowed_update:
        raise ArgumentException(f"Can't update {not_allowed_update} when memory isn't empty.")
    if "memory_type" in to_update:
        if "system_prompt" not in to_update and judge_system_prompt_is_default(current_memory.system_prompt, current_memory.memory_type):
            # update old default prompt, assemble a new one
            to_update["system_prompt"] = PromptAssembler.assemble_system_prompt({"memory_type": to_update["memory_type"]})
    MemoryService.update_memory(current_memory.tenant_id, memory_id, to_update)
    updated_memory = MemoryService.get_by_memory_id(memory_id)
    return True, format_ret_data_from_memory(updated_memory)
async def delete_memory(memory_id):
    """Remove a memory record and, when indexed, its stored messages.

    :raises NotFoundException: when the memory does not exist.
    """
    memory = MemoryService.get_by_memory_id(memory_id)
    if not memory:
        raise NotFoundException(f"Memory '{memory_id}' not found.")
    MemoryService.delete_memory(memory_id)
    tenant = memory.tenant_id
    if MessageService.has_index(tenant, memory_id):
        MessageService.delete_message({"memory_id": memory_id}, tenant, memory_id)
    return True
async def list_memory(filter_params: dict, keywords: str, page: int=1, page_size: int = 50):
    """
    List memories matching the given filters, with humanized memory types.

    :param filter_params: {
        "memory_type": list[str],
        "tenant_id": list[str],
        "storage_type": str
    }
    :param keywords: str
    :param page: int
    :param page_size: int
    :return: {"memory_list": [...], "total_count": int}
    """
    filter_dict: dict = {"storage_type": filter_params.get("storage_type")}
    # NOTE(review): the HTTP layer passes request.args.get(...) strings here,
    # while this code indexes tenant_ids[0] as if it were a list — confirm the
    # expected element type with the callers.
    tenant_ids = filter_params.get("tenant_id")
    if not tenant_ids:
        # No explicit tenants: restrict to the current user's tenants.
        user_tenants = UserTenantService.get_user_tenant_relation_by_user_id(current_user.id)
        filter_dict["tenant_id"] = [tenant["tenant_id"] for tenant in user_tenants]
    else:
        if len(tenant_ids) == 1 and ',' in tenant_ids[0]:
            tenant_ids = tenant_ids[0].split(',')
        filter_dict["tenant_id"] = tenant_ids
    memory_types = filter_params.get("memory_type")
    if memory_types and len(memory_types) == 1 and ',' in memory_types[0]:
        memory_types = memory_types[0].split(',')
    filter_dict["memory_type"] = memory_types
    memory_list, count = MemoryService.get_by_filter(filter_dict, keywords, page, page_size)
    # Plain loop for the in-place update; the previous list comprehension
    # built a throwaway list of Nones purely for its side effects.
    for memory in memory_list:
        memory["memory_type"] = get_memory_type_human(memory["memory_type"])
    return {
        "memory_list": memory_list, "total_count": count
    }
async def get_memory_config(memory_id):
    """Return formatted config for a memory, including the owner's name.

    :raises NotFoundException: when the memory does not exist.
    """
    memory = MemoryService.get_with_owner_name_by_id(memory_id)
    if memory:
        return format_ret_data_from_memory(memory)
    raise NotFoundException(f"Memory '{memory_id}' not found.")
async def get_memory_messages(memory_id, agent_ids: list[str], keywords: str, page: int=1, page_size: int = 50):
    """Return one page of a memory's messages, enriched with agent display
    names and extraction-task progress.

    :raises NotFoundException: when the memory does not exist.
    """
    memory = MemoryService.get_by_memory_id(memory_id)
    if not memory:
        raise NotFoundException(f"Memory '{memory_id}' not found.")
    messages = MessageService.list_message(
        memory.tenant_id, memory_id, agent_ids, keywords, page, page_size)
    agent_name_mapping = {}
    extract_task_mapping = {}
    if messages["message_list"]:
        # Resolve all agent ids to display titles in a single batch lookup.
        agent_list = UserCanvasService.get_basic_info_by_canvas_ids([message["agent_id"] for message in messages["message_list"]])
        agent_name_mapping = {agent["id"]: agent["title"] for agent in agent_list}
        task_list = TaskService.get_tasks_progress_by_doc_ids([memory_id])
        if task_list:
            task_list.sort(key=lambda t: t["create_time"])  # asc, use newer when exist more than one task
            for task in task_list:
                # the 'digest' field carries the source_id when a task is created, so use 'digest' as key
                extract_task_mapping.update({int(task["digest"]): task})
    for message in messages["message_list"]:
        # Fall back to "Unknown" for agents that no longer resolve.
        message["agent_name"] = agent_name_mapping.get(message["agent_id"], "Unknown")
        message["task"] = extract_task_mapping.get(message["message_id"], {})
        for extract_msg in message["extract"]:
            extract_msg["agent_name"] = agent_name_mapping.get(extract_msg["agent_id"], "Unknown")
    return {"messages": messages, "storage_type": memory.storage_type}
async def add_message(memory_ids: list[str], message_dict: dict):
    """
    Queue an asynchronous "save to memory" task for each target memory.

    :param memory_ids: list[str]
    :param message_dict: {
        "agent_id": str,
        "session_id": str,
        "user_input": str,
        "agent_response": str,
        "message_type": str
    }
    :return: result tuple from queue_save_to_memory_task (the HTTP layer
        unpacks it as (success, detail)).
    """
    return await queue_save_to_memory_task(memory_ids, message_dict)
async def forget_message(memory_id: str, message_id: int):
    """Soft-delete a message by stamping its forget_at time.

    :raises NotFoundException: when the memory does not exist.
    :raises Exception: when the underlying update fails.
    """
    memory = MemoryService.get_by_memory_id(memory_id)
    if not memory:
        raise NotFoundException(f"Memory '{memory_id}' not found.")
    stamp = timestamp_to_date(current_timestamp())
    succeeded = MessageService.update_message(
        {"memory_id": memory_id, "message_id": int(message_id)},
        {"forget_at": stamp},
        memory.tenant_id, memory_id)
    if not succeeded:
        raise Exception(f"Failed to forget message '{message_id}' in memory '{memory_id}'.")
    return True
async def update_message_status(memory_id: str, message_id: int, status: bool):
    """Set a message's status flag.

    :raises NotFoundException: when the memory does not exist.
    :raises Exception: when the underlying update fails.
    """
    memory = MemoryService.get_by_memory_id(memory_id)
    if not memory:
        raise NotFoundException(f"Memory '{memory_id}' not found.")
    succeeded = MessageService.update_message(
        {"memory_id": memory_id, "message_id": int(message_id)},
        {"status": status},
        memory.tenant_id, memory_id)
    if not succeeded:
        raise Exception(f"Failed to set status for message '{message_id}' in memory '{memory_id}'.")
    return True
async def search_message(filter_dict: dict, params: dict):
    """
    Delegate a message search to query_message with the given filters.

    :param filter_dict: {
        "memory_id": list[str],
        "agent_id": str,
        "session_id": str
    }
    :param params: {
        "query": str,
        "similarity_threshold": float,
        "keywords_similarity_weight": float,
        "top_n": int
    }
    :return: whatever query_message returns for these filters.
    """
    return query_message(filter_dict, params)
async def get_messages(memory_ids: list[str], agent_id: str = "", session_id: str = "", limit: int = 10):
    """
    Get recent messages from the specified memories.

    :param memory_ids: list of memory IDs
    :param agent_id: optional agent ID for filtering
    :param session_id: optional session ID for filtering
    :param limit: maximum number of messages to return
    :return: list of recent messages
    """
    owners = [memory.tenant_id for memory in MemoryService.get_by_ids(memory_ids)]
    return MessageService.get_recent_messages(
        owners,
        memory_ids,
        agent_id,
        session_id,
        limit
    )
async def get_message_content(memory_id: str, message_id: int):
    """
    Fetch the content of one message inside a memory.

    :param memory_id: memory ID
    :param message_id: message ID
    :return: message content
    :raises NotFoundException: if memory or message not found
    """
    memory = MemoryService.get_by_memory_id(memory_id)
    if not memory:
        raise NotFoundException(f"Memory '{memory_id}' not found.")
    content = MessageService.get_by_message_id(memory_id, message_id, memory.tenant_id)
    if not content:
        raise NotFoundException(f"Message '{message_id}' in memory '{memory_id}' not found.")
    return content
"repo_id": "infiniflow/ragflow",
"file_path": "api/apps/services/memory_api_service.py",
"license": "Apache License 2.0",
"lines": 302,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:test/testcases/utils/engine_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import requests
_DOC_ENGINE_CACHE = None
def get_doc_engine(rag=None) -> str:
    """Return the lower-cased doc engine name.

    Resolution order:
      1. The DOC_ENGINE environment variable (always wins and refreshes the cache).
      2. A previously cached probe result.
      3. The /system/status endpoint of *rag*'s API, when a client is provided.
    Returns "" when the engine cannot be determined.
    """
    global _DOC_ENGINE_CACHE
    env = (os.getenv("DOC_ENGINE") or "").strip().lower()
    if env:
        _DOC_ENGINE_CACHE = env
        return env
    if _DOC_ENGINE_CACHE:
        return _DOC_ENGINE_CACHE
    if rag is None:
        return ""
    try:
        api_url = getattr(rag, "api_url", "")
        if "/api/" in api_url:
            base_url, version = api_url.rsplit("/api/", 1)
            status_url = f"{base_url}/{version}/system/status"
        else:
            status_url = f"{api_url}/system/status"
        headers = getattr(rag, "authorization_header", {})
        # Bound the probe so a hung server cannot stall the test session.
        res = requests.get(status_url, headers=headers, timeout=10).json()
        engine = str(res.get("data", {}).get("doc_engine", {}).get("type", "")).lower()
        if engine:
            _DOC_ENGINE_CACHE = engine
            return engine
    except Exception:
        return ""
    # Bug fix: previously fell off the end (returning None) when the status
    # endpoint answered but reported no engine type; honor the -> str contract.
    return ""
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/utils/engine_utils.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:common/data_source/rdbms_connector.py | """RDBMS (MySQL/PostgreSQL) data source connector for importing data from relational databases."""
import hashlib
import json
import logging
from datetime import datetime, timezone
from enum import Enum
from typing import Any, Dict, Generator, Optional, Union
from common.data_source.config import DocumentSource, INDEX_BATCH_SIZE
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
)
from common.data_source.interfaces import LoadConnector, PollConnector, SecondsSinceUnixEpoch
from common.data_source.models import Document
class DatabaseType(str, Enum):
    """Supported database types.

    The values double as connector identifiers (used in generated document
    IDs and mapped to DocumentSource), so they must stay lower-case.
    """
    MYSQL = "mysql"
    POSTGRESQL = "postgresql"
class RDBMSConnector(LoadConnector, PollConnector):
"""
RDBMS connector for importing data from MySQL and PostgreSQL databases.
This connector allows users to:
1. Connect to a MySQL or PostgreSQL database
2. Execute a SQL query to extract data
3. Map columns to content (for vectorization) and metadata
4. Sync data in batch or incremental mode using a timestamp column
"""
def __init__(
    self,
    db_type: str,
    host: str,
    port: int,
    database: str,
    query: str,
    content_columns: str,
    metadata_columns: Optional[str] = None,
    id_column: Optional[str] = None,
    timestamp_column: Optional[str] = None,
    batch_size: int = INDEX_BATCH_SIZE,
) -> None:
    """
    Configure the RDBMS connector.

    Args:
        db_type: Database type ('mysql' or 'postgresql')
        host: Database host
        port: Database port
        database: Database name
        query: SQL query to execute; empty means "discover and load all tables"
        content_columns: Comma-separated column names used for document content
        metadata_columns: Comma-separated column names kept as metadata (optional)
        id_column: Column providing a stable document ID (optional; content hash otherwise)
        timestamp_column: Datetime column used for incremental sync (optional)
        batch_size: Number of documents yielded per batch
    """
    def _split_csv(raw: Optional[str]) -> list:
        # Split a comma-separated column spec, dropping blanks and whitespace.
        return [part.strip() for part in (raw or "").split(",") if part.strip()]

    self.db_type = DatabaseType(db_type.lower())
    self.host = host.strip()
    self.port = port
    self.database = database.strip()
    self.query = query.strip()
    self.content_columns = _split_csv(content_columns)
    self.metadata_columns = _split_csv(metadata_columns)
    self.id_column = id_column.strip() if id_column else None
    self.timestamp_column = timestamp_column.strip() if timestamp_column else None
    self.batch_size = batch_size
    # Lazily created in _get_connection(); credentials set via load_credentials().
    self._connection = None
    self._credentials: Dict[str, Any] = {}
def load_credentials(self, credentials: Dict[str, Any]) -> Dict[str, Any] | None:
    """Store database credentials after checking username/password are present.

    :raises ConnectorMissingCredentialError: when either key is missing or empty.
    :return: always None (no derived credential payload).
    """
    logging.debug(f"Loading credentials for {self.db_type} database: {self.database}")
    for required in ("username", "password"):
        if not credentials.get(required):
            raise ConnectorMissingCredentialError(f"RDBMS ({self.db_type}): missing {required}")
    self._credentials = credentials
    return None
def _get_connection(self):
    """Return a live DB connection, creating and caching one on first use.

    The driver is imported lazily so the connector can be configured even
    when the client library for the other database type is absent.

    :raises ConnectorValidationError: if the driver is missing or the
        connection attempt fails.
    """
    if self._connection is not None:
        return self._connection
    username = self._credentials.get("username")
    password = self._credentials.get("password")
    if self.db_type == DatabaseType.MYSQL:
        try:
            import mysql.connector
        except ImportError:
            raise ConnectorValidationError(
                "MySQL connector not installed. Please install mysql-connector-python."
            )
        try:
            self._connection = mysql.connector.connect(
                host=self.host,
                port=self.port,
                database=self.database,
                user=username,
                password=password,
                charset='utf8mb4',
                use_unicode=True,
            )
        except Exception as e:
            raise ConnectorValidationError(f"Failed to connect to MySQL: {e}")
    elif self.db_type == DatabaseType.POSTGRESQL:
        try:
            import psycopg2
        except ImportError:
            raise ConnectorValidationError(
                "PostgreSQL connector not installed. Please install psycopg2-binary."
            )
        try:
            self._connection = psycopg2.connect(
                host=self.host,
                port=self.port,
                dbname=self.database,
                user=username,
                password=password,
            )
        except Exception as e:
            raise ConnectorValidationError(f"Failed to connect to PostgreSQL: {e}")
    return self._connection
def _close_connection(self):
    """Close and forget the cached database connection, ignoring close errors."""
    conn = self._connection
    if conn is None:
        return
    try:
        conn.close()
    except Exception:
        # Best-effort close: a broken connection is discarded either way.
        pass
    self._connection = None
def _get_tables(self) -> list[str]:
    """Return the names of all base tables in the configured database."""
    cursor = self._get_connection().cursor()
    try:
        if self.db_type == DatabaseType.MYSQL:
            cursor.execute("SHOW TABLES")
        else:
            # PostgreSQL: restrict to ordinary tables in the public schema.
            cursor.execute(
                "SELECT table_name FROM information_schema.tables "
                "WHERE table_schema = 'public' AND table_type = 'BASE TABLE'"
            )
        return [record[0] for record in cursor.fetchall()]
    finally:
        cursor.close()
def _build_query_with_time_filter(
    self,
    start: Optional[datetime] = None,
    end: Optional[datetime] = None,
) -> str:
    """Append timestamp bounds to the configured query for incremental sync.

    Returns "" when no query is configured (table discovery handles that
    case), and the bare query when there is no timestamp column or no bounds.
    """
    import re

    if not self.query:
        return ""  # Will be handled by table discovery
    base_query = self.query.rstrip(";")
    if not self.timestamp_column or (start is None and end is None):
        return base_query
    # Bug fix: the old check was `"where" in base_query.lower()`, which also
    # matched the substring inside identifiers (e.g. a table or column named
    # "whereabouts"), wrongly emitting AND with no WHERE clause present.
    has_where = re.search(r"\bwhere\b", base_query, re.IGNORECASE) is not None
    connector = " AND" if has_where else " WHERE"

    def _fmt(ts: datetime) -> str:
        # MySQL prefers 'YYYY-MM-DD HH:MM:SS'; PostgreSQL accepts ISO-8601.
        if self.db_type == DatabaseType.MYSQL:
            return ts.strftime('%Y-%m-%d %H:%M:%S')
        return ts.isoformat()

    # NOTE(review): timestamp_column comes from user configuration and is
    # interpolated directly into SQL; values are datetime-derived, but the
    # column name itself should be validated/quoted upstream.
    time_conditions = []
    if start is not None:
        time_conditions.append(f"{self.timestamp_column} > '{_fmt(start)}'")
    if end is not None:
        time_conditions.append(f"{self.timestamp_column} <= '{_fmt(end)}'")
    if time_conditions:
        return f"{base_query}{connector} {' AND '.join(time_conditions)}"
    return base_query
def _row_to_document(self, row: Union[tuple, list, Dict[str, Any]], column_names: list) -> Document:
    """Convert one result row into a Document.

    Content is assembled from self.content_columns, metadata from
    self.metadata_columns; the document id comes from self.id_column when
    configured, otherwise from an MD5 hash of the content.
    """
    # Rows may arrive as positional tuples/lists (default cursors) or dicts.
    row_dict = dict(zip(column_names, row)) if isinstance(row, (list, tuple)) else row
    content_parts = []
    for col in self.content_columns:
        if col in row_dict and row_dict[col] is not None:
            value = row_dict[col]
            if isinstance(value, (dict, list)):
                value = json.dumps(value, ensure_ascii=False)
            # Use brackets around field name to ensure it's distinguishable
            # after chunking (TxtParser strips \n delimiters during merge)
            content_parts.append(f"【{col}】: {value}")
    content = "\n".join(content_parts)
    if self.id_column and self.id_column in row_dict:
        doc_id = f"{self.db_type}:{self.database}:{row_dict[self.id_column]}"
    else:
        # No stable key configured: hash the content (identical rows collapse
        # into one document id).
        content_hash = hashlib.md5(content.encode()).hexdigest()
        doc_id = f"{self.db_type}:{self.database}:{content_hash}"
    metadata = {}
    for col in self.metadata_columns:
        if col in row_dict and row_dict[col] is not None:
            value = row_dict[col]
            # Normalize metadata values to strings/JSON for storage.
            if isinstance(value, datetime):
                value = value.isoformat()
            elif isinstance(value, (dict, list)):
                value = json.dumps(value, ensure_ascii=False)
            else:
                value = str(value)
            metadata[col] = value
    # Prefer the row's own timestamp; naive datetimes are assumed UTC.
    doc_updated_at = datetime.now(timezone.utc)
    if self.timestamp_column and self.timestamp_column in row_dict:
        ts_value = row_dict[self.timestamp_column]
        if isinstance(ts_value, datetime):
            if ts_value.tzinfo is None:
                doc_updated_at = ts_value.replace(tzinfo=timezone.utc)
            else:
                doc_updated_at = ts_value
    # The first content column doubles as a human-readable identifier,
    # flattened to one line and capped at 100 characters.
    first_content_col = self.content_columns[0] if self.content_columns else "record"
    semantic_id = str(row_dict.get(first_content_col, "database_record")).replace("\n", " ").replace("\r", " ").strip()[:100]
    return Document(
        id=doc_id,
        blob=content.encode("utf-8"),
        source=DocumentSource(self.db_type.value),
        semantic_identifier=semantic_id,
        extension=".txt",
        doc_updated_at=doc_updated_at,
        size_bytes=len(content.encode("utf-8")),
        metadata=metadata if metadata else None,
    )
def _yield_documents_from_query(
self,
query: str,
) -> Generator[list[Document], None, None]:
"""Generate documents from a single query."""
connection = self._get_connection()
cursor = connection.cursor()
try:
logging.info(f"Executing query: {query[:200]}...")
cursor.execute(query)
column_names = [desc[0] for desc in cursor.description]
batch: list[Document] = []
for row in cursor:
try:
doc = self._row_to_document(row, column_names)
batch.append(doc)
if len(batch) >= self.batch_size:
yield batch
batch = []
except Exception as e:
logging.warning(f"Error converting row to document: {e}")
continue
if batch:
yield batch
finally:
try:
cursor.fetchall()
except Exception:
pass
cursor.close()
def _yield_documents(
self,
start: Optional[datetime] = None,
end: Optional[datetime] = None,
) -> Generator[list[Document], None, None]:
"""Generate documents from database query results."""
if self.query:
query = self._build_query_with_time_filter(start, end)
yield from self._yield_documents_from_query(query)
else:
tables = self._get_tables()
logging.info(f"No query specified. Loading all {len(tables)} tables: {tables}")
for table in tables:
query = f"SELECT * FROM {table}"
logging.info(f"Loading table: {table}")
yield from self._yield_documents_from_query(query)
self._close_connection()
def load_from_state(self) -> Generator[list[Document], None, None]:
"""Load all documents from the database (full sync)."""
logging.debug(f"Loading all records from {self.db_type} database: {self.database}")
return self._yield_documents()
def poll_source(
self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch
) -> Generator[list[Document], None, None]:
"""Poll for new/updated documents since the last sync (incremental sync)."""
if not self.timestamp_column:
logging.warning(
"No timestamp column configured for incremental sync. "
"Falling back to full sync."
)
return self.load_from_state()
start_datetime = datetime.fromtimestamp(start, tz=timezone.utc)
end_datetime = datetime.fromtimestamp(end, tz=timezone.utc)
logging.debug(
f"Polling {self.db_type} database {self.database} "
f"from {start_datetime} to {end_datetime}"
)
return self._yield_documents(start_datetime, end_datetime)
def validate_connector_settings(self) -> None:
"""Validate connector settings by testing the connection."""
if not self._credentials:
raise ConnectorMissingCredentialError("RDBMS credentials not loaded.")
if not self.host:
raise ConnectorValidationError("Database host is required.")
if not self.database:
raise ConnectorValidationError("Database name is required.")
if not self.content_columns:
raise ConnectorValidationError(
"At least one content column must be specified."
)
try:
connection = self._get_connection()
cursor = connection.cursor()
test_query = "SELECT 1"
cursor.execute(test_query)
cursor.fetchone()
cursor.close()
logging.info(f"Successfully connected to {self.db_type} database: {self.database}")
except ConnectorValidationError:
self._close_connection()
raise
except Exception as e:
self._close_connection()
raise ConnectorValidationError(
f"Failed to connect to {self.db_type} database: {str(e)}"
)
finally:
self._close_connection()
if __name__ == "__main__":
    # Manual smoke test: connect using environment-provided credentials and
    # print the first batch of documents.
    import os

    creds = {
        "username": os.environ.get("DB_USERNAME", "root"),
        "password": os.environ.get("DB_PASSWORD", ""),
    }
    connector = RDBMSConnector(
        db_type="mysql",
        host=os.environ.get("DB_HOST", "localhost"),
        port=int(os.environ.get("DB_PORT", "3306")),
        database=os.environ.get("DB_NAME", "test"),
        query="SELECT * FROM products LIMIT 10",
        content_columns="name,description",
        metadata_columns="id,category,price",
        id_column="id",
        timestamp_column="updated_at",
    )
    try:
        connector.load_credentials(creds)
        connector.validate_connector_settings()
        for batch in connector.load_from_state():
            print(f"Batch of {len(batch)} documents:")
            for doc in batch:
                print(f"  - {doc.id}: {doc.semantic_identifier}")
            break
    except Exception as e:
        print(f"Error: {e}")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/rdbms_connector.py",
"license": "Apache License 2.0",
"lines": 343,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/doc_store/ob_conn_base.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import re
import threading
import time
from abc import abstractmethod
from typing import Any
from pymysql.converters import escape_string
from pyobvector import ObVecClient, FtsIndexParam, FtsParser, VECTOR
from sqlalchemy import Column, JSON, Table
from sqlalchemy.dialects.mysql import VARCHAR
from common.doc_store.doc_store_base import DocStoreConnection, MatchExpr, OrderByExpr
# Number of attempts for retryable operations.
ATTEMPT_TIME = 2

# Common templates for OceanBase
index_name_template = "ix_%s_%s"  # regular index: ix_<table>_<column>
fulltext_index_name_template = "fts_idx_%s"  # fulltext index: fts_idx_<column>
fulltext_search_template = "MATCH (%s) AGAINST ('%s' IN NATURAL LANGUAGE MODE)"
vector_search_template = "cosine_distance(%s, '%s')"
# Matches vector column names of the form q_<size>_vec, capturing the size.
vector_column_pattern = re.compile(r"q_(?P<vector_size>\d+)_vec")

# Document metadata table columns
doc_meta_columns = [
    Column("id", VARCHAR(256), primary_key=True, comment="document id"),
    Column("kb_id", VARCHAR(256), nullable=False, comment="knowledge base id"),
    Column("meta_fields", JSON, nullable=True, comment="document metadata fields"),
]
# Derived lookups used when reading/writing metadata rows.
doc_meta_column_names = [col.name for col in doc_meta_columns]
doc_meta_column_types = {col.name: col.type for col in doc_meta_columns}
def get_value_str(value: Any) -> str:
    """Render *value* as a SQL literal for MySQL/OceanBase statements."""
    if isinstance(value, str):
        # escape_string already handles all necessary escaping for MySQL/OceanBase
        # including backslashes, quotes, newlines, etc.
        return f"'{escape_string(value)}'"
    if isinstance(value, bool):
        # Checked before the generic fallback: bool is an int subclass.
        return "true" if value else "false"
    if value is None:
        return "NULL"
    if isinstance(value, (list, dict)):
        # Containers are stored as escaped JSON text.
        return f"'{escape_string(json.dumps(value, ensure_ascii=False))}'"
    return str(value)
def _try_with_lock(lock_name: str, process_func, check_func, timeout: int = None):
"""Execute function with distributed lock."""
if not timeout:
timeout = int(os.environ.get("OB_DDL_TIMEOUT", "60"))
if not check_func():
from rag.utils.redis_conn import RedisDistributedLock
lock = RedisDistributedLock(lock_name)
if lock.acquire():
try:
process_func()
return
except Exception as e:
if "Duplicate" in str(e):
return
raise
finally:
lock.release()
if not check_func():
time.sleep(1)
count = 1
while count < timeout and not check_func():
count += 1
time.sleep(1)
if count >= timeout and not check_func():
raise Exception(f"Timeout to wait for process complete for {lock_name}.")
class OBConnectionBase(DocStoreConnection):
    """Base class for OceanBase document store connections.

    Provides table/index DDL guarded by distributed locks, existence caching,
    SQL fragment builders for vector/fulltext/filter search, and generic
    get/delete operations. Subclasses supply the schema (columns, indexes)
    and the search/insert/update implementations.
    """

    def __init__(self, logger_name: str = 'ragflow.ob_conn'):
        # Imported lazily to avoid a circular import at module load time.
        from common.doc_store.ob_conn_pool import OB_CONN
        self.logger = logging.getLogger(logger_name)
        self.client: ObVecClient = OB_CONN.get_client()
        self.es = OB_CONN.get_hybrid_search_client()
        self.db_name = OB_CONN.get_db_name()
        self.uri = OB_CONN.get_uri()
        self._load_env_vars()
        # Tables already verified to exist with all required indexes/columns.
        self._table_exists_cache: set[str] = set()
        self._table_exists_cache_lock = threading.RLock()
        # Cache for vector columns: stores (table_name, vector_size) tuples
        self._vector_column_cache: set[tuple[str, int]] = set()
        self._vector_column_cache_lock = threading.RLock()
        self.logger.info(f"OceanBase {self.uri} connection initialized.")

    def _load_env_vars(self):
        """Read feature toggles from environment variables."""
        def is_true(var: str, default: str) -> bool:
            return os.getenv(var, default).lower() in ['true', '1', 'yes', 'y']
        self.enable_fulltext_search = is_true('ENABLE_FULLTEXT_SEARCH', 'true')
        self.use_fulltext_hint = is_true('USE_FULLTEXT_HINT', 'true')
        self.search_original_content = is_true("SEARCH_ORIGINAL_CONTENT", 'true')
        self.enable_hybrid_search = is_true('ENABLE_HYBRID_SEARCH', 'false')
        self.use_fulltext_first_fusion_search = is_true('USE_FULLTEXT_FIRST_FUSION_SEARCH', 'true')
        # Adjust settings based on hybrid search availability
        if self.es is not None and self.search_original_content:
            self.logger.info("HybridSearch is enabled, forcing search_original_content to False")
            self.search_original_content = False

    """
    Template methods - must be implemented by subclasses
    """

    @abstractmethod
    def get_index_columns(self) -> list[str]:
        """Return list of column names that need regular indexes."""
        raise NotImplementedError("Not implemented")

    @abstractmethod
    def get_fulltext_columns(self) -> list[str]:
        """Return list of column names that need fulltext indexes (without weight suffix)."""
        raise NotImplementedError("Not implemented")

    @abstractmethod
    def get_column_definitions(self) -> list[Column]:
        """Return list of column definitions for table creation."""
        raise NotImplementedError("Not implemented")

    def get_extra_columns(self) -> list[Column]:
        """Return list of extra columns to add after table creation. Override if needed."""
        return []

    def get_table_name(self, index_name: str, dataset_id: str) -> str:
        """Return the actual table name given index_name and dataset_id."""
        return index_name

    @abstractmethod
    def get_lock_prefix(self) -> str:
        """Return the lock name prefix for distributed locking."""
        raise NotImplementedError("Not implemented")

    """
    Database operations
    """

    def db_type(self) -> str:
        """Identify the backing document-store engine."""
        return "oceanbase"

    def health(self) -> dict:
        """Return a minimal health snapshot: URI and server version comment."""
        return {
            "uri": self.uri,
            "version_comment": self._get_variable_value("version_comment")
        }

    def _get_variable_value(self, var_name: str) -> Any:
        """Fetch a server variable via SHOW VARIABLES; raise if it is absent."""
        rows = self.client.perform_raw_text_sql(f"SHOW VARIABLES LIKE '{var_name}'")
        for row in rows:
            # Result rows are (name, value); return the first match.
            return row[1]
        raise Exception(f"Variable '{var_name}' not found.")

    """
    Table operations - common implementation using template methods
    """

    def _check_table_exists_cached(self, table_name: str) -> bool:
        """
        Check table existence with cache to reduce INFORMATION_SCHEMA queries.
        Thread-safe implementation using RLock.

        A table only enters the cache once the table, all regular/fulltext
        indexes, and all extra columns have been verified to exist.
        """
        if table_name in self._table_exists_cache:
            return True
        try:
            if not self.client.check_table_exists(table_name):
                return False
            # Check regular indexes
            for column_name in self.get_index_columns():
                if not self._index_exists(table_name, index_name_template % (table_name, column_name)):
                    return False
            # Check fulltext indexes
            for column_name in self.get_fulltext_columns():
                if not self._index_exists(table_name, fulltext_index_name_template % column_name):
                    return False
            # Check extra columns
            for column in self.get_extra_columns():
                if not self._column_exist(table_name, column.name):
                    return False
        except Exception as e:
            raise Exception(f"OBConnection._check_table_exists_cached error: {str(e)}")
        # Double-checked insert so concurrent callers only add the entry once.
        with self._table_exists_cache_lock:
            if table_name not in self._table_exists_cache:
                self._table_exists_cache.add(table_name)
        return True

    def _create_table(self, table_name: str):
        """Create table using column definitions from subclass."""
        self._create_table_with_columns(table_name, self.get_column_definitions())

    def create_idx(self, index_name: str, dataset_id: str, vector_size: int, parser_id: str = None):
        """Create index/table with all necessary indexes.

        Every DDL step runs under a distributed lock so exactly one worker
        performs it; the rest wait for the corresponding check to pass.
        """
        table_name = self.get_table_name(index_name, dataset_id)
        lock_prefix = self.get_lock_prefix()
        try:
            _try_with_lock(
                lock_name=f"{lock_prefix}create_table_{table_name}",
                check_func=lambda: self.client.check_table_exists(table_name),
                process_func=lambda: self._create_table(table_name),
            )
            # cn=column_name default-arg binds the loop variable per iteration.
            for column_name in self.get_index_columns():
                _try_with_lock(
                    lock_name=f"{lock_prefix}add_idx_{table_name}_{column_name}",
                    check_func=lambda cn=column_name: self._index_exists(table_name,
                                                                         index_name_template % (table_name, cn)),
                    process_func=lambda cn=column_name: self._add_index(table_name, cn),
                )
            for column_name in self.get_fulltext_columns():
                _try_with_lock(
                    lock_name=f"{lock_prefix}add_fulltext_idx_{table_name}_{column_name}",
                    check_func=lambda cn=column_name: self._index_exists(table_name, fulltext_index_name_template % cn),
                    process_func=lambda cn=column_name: self._add_fulltext_index(table_name, cn),
                )
            # Add vector column and index (skip metadata refresh, will be done in finally)
            self._ensure_vector_column_exists(table_name, vector_size, refresh_metadata=False)
            # Add extra columns if any
            for column in self.get_extra_columns():
                _try_with_lock(
                    lock_name=f"{lock_prefix}add_{column.name}_{table_name}",
                    check_func=lambda c=column: self._column_exist(table_name, c.name),
                    process_func=lambda c=column: self._add_column(table_name, c),
                )
        except Exception as e:
            raise Exception(f"OBConnection.create_idx error: {str(e)}")
        finally:
            self.client.refresh_metadata([table_name])

    def create_doc_meta_idx(self, index_name: str):
        """
        Create a document metadata table.
        Table name pattern: ragflow_doc_meta_{tenant_id}
        - Per-tenant metadata table for storing document metadata fields

        Returns True on success, False on failure (errors are logged, not raised).
        """
        table_name = index_name
        lock_prefix = self.get_lock_prefix()
        try:
            # Create table with distributed lock
            _try_with_lock(
                lock_name=f"{lock_prefix}create_doc_meta_table_{table_name}",
                check_func=lambda: self.client.check_table_exists(table_name),
                process_func=lambda: self._create_table_with_columns(table_name, doc_meta_columns),
            )
            # Create index on kb_id for better query performance
            _try_with_lock(
                lock_name=f"{lock_prefix}add_idx_{table_name}_kb_id",
                check_func=lambda: self._index_exists(table_name, index_name_template % (table_name, "kb_id")),
                process_func=lambda: self._add_index(table_name, "kb_id"),
            )
            self.logger.info(f"Created document metadata table '{table_name}'.")
            return True
        except Exception as e:
            self.logger.error(f"OBConnection.create_doc_meta_idx error: {str(e)}")
            return False
        finally:
            self.client.refresh_metadata([table_name])

    def delete_idx(self, index_name: str, dataset_id: str):
        """Delete index/table."""
        # For doc_meta tables, use index_name directly as table name
        if index_name.startswith("ragflow_doc_meta_"):
            table_name = index_name
        else:
            table_name = self.get_table_name(index_name, dataset_id)
        try:
            if self.client.check_table_exists(table_name=table_name):
                self.client.drop_table_if_exist(table_name)
                self.logger.info(f"Dropped table '{table_name}'.")
        except Exception as e:
            raise Exception(f"OBConnection.delete_idx error: {str(e)}")
        # NOTE(review): the dropped table is not evicted from
        # _table_exists_cache here — confirm whether stale cache entries
        # after delete_idx are handled elsewhere.

    def index_exist(self, index_name: str, dataset_id: str = None) -> bool:
        """Check if index/table exists."""
        # For doc_meta tables, use index_name directly and only check table existence
        # (metadata tables don't have fulltext/vector indexes that chunk tables have)
        if index_name.startswith("ragflow_doc_meta_"):
            if index_name in self._table_exists_cache:
                return True
            if not self.client.check_table_exists(index_name):
                return False
            with self._table_exists_cache_lock:
                self._table_exists_cache.add(index_name)
            return True
        table_name = self.get_table_name(index_name, dataset_id) if dataset_id else index_name
        return self._check_table_exists_cached(table_name)

    """
    Table operations - helper methods
    """

    def _get_count(self, table_name: str, filter_list: list[str] = None) -> int:
        """Return COUNT(*) for *table_name*, ANDing any raw SQL filters given."""
        where_clause = "WHERE " + " AND ".join(filter_list) if filter_list and len(filter_list) > 0 else ""
        (count,) = self.client.perform_raw_text_sql(
            f"SELECT COUNT(*) FROM {table_name} {where_clause}"
        ).fetchone()
        return count

    def _column_exist(self, table_name: str, column_name: str) -> bool:
        """Check column existence via INFORMATION_SCHEMA.COLUMNS."""
        return self._get_count(
            table_name="INFORMATION_SCHEMA.COLUMNS",
            filter_list=[
                f"TABLE_SCHEMA = '{self.db_name}'",
                f"TABLE_NAME = '{table_name}'",
                f"COLUMN_NAME = '{column_name}'",
            ]) > 0

    def _index_exists(self, table_name: str, idx_name: str) -> bool:
        """Check index existence via INFORMATION_SCHEMA.STATISTICS."""
        return self._get_count(
            table_name="INFORMATION_SCHEMA.STATISTICS",
            filter_list=[
                f"TABLE_SCHEMA = '{self.db_name}'",
                f"TABLE_NAME = '{table_name}'",
                f"INDEX_NAME = '{idx_name}'",
            ]) > 0

    def _create_table_with_columns(self, table_name: str, columns: list[Column]):
        """Create table with specified columns."""
        # Drop any stale SQLAlchemy metadata entry before re-registering.
        if table_name in self.client.metadata_obj.tables:
            self.client.metadata_obj.remove(Table(table_name, self.client.metadata_obj))
        table_options = {
            "mysql_charset": "utf8mb4",
            "mysql_collate": "utf8mb4_unicode_ci",
            "mysql_organization": "heap",
        }
        # Columns are copied so the shared definitions are never bound to a table.
        self.client.create_table(
            table_name=table_name,
            columns=[c.copy() for c in columns],
            **table_options,
        )
        self.logger.info(f"Created table '{table_name}'.")

    def _add_index(self, table_name: str, column_name: str):
        """Create a regular (non-vector) index on *column_name*."""
        idx_name = index_name_template % (table_name, column_name)
        self.client.create_index(
            table_name=table_name,
            is_vec_index=False,
            index_name=idx_name,
            column_names=[column_name],
        )
        self.logger.info(f"Created index '{idx_name}' on table '{table_name}'.")

    def _add_fulltext_index(self, table_name: str, column_name: str):
        """Create a fulltext index on *column_name* using the IK parser."""
        fulltext_idx_name = fulltext_index_name_template % column_name
        self.client.create_fts_idx_with_fts_index_param(
            table_name=table_name,
            fts_idx_param=FtsIndexParam(
                index_name=fulltext_idx_name,
                field_names=[column_name],
                parser_type=FtsParser.IK,
            ),
        )
        self.logger.info(f"Created full text index '{fulltext_idx_name}' on table '{table_name}'.")

    def _add_vector_column(self, table_name: str, vector_size: int):
        """Add a nullable VECTOR column named q_<size>_vec."""
        vector_field_name = f"q_{vector_size}_vec"
        self.client.add_columns(
            table_name=table_name,
            columns=[Column(vector_field_name, VECTOR(vector_size), nullable=True)],
        )
        self.logger.info(f"Added vector column '{vector_field_name}' to table '{table_name}'.")

    def _add_vector_index(self, table_name: str, vector_field_name: str):
        """Create an HNSW (cosine) vector index on *vector_field_name*."""
        vector_idx_name = f"{vector_field_name}_idx"
        self.client.create_index(
            table_name=table_name,
            is_vec_index=True,
            index_name=vector_idx_name,
            column_names=[vector_field_name],
            vidx_params="distance=cosine, type=hnsw, lib=vsag",
        )
        self.logger.info(
            f"Created vector index '{vector_idx_name}' on table '{table_name}' with column '{vector_field_name}'."
        )

    def _add_column(self, table_name: str, column: Column):
        """Add *column* to *table_name*; failures are logged, not raised."""
        try:
            self.client.add_columns(
                table_name=table_name,
                columns=[column.copy()],
            )
            self.logger.info(f"Added column '{column.name}' to table '{table_name}'.")
        except Exception as e:
            self.logger.warning(f"Failed to add column '{column.name}' to table '{table_name}': {str(e)}")

    def _ensure_vector_column_exists(self, table_name: str, vector_size: int, refresh_metadata: bool = True):
        """
        Ensure vector column and index exist for the given vector size.
        This method is safe to call multiple times - it will skip if already exists.
        Uses cache to avoid repeated INFORMATION_SCHEMA queries.
        Args:
            table_name: Name of the table
            vector_size: Size of the vector column
            refresh_metadata: Whether to refresh SQLAlchemy metadata after changes (default True)
        """
        if vector_size <= 0:
            return
        cache_key = (table_name, vector_size)
        # Check cache first
        if cache_key in self._vector_column_cache:
            return
        lock_prefix = self.get_lock_prefix()
        vector_field_name = f"q_{vector_size}_vec"
        vector_index_name = f"{vector_field_name}_idx"
        # Check if already exists (may have been created by another process)
        column_exists = self._column_exist(table_name, vector_field_name)
        index_exists = self._index_exists(table_name, vector_index_name)
        if column_exists and index_exists:
            # Already exists, add to cache and return
            with self._vector_column_cache_lock:
                self._vector_column_cache.add(cache_key)
            return
        # Create column if needed
        if not column_exists:
            _try_with_lock(
                lock_name=f"{lock_prefix}add_vector_column_{table_name}_{vector_field_name}",
                check_func=lambda: self._column_exist(table_name, vector_field_name),
                process_func=lambda: self._add_vector_column(table_name, vector_size),
            )
        # Create index if needed
        if not index_exists:
            _try_with_lock(
                lock_name=f"{lock_prefix}add_vector_idx_{table_name}_{vector_field_name}",
                check_func=lambda: self._index_exists(table_name, vector_index_name),
                process_func=lambda: self._add_vector_index(table_name, vector_field_name),
            )
        if refresh_metadata:
            self.client.refresh_metadata([table_name])
        # Add to cache after successful creation
        with self._vector_column_cache_lock:
            self._vector_column_cache.add(cache_key)

    def _execute_search_sql(self, sql: str) -> tuple[list, float]:
        """Run *sql* and return (rows, elapsed seconds)."""
        start_time = time.time()
        res = self.client.perform_raw_text_sql(sql)
        rows = res.fetchall()
        elapsed_time = time.time() - start_time
        return rows, elapsed_time

    def _parse_fulltext_columns(
            self,
            fulltext_query: str,
            fulltext_columns: list[str]
    ) -> tuple[dict[str, str], dict[str, float]]:
        """
        Parse fulltext search columns with optional weight suffix and build search expressions.
        Args:
            fulltext_query: The escaped fulltext query string
            fulltext_columns: List of column names, optionally with weight suffix (e.g., "col^0.5")
        Returns:
            Tuple of (fulltext_search_expr dict, fulltext_search_weight dict)
            where weights are normalized to 0~1
        """
        fulltext_search_expr: dict[str, str] = {}
        fulltext_search_weight: dict[str, float] = {}
        # get fulltext match expression and weight values
        for field in fulltext_columns:
            parts = field.split("^")
            column_name: str = parts[0]
            column_weight: float = float(parts[1]) if (len(parts) > 1 and parts[1]) else 1.0
            fulltext_search_weight[column_name] = column_weight
            fulltext_search_expr[column_name] = fulltext_search_template % (column_name, fulltext_query)
        # adjust the weight to 0~1
        weight_sum = sum(fulltext_search_weight.values())
        n = len(fulltext_search_weight)
        if weight_sum <= 0 < n:
            # All weights are 0 (e.g. "col^0"); use equal weights to avoid ZeroDivisionError
            for column_name in fulltext_search_weight:
                fulltext_search_weight[column_name] = 1.0 / n
        else:
            for column_name in fulltext_search_weight:
                fulltext_search_weight[column_name] = fulltext_search_weight[column_name] / weight_sum
        return fulltext_search_expr, fulltext_search_weight

    def _build_vector_search_sql(
            self,
            table_name: str,
            fields_expr: str,
            vector_search_score_expr: str,
            filters_expr: str,
            vector_search_filter: str,
            vector_search_expr: str,
            limit: int,
            vector_topn: int,
            offset: int = 0
    ) -> str:
        """Assemble an approximate (ANN) vector search statement."""
        sql = (
            f"SELECT {fields_expr}, {vector_search_score_expr} AS _score"
            f" FROM {table_name}"
            f" WHERE {filters_expr} AND {vector_search_filter}"
            f" ORDER BY {vector_search_expr}"
            f" APPROXIMATE LIMIT {limit if limit != 0 else vector_topn}"
        )
        if offset != 0:
            sql += f" OFFSET {offset}"
        return sql

    def _build_fulltext_search_sql(
            self,
            table_name: str,
            fields_expr: str,
            fulltext_search_score_expr: str,
            filters_expr: str,
            fulltext_search_filter: str,
            offset: int,
            limit: int,
            fulltext_topn: int,
            hint: str = ""
    ) -> str:
        """Assemble a MATCH ... AGAINST fulltext search statement."""
        hint_expr = f"{hint} " if hint else ""
        return (
            f"SELECT {hint_expr}{fields_expr}, {fulltext_search_score_expr} AS _score"
            f" FROM {table_name}"
            f" WHERE {filters_expr} AND {fulltext_search_filter}"
            f" ORDER BY _score DESC"
            f" LIMIT {offset}, {limit if limit != 0 else fulltext_topn}"
        )

    def _build_filter_search_sql(
            self,
            table_name: str,
            fields_expr: str,
            filters_expr: str,
            order_by_expr: str = "",
            limit_expr: str = ""
    ) -> str:
        """Assemble a plain filtered SELECT statement."""
        return (
            f"SELECT {fields_expr}"
            f" FROM {table_name}"
            f" WHERE {filters_expr}"
            f" {order_by_expr} {limit_expr}"
        )

    def _build_count_sql(
            self,
            table_name: str,
            filters_expr: str,
            extra_filter: str = "",
            hint: str = ""
    ) -> str:
        """Assemble a COUNT(id) statement with optional extra filter and hint."""
        hint_expr = f"{hint} " if hint else ""
        where_clause = f"{filters_expr} AND {extra_filter}" if extra_filter else filters_expr
        return f"SELECT {hint_expr}COUNT(id) FROM {table_name} WHERE {where_clause}"

    def _row_to_entity(self, data, fields: list[str]) -> dict:
        """Zip a result row with *fields*, dropping NULL values."""
        entity = {}
        for i, field in enumerate(fields):
            value = data[i]
            if value is None:
                continue
            entity[field] = value
        return entity

    def _get_dataset_id_field(self) -> str:
        """Column used to scope rows to a dataset/knowledge base."""
        return "kb_id"

    def _get_filters(self, condition: dict) -> list[str]:
        """Translate a condition dict into raw SQL filter fragments.

        NOTE(review): ``if not v`` also skips falsy values such as 0 and "" —
        confirm callers never filter on those.
        """
        filters: list[str] = []
        for k, v in condition.items():
            if not v:
                continue
            if k == "exists":
                filters.append(f"{v} IS NOT NULL")
            elif k == "must_not" and isinstance(v, dict) and "exists" in v:
                filters.append(f"{v.get('exists')} IS NULL")
            elif isinstance(v, list):
                values: list[str] = []
                for item in v:
                    values.append(get_value_str(item))
                value = ", ".join(values)
                filters.append(f"{k} IN ({value})")
            else:
                filters.append(f"{k} = {get_value_str(v)}")
        return filters

    def get(self, doc_id: str, index_name: str, dataset_ids: list[str]) -> dict | None:
        """Fetch a single row by primary key; None when table or row is absent.

        NOTE(review): dataset_ids is accepted but not used to filter the lookup.
        """
        if not self._check_table_exists_cached(index_name):
            return None
        try:
            res = self.client.get(
                table_name=index_name,
                ids=[doc_id],
            )
            row = res.fetchone()
            if row is None:
                return None
            return self._row_to_entity(row, fields=list(res.keys()))
        except Exception as e:
            self.logger.exception(f"OBConnectionBase.get({doc_id}) got exception")
            raise e

    def delete(self, condition: dict, index_name: str, dataset_id: str) -> int:
        """Delete rows matching *condition*; returns the number of rows removed.

        NOTE(review): mutates the caller's *condition* dict (adds the dataset
        id field) — confirm callers do not reuse the dict afterwards.
        """
        if not self._check_table_exists_cached(index_name):
            return 0
        # For doc_meta tables, don't add dataset_id to condition
        if not index_name.startswith("ragflow_doc_meta_"):
            condition[self._get_dataset_id_field()] = dataset_id
        try:
            from sqlalchemy import text
            # Resolve matching ids first, then delete by id.
            res = self.client.get(
                table_name=index_name,
                ids=None,
                where_clause=[text(f) for f in self._get_filters(condition)],
                output_column_name=["id"],
            )
            rows = res.fetchall()
            if len(rows) == 0:
                return 0
            ids = [row[0] for row in rows]
            self.logger.debug(f"OBConnection.delete, filters: {condition}, ids: {ids}")
            self.client.delete(
                table_name=index_name,
                ids=ids,
            )
            return len(ids)
        except Exception as e:
            self.logger.error(f"OBConnection.delete error: {str(e)}")
            return 0

    """
    Abstract CRUD methods that must be implemented by subclasses
    """

    @abstractmethod
    def search(
            self,
            select_fields: list[str],
            highlight_fields: list[str],
            condition: dict,
            match_expressions: list[MatchExpr],
            order_by: OrderByExpr,
            offset: int,
            limit: int,
            index_names: str | list[str],
            knowledgebase_ids: list[str],
            agg_fields: list[str] | None = None,
            rank_feature: dict | None = None,
            **kwargs,
    ):
        raise NotImplementedError("Not implemented")

    @abstractmethod
    def insert(self, documents: list[dict], index_name: str, dataset_id: str = None) -> list[str]:
        raise NotImplementedError("Not implemented")

    @abstractmethod
    def update(self, condition: dict, new_value: dict, index_name: str, dataset_id: str) -> bool:
        raise NotImplementedError("Not implemented")

    """
    Helper functions for search result - abstract methods
    """

    @abstractmethod
    def get_total(self, res) -> int:
        raise NotImplementedError("Not implemented")

    @abstractmethod
    def get_doc_ids(self, res) -> list[str]:
        raise NotImplementedError("Not implemented")

    @abstractmethod
    def get_fields(self, res, fields: list[str]) -> dict[str, dict]:
        raise NotImplementedError("Not implemented")

    @abstractmethod
    def get_highlight(self, res, keywords: list[str], field_name: str):
        raise NotImplementedError("Not implemented")

    @abstractmethod
    def get_aggregation(self, res, field_name: str):
        raise NotImplementedError("Not implemented")

    """
    SQL - can be overridden by subclasses
    """

    def sql(self, sql: str, fetch_size: int, format: str):
        """Execute SQL query - default implementation (no-op, returns None)."""
        return None
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/doc_store/ob_conn_base.py",
"license": "Apache License 2.0",
"lines": 636,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/doc_store/ob_conn_pool.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import time
from pyobvector import ObVecClient
from pyobvector.client import ClusterVersionException
from pyobvector.client.hybrid_search import HybridSearch
from pyobvector.util import ObVersion
from common import settings
from common.decorator import singleton
# Number of connection attempts before giving up.
ATTEMPT_TIME = 2
# Per-query timeout pushed to the server via the ob_query_timeout variable.
# NOTE(review): presumably microseconds (OceanBase convention) — confirm.
OB_QUERY_TIMEOUT = int(os.environ.get("OB_QUERY_TIMEOUT", "100_000_000"))
logger = logging.getLogger('ragflow.ob_conn_pool')
@singleton
class OceanBaseConnectionPool:
def __init__(self):
self.client = None
self.es = None # HybridSearch client
if hasattr(settings, "OB"):
self.OB_CONFIG = settings.OB
else:
self.OB_CONFIG = settings.get_base_config("oceanbase", {})
scheme = self.OB_CONFIG.get("scheme")
ob_config = self.OB_CONFIG.get("config", {})
if scheme and scheme.lower() == "mysql":
mysql_config = settings.get_base_config("mysql", {})
logger.info("Use MySQL scheme to create OceanBase connection.")
host = mysql_config.get("host", "localhost")
port = mysql_config.get("port", 2881)
self.username = mysql_config.get("user", "root@test")
self.password = mysql_config.get("password", "infini_rag_flow")
max_connections = mysql_config.get("max_connections", 300)
else:
logger.info("Use customized config to create OceanBase connection.")
host = ob_config.get("host", "localhost")
port = ob_config.get("port", 2881)
self.username = ob_config.get("user", "root@test")
self.password = ob_config.get("password", "infini_rag_flow")
max_connections = ob_config.get("max_connections", 300)
self.db_name = ob_config.get("db_name", "test")
self.uri = f"{host}:{port}"
logger.info(f"Use OceanBase '{self.uri}' as the doc engine.")
max_overflow = int(os.environ.get("OB_MAX_OVERFLOW", max(max_connections // 2, 10)))
pool_timeout = int(os.environ.get("OB_POOL_TIMEOUT", "30"))
for _ in range(ATTEMPT_TIME):
try:
self.client = ObVecClient(
uri=self.uri,
user=self.username,
password=self.password,
db_name=self.db_name,
pool_pre_ping=True,
pool_recycle=3600,
pool_size=max_connections,
max_overflow=max_overflow,
pool_timeout=pool_timeout,
)
break
except Exception as e:
logger.warning(f"{str(e)}. Waiting OceanBase {self.uri} to be healthy.")
time.sleep(5)
if self.client is None:
msg = f"OceanBase {self.uri} connection failed after {ATTEMPT_TIME} attempts."
logger.error(msg)
raise Exception(msg)
self._check_ob_version()
self._try_to_update_ob_query_timeout()
self._init_hybrid_search(max_connections, max_overflow, pool_timeout)
logger.info(f"OceanBase {self.uri} is healthy.")
def _check_ob_version(self):
try:
res = self.client.perform_raw_text_sql("SELECT OB_VERSION() FROM DUAL").fetchone()
version_str = res[0] if res else None
logger.info(f"OceanBase {self.uri} version is {version_str}")
except Exception as e:
raise Exception(f"Failed to get OceanBase version from {self.uri}, error: {str(e)}")
if not version_str:
raise Exception(f"Failed to get OceanBase version from {self.uri}.")
ob_version = ObVersion.from_db_version_string(version_str)
if ob_version < ObVersion.from_db_version_nums(4, 3, 5, 1):
raise Exception(
f"The version of OceanBase needs to be higher than or equal to 4.3.5.1, current version is {version_str}"
)
def _try_to_update_ob_query_timeout(self):
try:
rows = self.client.perform_raw_text_sql("SHOW VARIABLES LIKE 'ob_query_timeout'")
for row in rows:
val = row[1]
if val and int(val) >= OB_QUERY_TIMEOUT:
return
except Exception as e:
logger.warning("Failed to get 'ob_query_timeout' variable: %s", str(e))
try:
self.client.perform_raw_text_sql(f"SET GLOBAL ob_query_timeout={OB_QUERY_TIMEOUT}")
logger.info("Set GLOBAL variable 'ob_query_timeout' to %d.", OB_QUERY_TIMEOUT)
self.client.engine.dispose()
logger.info("Disposed all connections in engine pool to refresh connection pool")
except Exception as e:
logger.warning(f"Failed to set 'ob_query_timeout' variable: {str(e)}")
def _init_hybrid_search(self, max_connections, max_overflow, pool_timeout):
    """Optionally create the HybridSearch client, gated by ENABLE_HYBRID_SEARCH.

    Always leaves ``self.es`` defined: ``None`` when the feature is disabled or
    the cluster version is unsupported, otherwise a HybridSearch client built
    with the same pool settings as the SQL client.
    """
    # Define the attribute unconditionally. The previous code only assigned it
    # on the enabled path, so accessors such as get_hybrid_search_client()
    # could raise AttributeError when the flag was off (unless __init__ set it
    # elsewhere — this assignment is harmless either way).
    self.es = None
    enable_hybrid_search = os.getenv('ENABLE_HYBRID_SEARCH', 'false').lower() in ['true', '1', 'yes', 'y']
    if not enable_hybrid_search:
        return
    try:
        self.es = HybridSearch(
            uri=self.uri,
            user=self.username,
            password=self.password,
            db_name=self.db_name,
            pool_pre_ping=True,
            pool_recycle=3600,
            pool_size=max_connections,
            max_overflow=max_overflow,
            pool_timeout=pool_timeout,
        )
        logger.info("OceanBase Hybrid Search feature is enabled")
    except ClusterVersionException as e:
        # Unsupported cluster version: fall back to plain SQL search.
        logger.info("Failed to initialize HybridSearch client, fallback to use SQL", exc_info=e)
        self.es = None
def get_client(self) -> ObVecClient:
    """Return the underlying SQL client (no health check; see refresh_client)."""
    return self.client

def get_hybrid_search_client(self) -> HybridSearch | None:
    """Return the hybrid-search client, or None when the feature is disabled/unavailable."""
    return self.es

def get_db_name(self) -> str:
    """Return the configured database name."""
    return self.db_name

def get_uri(self) -> str:
    """Return the host:port URI this pool connects to."""
    return self.uri
def refresh_client(self) -> ObVecClient:
    """Ping the server; on failure dispose pooled connections, then return the client."""
    try:
        self.client.perform_raw_text_sql("SELECT 1 FROM DUAL")
    except Exception as e:
        logger.warning(f"OceanBase connection unhealthy: {str(e)}, refreshing...")
        self.client.engine.dispose()
    return self.client
def __del__(self):
    """Best-effort disposal of both engine pools at interpreter teardown."""
    for attr_name in ("client", "es"):
        handle = getattr(self, attr_name, None)
        if not handle:
            continue
        try:
            handle.engine.dispose()
        except Exception:
            # Teardown must never raise.
            pass
OB_CONN = OceanBaseConnectionPool()
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/doc_store/ob_conn_pool.py",
"license": "Apache License 2.0",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
"""SeaFile connector with granular sync support"""
import logging
from datetime import datetime, timezone
from typing import Any, Optional
from retry import retry
from common.data_source.utils import (
get_file_ext,
rl_requests,
)
from common.data_source.config import (
DocumentSource,
INDEX_BATCH_SIZE,
BLOB_STORAGE_SIZE_THRESHOLD,
)
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
CredentialExpiredError,
InsufficientPermissionsError,
)
from common.data_source.interfaces import LoadConnector, PollConnector
from common.data_source.models import (
Document,
SecondsSinceUnixEpoch,
GenerateDocumentsOutput,
SeafileSyncScope,
)
logger = logging.getLogger(__name__)
class SeaFileConnector(LoadConnector, PollConnector):
    """SeaFile connector supporting account-, library- and directory-level sync.

    API endpoints used:

    Account token (api2):
        GET /api2/account/info/
        GET /api2/repos/
        GET /api2/repos/{repo_id}/
        GET /api2/repos/{repo_id}/dir/?p=...
        GET /api2/repos/{repo_id}/file/?p=...&reuse=1

    Repo token (api/v2.1/via-repo-token):
        GET /api/v2.1/via-repo-token/repo-info/
        GET /api/v2.1/via-repo-token/dir/?path=...
        GET /api/v2.1/via-repo-token/download-link/?path=...
    """

    def __init__(
        self,
        seafile_url: str,
        batch_size: int = INDEX_BATCH_SIZE,
        include_shared: bool = True,
        sync_scope: str = SeafileSyncScope.ACCOUNT,
        repo_id: Optional[str] = None,
        sync_path: Optional[str] = None,
    ) -> None:
        """Configure the connector.

        Args:
            seafile_url: Base server URL (trailing slash stripped).
            batch_size: Documents per yielded batch.
            include_shared: Account scope only — include libraries shared with the user.
            sync_scope: One of SeafileSyncScope; invalid values raise ValueError.
            repo_id: Required for library/directory scope.
            sync_path: Directory scope only — subtree root inside the library.
        """
        self.seafile_url = seafile_url.rstrip("/")
        self.batch_size = batch_size
        self.include_shared = include_shared
        # Raises ValueError for unknown scope strings.
        self.sync_scope = SeafileSyncScope(sync_scope)
        self.repo_id = repo_id
        self.sync_path = self._normalise_path(sync_path)
        self.token: Optional[str] = None  # account-level
        self.repo_token: Optional[str] = None  # library-scoped
        self.current_user_email: Optional[str] = None
        self.size_threshold: int = BLOB_STORAGE_SIZE_THRESHOLD
        # Fail fast on scope/parameter mismatches.
        self._validate_scope_params()
@staticmethod
def _normalise_path(path: Optional[str]) -> str:
    """Return *path* rooted at '/' with no trailing slash ('' / None -> '/')."""
    if not path:
        return "/"
    cleaned = path.strip()
    if not cleaned.startswith("/"):
        cleaned = f"/{cleaned}"
    return cleaned.rstrip("/") or "/"
@staticmethod
def _parse_mtime(raw_mtime) -> datetime:
    """Parse an mtime value from the SeaFile API into an aware UTC datetime.

    Handles:
        - Unix timestamp as int/float: 1575514722
        - Unix timestamp as str: "1575514722"
        - ISO 8601 datetime str: "2026-02-15T17:26:53+01:00"
        - None / missing

    Always returns a timezone-aware datetime so results compare safely
    against the aware start/end bounds used by _list_files_recursive.
    """
    if not raw_mtime:
        return datetime.now(timezone.utc)
    # Unix timestamp (int or float).
    if isinstance(raw_mtime, (int, float)):
        return datetime.fromtimestamp(raw_mtime, tz=timezone.utc)
    if isinstance(raw_mtime, str):
        # Numeric string first.
        try:
            return datetime.fromtimestamp(int(raw_mtime), tz=timezone.utc)
        except ValueError:
            pass
        # Then ISO 8601.
        try:
            parsed = datetime.fromisoformat(raw_mtime)
        except ValueError:
            pass
        else:
            # fromisoformat() yields a NAIVE datetime when the string has no
            # UTC offset; comparing naive vs aware datetimes raises TypeError
            # downstream, so pin missing offsets to UTC.
            if parsed.tzinfo is None:
                parsed = parsed.replace(tzinfo=timezone.utc)
            return parsed
    logger.warning("Unparseable mtime %r, using current time", raw_mtime)
    return datetime.now(timezone.utc)
def _validate_scope_params(self) -> None:
    """Cross-check scope against repo_id / sync_path at construction time.

    Raises:
        ConnectorValidationError: library/directory scope without a repo_id,
            or directory scope whose sync_path is the library root.
    """
    if self.sync_scope in (SeafileSyncScope.LIBRARY, SeafileSyncScope.DIRECTORY):
        if not self.repo_id:
            raise ConnectorValidationError(
                f"sync_scope={self.sync_scope.value!r} requires 'repo_id'."
            )
    if self.sync_scope == SeafileSyncScope.DIRECTORY:
        # Root path would make directory scope indistinguishable from library scope.
        if self.sync_path == "/":
            raise ConnectorValidationError(
                "sync_scope='directory' requires a non-root 'sync_path'. "
                "Use sync_scope='library' to sync an entire library."
            )
@property
def _use_repo_token(self) -> bool:
    """True when a library-scoped token is loaded and repo-token endpoints apply."""
    has_repo_token = self.repo_token is not None
    return has_repo_token
def _account_headers(self) -> dict[str, str]:
    """Build auth headers for /api2/ endpoints; requires the account token."""
    if not self.token:
        raise ConnectorMissingCredentialError("Account token not set")
    headers = {"Authorization": f"Token {self.token}"}
    headers["Accept"] = "application/json"
    return headers
def _repo_token_headers(self) -> dict[str, str]:
    """Build auth headers for via-repo-token endpoints (Bearer scheme, not Token)."""
    if not self.repo_token:
        raise ConnectorMissingCredentialError("Repo token not set")
    headers = {"Authorization": f"Bearer {self.repo_token}"}
    headers["Accept"] = "application/json"
    return headers
def _account_get(self, endpoint: str, params: Optional[dict] = None):
    """GET against /api2/... using the account token."""
    target = f"{self.seafile_url}/api2/{endpoint.lstrip('/')}"
    return rl_requests.get(
        target,
        headers=self._account_headers(),
        params=params,
        timeout=60,
    )
def _repo_token_get(self, endpoint: str, params: Optional[dict] = None):
    """GET against /api/v2.1/via-repo-token/... using the repo token."""
    target = f"{self.seafile_url}/api/v2.1/via-repo-token/{endpoint.lstrip('/')}"
    return rl_requests.get(
        target,
        headers=self._repo_token_headers(),
        params=params,
        timeout=60,
    )
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
    """Load and validate credentials.

    Accepts an account token ('seafile_token'), username/password (exchanged
    for an account token), and/or a library-scoped 'repo_token' (honoured only
    for library/directory scope).

    Raises:
        ConnectorMissingCredentialError: nothing usable was supplied.
        CredentialExpiredError: validation against the server failed.
    """
    logger.debug("Loading credentials for SeaFile server %s", self.seafile_url)
    token = credentials.get("seafile_token")
    repo_token = credentials.get("repo_token")
    username = credentials.get("username")
    password = credentials.get("password")
    if token:
        self.token = token
    elif username and password:
        self.token = self._authenticate_with_password(username, password)
    # A repo token is only meaningful for library/directory scope.
    if repo_token and self.sync_scope in (SeafileSyncScope.LIBRARY, SeafileSyncScope.DIRECTORY):
        self.repo_token = repo_token
    elif repo_token:
        logger.debug(
            "repo_token supplied but scope=%s; ignoring.",
            self.sync_scope.value,
        )
    if not self.token and not self.repo_token:
        raise ConnectorMissingCredentialError(
            "SeaFile requires 'seafile_token', 'repo_token', "
            "or 'username'/'password'."
        )
    try:
        self._validate_credentials()
    except ConnectorMissingCredentialError:
        raise
    except Exception as e:
        # Chain the cause so the original traceback is preserved.
        raise CredentialExpiredError(
            f"SeaFile credential validation failed: {e}"
        ) from e
    return None
def _authenticate_with_password(self, username: str, password: str) -> str:
    """Exchange username/password for an account token via /api2/auth-token/.

    Raises ConnectorMissingCredentialError on any failure (HTTP error,
    network error, or a response without a token).
    """
    try:
        resp = rl_requests.post(
            f"{self.seafile_url}/api2/auth-token/",
            data={"username": username, "password": password},
            timeout=30,
        )
        resp.raise_for_status()
        token = resp.json().get("token")
        if not token:
            raise CredentialExpiredError("No token returned")
        return token
    except Exception as e:
        # Chain the cause so the original traceback is preserved.
        raise ConnectorMissingCredentialError(
            f"Failed to authenticate with SeaFile: {e}"
        ) from e
def _validate_credentials(self) -> None:
    """Validate whichever tokens are present against the server."""
    if self.token:
        self._validate_account_token()
    if self.repo_token:
        self._validate_repo_token()
        return
    if self.sync_scope in (SeafileSyncScope.LIBRARY, SeafileSyncScope.DIRECTORY):
        # No repo token: confirm the account token can reach the library.
        self._validate_repo_access_via_account()
def _validate_account_token(self) -> dict:
    """Confirm the account token works and remember the account's email."""
    resp = self._account_get("/account/info/")
    resp.raise_for_status()
    account_info = resp.json()
    self.current_user_email = account_info.get("email")
    logger.info("SeaFile authenticated as: %s", self.current_user_email)
    return account_info
def _validate_repo_token(self) -> None:
    """Validate repo token using /api/v2.1/via-repo-token/repo-info/.

    Also backfills self.repo_id from the server response when it was not
    configured. Raises CredentialExpiredError on any failure.
    """
    try:
        resp = self._repo_token_get("repo-info/")
        resp.raise_for_status()
        info = resp.json()
        logger.info(
            "Repo token validated — library: %s (id: %s)",
            info.get("repo_name", "?"), info.get("repo_id", self.repo_id),
        )
        # Update repo_id from response if not set
        if not self.repo_id and info.get("repo_id"):
            self.repo_id = info["repo_id"]
    except Exception as e:
        # Chain the cause so the original traceback is preserved.
        raise CredentialExpiredError(
            f"Repo token validation failed: {e}"
        ) from e
def _validate_repo_access_via_account(self) -> None:
    """Verify the account token can reach the configured library (and path).

    Raises ConnectorValidationError when the library is not accessible.
    """
    repo_info = self._get_repo_info_via_account(self.repo_id)
    if not repo_info:
        raise ConnectorValidationError(
            f"Library {self.repo_id} not accessible with account token."
        )
    if self.sync_scope == SeafileSyncScope.DIRECTORY:
        entries = self._get_directory_entries(self.repo_id, self.sync_path)
        # NOTE(review): _get_directory_entries returns [] (never None) on
        # errors, so this None check looks unreachable and a missing
        # directory will NOT raise here — confirm intended behaviour.
        if entries is None:
            raise ConnectorValidationError(
                f"Directory {self.sync_path!r} does not exist "
                f"in library {self.repo_id}."
            )
def validate_connector_settings(self) -> None:
    """Cheap end-to-end check that the configured scope is reachable.

    Maps HTTP 401/403 on unexpected errors to CredentialExpiredError /
    InsufficientPermissionsError; anything else becomes ConnectorValidationError.
    """
    if not self.token and not self.repo_token:
        raise ConnectorMissingCredentialError("SeaFile credentials not loaded.")
    if not self.seafile_url:
        raise ConnectorValidationError("No SeaFile URL was provided.")
    try:
        if self.sync_scope == SeafileSyncScope.ACCOUNT:
            libs = self._get_libraries()
            logger.info("Validated (account scope). %d libraries.", len(libs))
        elif self.sync_scope == SeafileSyncScope.LIBRARY:
            # NOTE(review): _get_repo_info may return None; .get() would then
            # raise AttributeError — caught below and surfaced as a
            # ConnectorValidationError. Confirm that degradation is intended.
            info = self._get_repo_info()
            logger.info(
                "Validated (library scope): %s", info.get("name", self.repo_id)
            )
        elif self.sync_scope == SeafileSyncScope.DIRECTORY:
            entries = self._get_directory_entries(self.repo_id, self.sync_path)
            logger.info(
                "Validated (directory scope): %s:%s (%d entries)",
                self.repo_id, self.sync_path, len(entries),
            )
    except (
        ConnectorValidationError, ConnectorMissingCredentialError,
        CredentialExpiredError, InsufficientPermissionsError,
    ):
        # Already the right exception type — re-raise untouched.
        raise
    except Exception as e:
        status = getattr(getattr(e, "response", None), "status_code", None)
        if status == 401:
            raise CredentialExpiredError("Token invalid or expired.")
        if status == 403:
            raise InsufficientPermissionsError("Insufficient permissions.")
        raise ConnectorValidationError(f"Validation failed: {repr(e)}")
@retry(tries=3, delay=1, backoff=2)
def _get_libraries(self) -> list[dict]:
    """List all libraries (account token only), honouring include_shared."""
    resp = self._account_get("/repos/")
    resp.raise_for_status()
    libraries = resp.json()
    # Only filter when shared libraries are excluded AND we know who we are.
    if self.include_shared or not self.current_user_email:
        return libraries
    me = self.current_user_email
    return [
        entry for entry in libraries
        if entry.get("owner") == me or entry.get("owner_email") == me
    ]
@retry(tries=3, delay=1, backoff=2)
def _get_repo_info_via_account(self, repo_id: str) -> Optional[dict]:
    """GET /api2/repos/{repo_id}/ with the account token; None on any failure."""
    try:
        resp = self._account_get(f"/repos/{repo_id}/")
        resp.raise_for_status()
        return resp.json()
    except Exception as e:
        logger.warning("Error fetching repo info for %s: %s", repo_id, e)
    return None
@retry(tries=3, delay=1, backoff=2)
def _get_repo_info_via_repo_token(self) -> Optional[dict]:
    """GET /api/v2.1/via-repo-token/repo-info/ with the repo token; None on failure."""
    try:
        resp = self._repo_token_get("repo-info/")
        resp.raise_for_status()
        return resp.json()
    except Exception as e:
        logger.warning("Error fetching repo info via repo token: %s", e)
    return None
def _get_repo_info(self) -> Optional[dict]:
    """Get repo info using whichever token is available."""
    if not self._use_repo_token:
        return self._get_repo_info_via_account(self.repo_id)
    raw = self._get_repo_info_via_repo_token()
    if not raw:
        return None
    # Normalise keys to match account-token response shape
    return {
        "id": raw.get("repo_id", self.repo_id),
        "name": raw.get("repo_name", self.repo_id),
    }
@retry(tries=3, delay=1, backoff=2)
def _get_directory_entries(self, repo_id: str, path: str = "/") -> list[dict]:
    """List directory contents using the appropriate endpoint.

    Returns [] both for an empty directory and on any error, so callers
    cannot distinguish "missing path" from "no entries".

    NOTE(review): the broad except below swallows every exception, which
    also means the @retry decorator never sees a failure to retry —
    confirm whether retries were intended to fire here.
    """
    try:
        if self._use_repo_token:
            # GET /api/v2.1/via-repo-token/dir/?path=/foo
            resp = self._repo_token_get("dir/", params={"path": path})
        else:
            # GET /api2/repos/{repo_id}/dir/?p=/foo
            resp = self._account_get(
                f"/repos/{repo_id}/dir/", params={"p": path},
            )
        resp.raise_for_status()
        data = resp.json()
        # v2.1 wraps entries in {"dirent_list": [...]}
        if isinstance(data, dict) and "dirent_list" in data:
            return data["dirent_list"]
        return data
    except Exception as e:
        logger.warning(
            "Error fetching directory %s in repo %s: %s", path, repo_id, e,
        )
        return []
@retry(tries=3, delay=1, backoff=2)
def _get_file_download_link(
    self, repo_id: str, path: str
) -> Optional[str]:
    """Get a temporary download URL for a file; None on any failure."""
    try:
        if self._use_repo_token:
            # Repo-token endpoint: GET /api/v2.1/via-repo-token/download-link/?path=...
            resp = self._repo_token_get(
                "download-link/", params={"path": path},
            )
        else:
            # Account endpoint: GET /api2/repos/{repo_id}/file/?p=...&reuse=1
            resp = self._account_get(
                f"/repos/{repo_id}/file/", params={"p": path, "reuse": 1},
            )
        resp.raise_for_status()
        # The API returns the raw URL wrapped in quotes.
        return resp.text.strip('"')
    except Exception as e:
        logger.warning("Error getting download link for %s: %s", path, e)
    return None
def _list_files_recursive(
    self,
    repo_id: str,
    repo_name: str,
    path: str,
    start: datetime,
    end: datetime,
) -> list[tuple[str, dict, dict]]:
    """Walk *path* depth-first, collecting (path, entry, library) tuples for
    files whose mtime falls in (start, end]."""
    collected: list[tuple[str, dict, dict]] = []
    for entry in self._get_directory_entries(repo_id, path):
        child_name = entry.get("name", "")
        child_path = f"{path.rstrip('/')}/{child_name}"
        kind = entry.get("type")
        if kind == "dir":
            collected.extend(
                self._list_files_recursive(
                    repo_id, repo_name, child_path, start, end,
                )
            )
        elif kind == "file":
            modified = self._parse_mtime(entry.get("mtime"))
            if start < modified <= end:
                collected.append(
                    (child_path, entry, {"id": repo_id, "name": repo_name})
                )
    return collected
def _resolve_libraries_to_scan(self) -> list[dict]:
    """Return [{'id', 'name'}] dicts describing the libraries this sync covers."""
    if self.sync_scope == SeafileSyncScope.ACCOUNT:
        resolved = []
        for lib in self._get_libraries():
            if lib.get("id"):
                resolved.append({"id": lib["id"], "name": lib.get("name", "Unknown")})
        return resolved
    info = self._get_repo_info()
    if not info:
        # Fall back to the configured id when the lookup failed.
        return [{"id": self.repo_id, "name": self.repo_id}]
    return [{
        "id": info.get("id", self.repo_id),
        "name": info.get("name", self.repo_id),
    }]
def _root_path_for_repo(self, repo_id: str) -> str:
    """Directory-scope syncs of the configured repo start at sync_path; else '/'."""
    directory_scope = (
        self.sync_scope == SeafileSyncScope.DIRECTORY
        and repo_id == self.repo_id
    )
    return self.sync_path if directory_scope else "/"
def _yield_seafile_documents(
    self, start: datetime, end: datetime,
) -> GenerateDocumentsOutput:
    """Yield batches of Documents for files modified in (start, end].

    Scans every library in scope, collects matching files, downloads each
    one (skipping files above the size threshold and empty blobs), and
    yields them in batches of ``self.batch_size``. Download errors are
    logged and the file skipped.
    """
    libraries = self._resolve_libraries_to_scan()
    logger.info(
        "Processing %d library(ies) [scope=%s]",
        len(libraries), self.sync_scope.value,
    )
    all_files: list[tuple[str, dict, dict]] = []
    for lib in libraries:
        root = self._root_path_for_repo(lib["id"])
        logger.debug("Scanning %s starting at %s", lib["name"], root)
        try:
            files = self._list_files_recursive(
                lib["id"], lib["name"], root, start, end,
            )
            all_files.extend(files)
        except Exception as e:
            # One broken library must not abort the whole sync.
            logger.error("Error in library %s: %s", lib["name"], e)
    logger.info("Found %d file(s) matching criteria", len(all_files))
    batch: list[Document] = []
    for file_path, file_entry, library in all_files:
        file_name = file_entry.get("name", "")
        file_size = file_entry.get("size", 0)
        file_id = file_entry.get("id", "")
        repo_id = library["id"]
        repo_name = library["name"]
        modified = self._parse_mtime(file_entry.get("mtime"))
        # Oversized files are skipped outright, not truncated.
        if file_size > self.size_threshold:
            logger.warning("Skipping large file: %s (%d B)", file_path, file_size)
            continue
        try:
            download_link = self._get_file_download_link(repo_id, file_path)
            if not download_link:
                continue
            resp = rl_requests.get(download_link, timeout=120)
            resp.raise_for_status()
            blob = resp.content
            if not blob:
                # Empty files carry no indexable content.
                continue
            batch.append(Document(
                id=f"seafile:{repo_id}:{file_id}",
                blob=blob,
                source=DocumentSource.SEAFILE,
                semantic_identifier=f"{repo_name}{file_path}",
                extension=get_file_ext(file_name),
                doc_updated_at=modified,  # <-- already parsed
                size_bytes=len(blob),
            ))
            if len(batch) >= self.batch_size:
                yield batch
                batch = []
        except Exception as e:
            logger.error("Error downloading %s: %s", file_path, e)
    # Flush the final partial batch.
    if batch:
        yield batch
def load_from_state(self) -> GenerateDocumentsOutput:
    """Full sync: yield every document modified since the Unix epoch."""
    epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
    now = datetime.now(timezone.utc)
    return self._yield_seafile_documents(start=epoch, end=now)
def poll_source(
    self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch,
) -> GenerateDocumentsOutput:
    """Incremental sync for files modified between two epoch timestamps."""
    window_start = datetime.fromtimestamp(start, tz=timezone.utc)
    window_end = datetime.fromtimestamp(end, tz=timezone.utc)
    yield from self._yield_seafile_documents(window_start, window_end)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/seafile_connector.py",
"license": "Apache License 2.0",
"lines": 465,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
import pytest
from common.metadata_utils import apply_meta_data_filter
from unittest.mock import MagicMock, AsyncMock, patch
@pytest.mark.asyncio
async def test_apply_meta_data_filter_semi_auto_key():
    """Semi-auto filter with plain string keys resolves matching doc ids and
    passes empty constraints (legacy behaviour) to gen_meta_filter."""
    meta_data_filter = {
        "method": "semi_auto",
        "semi_auto": ["key1", "key2"]
    }
    # metas maps key -> {value -> [doc ids]}.
    metas = {
        "key1": {"val1": ["doc1"]},
        "key2": {"val2": ["doc2"]}
    }
    question = "find val1"
    chat_mdl = MagicMock()
    # Patch at the source module; assumes apply_meta_data_filter resolves
    # gen_meta_filter via rag.prompts.generator at call time.
    with patch("rag.prompts.generator.gen_meta_filter", new_callable=AsyncMock) as mock_gen:
        mock_gen.return_value = {"conditions": [{"key": "key1", "op": "=", "value": "val1"}], "logic": "and"}
        doc_ids = await apply_meta_data_filter(meta_data_filter, metas, question, chat_mdl)
        assert doc_ids == ["doc1"]
        # Check that constraints is an empty dict by default for legacy
        mock_gen.assert_called_once()
        args, kwargs = mock_gen.call_args
        assert kwargs["constraints"] == {}
@pytest.mark.asyncio
async def test_apply_meta_data_filter_semi_auto_key_and_operator():
    """Semi-auto entries given as {key, op} dicts must surface per-key operator
    constraints to gen_meta_filter (mixed with plain-string entries)."""
    meta_data_filter = {
        "method": "semi_auto",
        "semi_auto": [{"key": "key1", "op": ">"}, "key2"]
    }
    # metas maps key -> {value -> [doc ids]}; "10" > "5" per the mocked filter.
    metas = {
        "key1": {"10": ["doc1"]},
        "key2": {"val2": ["doc2"]}
    }
    question = "find key1 > 5"
    chat_mdl = MagicMock()
    with patch("rag.prompts.generator.gen_meta_filter", new_callable=AsyncMock) as mock_gen:
        mock_gen.return_value = {"conditions": [{"key": "key1", "op": ">", "value": "5"}], "logic": "and"}
        doc_ids = await apply_meta_data_filter(meta_data_filter, metas, question, chat_mdl)
        assert doc_ids == ["doc1"]
        # Check that constraints are correctly passed
        mock_gen.assert_called_once()
        args, kwargs = mock_gen.call_args
        # Only the dict-form entry contributes a constraint; plain "key2" does not.
        assert kwargs["constraints"] == {"key1": ">"}
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/unit_test/common/test_apply_semi_auto_meta_data_filter.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for OceanBase connection utility functions.
"""
from rag.utils.ob_conn import get_value_str, get_metadata_filter_expression
class TestGetValueStr:
    """Test cases for the get_value_str function.

    get_value_str renders a Python value as a SQL literal fragment: NULL for
    None, bare numerals for numbers, lowercase true/false for bools, quoted
    (and escaped) text for strings, and quoted JSON for lists/dicts.
    """

    def test_none_value(self):
        """Test that None is converted to NULL."""
        assert get_value_str(None) == "NULL"

    def test_integer_zero(self):
        """Test that integer 0 is correctly converted."""
        assert get_value_str(0) == "0"

    def test_float_zero(self):
        """Test that float 0.0 is correctly converted."""
        assert get_value_str(0.0) == "0.0"

    def test_positive_integer(self):
        """Test positive integer conversion."""
        assert get_value_str(42) == "42"

    def test_negative_integer(self):
        """Test negative integer conversion."""
        assert get_value_str(-42) == "-42"

    def test_positive_float(self):
        """Test positive float conversion."""
        assert get_value_str(3.14) == "3.14"

    def test_negative_float(self):
        """Test negative float conversion."""
        assert get_value_str(-3.14) == "-3.14"

    def test_boolean_true(self):
        """Test that True is converted to lowercase 'true'."""
        assert get_value_str(True) == "true"

    def test_boolean_false(self):
        """Test that False is converted to lowercase 'false'."""
        assert get_value_str(False) == "false"

    def test_empty_string(self):
        """Test that empty string is quoted correctly."""
        assert get_value_str("") == "''"

    def test_simple_string(self):
        """Test simple string is quoted."""
        assert get_value_str("hello") == "'hello'"

    def test_string_with_quotes(self):
        """Test string with single quotes is escaped."""
        result = get_value_str("O'Reilly")
        # Accept either backslash-escaping or SQL doubled-quote escaping.
        assert result == "'O\\'Reilly'" or result == "'O''Reilly'"

    def test_string_with_double_quotes(self):
        """Test string with double quotes."""
        result = get_value_str('Say "hello"')
        assert '"' in result or '\\"' in result

    def test_empty_list(self):
        """Test that empty list is converted to JSON string."""
        assert get_value_str([]) == "'[]'"

    def test_list_with_items(self):
        """Test list with items is converted to JSON string."""
        result = get_value_str([1, 2, 3])
        assert result == "'[1, 2, 3]'"

    def test_empty_dict(self):
        """Test that empty dict is converted to JSON string."""
        assert get_value_str({}) == "'{}'"

    def test_dict_with_items(self):
        """Test dict with items is converted to JSON string."""
        result = get_value_str({"key": "value"})
        assert "key" in result
        assert "value" in result
        assert result.startswith("'")
        assert result.endswith("'")

    def test_nested_structure(self):
        """Test nested list/dict structures."""
        result = get_value_str({"list": [1, 2], "nested": {"a": "b"}})
        assert result.startswith("'")
        assert result.endswith("'")

    def test_unicode_string(self):
        """Test Unicode characters in strings."""
        result = get_value_str("你好世界")
        assert "你好世界" in result
        assert result.startswith("'")
        assert result.endswith("'")

    def test_special_characters(self):
        """Test special SQL characters are escaped."""
        result = get_value_str("test\\backslash")
        assert "test" in result
class TestGetMetadataFilterExpression:
    """Test cases for the get_metadata_filter_expression function.

    The function compiles a {conditions, logical_operator} filter dict into a
    parenthesised SQL predicate built from JSON_EXTRACT(metadata, '$.<name>')
    terms joined by the logical operator.
    """

    def test_simple_is_condition(self):
        """Test simple 'is' comparison."""
        filter_dict = {
            "conditions": [
                {"name": "author", "comparison_operator": "is", "value": "John"}
            ],
            "logical_operator": "and"
        }
        result = get_metadata_filter_expression(filter_dict)
        assert "JSON_EXTRACT(metadata, '$.author')" in result
        assert "= 'John'" in result

    def test_numeric_comparison_with_zero(self):
        """Test numeric comparison with zero value (regression test for bug)."""
        filter_dict = {
            "conditions": [
                {"name": "count", "comparison_operator": "=", "value": 0}
            ],
            "logical_operator": "and"
        }
        result = get_metadata_filter_expression(filter_dict)
        assert "JSON_EXTRACT(metadata, '$.count')" in result
        assert "= 0" in result
        assert "= ''" not in result  # Should not produce empty string

    def test_numeric_comparison_with_float_zero(self):
        """Test numeric comparison with 0.0."""
        filter_dict = {
            "conditions": [
                {"name": "rating", "comparison_operator": "=", "value": 0.0}
            ],
            "logical_operator": "and"
        }
        result = get_metadata_filter_expression(filter_dict)
        assert "JSON_EXTRACT(metadata, '$.rating')" in result
        assert "0.0" in result

    def test_empty_string_condition(self):
        """Test condition with empty string value."""
        filter_dict = {
            "conditions": [
                {"name": "status", "comparison_operator": "is", "value": ""}
            ],
            "logical_operator": "and"
        }
        result = get_metadata_filter_expression(filter_dict)
        assert "JSON_EXTRACT(metadata, '$.status')" in result
        assert "= ''" in result

    def test_boolean_false_condition(self):
        """Test condition with False value."""
        filter_dict = {
            "conditions": [
                {"name": "active", "comparison_operator": "is", "value": False}
            ],
            "logical_operator": "and"
        }
        result = get_metadata_filter_expression(filter_dict)
        assert "JSON_EXTRACT(metadata, '$.active')" in result
        assert "false" in result

    def test_empty_list_condition(self):
        """Test condition with empty list."""
        filter_dict = {
            "conditions": [
                {"name": "tags", "comparison_operator": "is", "value": []}
            ],
            "logical_operator": "and"
        }
        result = get_metadata_filter_expression(filter_dict)
        assert "JSON_EXTRACT(metadata, '$.tags')" in result
        assert "'[]'" in result

    def test_empty_dict_condition(self):
        """Test condition with empty dict."""
        filter_dict = {
            "conditions": [
                {"name": "metadata", "comparison_operator": "is", "value": {}}
            ],
            "logical_operator": "and"
        }
        result = get_metadata_filter_expression(filter_dict)
        assert "JSON_EXTRACT(metadata, '$.metadata')" in result
        assert "'{}'" in result

    def test_none_value_condition(self):
        """Test condition with None value."""
        filter_dict = {
            "conditions": [
                {"name": "optional", "comparison_operator": "is", "value": None}
            ],
            "logical_operator": "and"
        }
        result = get_metadata_filter_expression(filter_dict)
        assert "JSON_EXTRACT(metadata, '$.optional')" in result
        assert "NULL" in result

    def test_multiple_conditions_with_and(self):
        """Test multiple conditions with AND operator."""
        filter_dict = {
            "conditions": [
                {"name": "author", "comparison_operator": "is", "value": "John"},
                {"name": "year", "comparison_operator": ">", "value": 2020}
            ],
            "logical_operator": "and"
        }
        result = get_metadata_filter_expression(filter_dict)
        assert "JSON_EXTRACT(metadata, '$.author')" in result
        assert "JSON_EXTRACT(metadata, '$.year')" in result
        assert " and " in result.lower()

    def test_multiple_conditions_with_or(self):
        """Test multiple conditions with OR operator."""
        filter_dict = {
            "conditions": [
                {"name": "status", "comparison_operator": "is", "value": "active"},
                {"name": "status", "comparison_operator": "is", "value": "pending"}
            ],
            "logical_operator": "or"
        }
        result = get_metadata_filter_expression(filter_dict)
        assert "JSON_EXTRACT(metadata, '$.status')" in result
        assert " or " in result.lower()

    def test_greater_than_operator(self):
        """Test greater than comparison."""
        filter_dict = {
            "conditions": [
                {"name": "score", "comparison_operator": ">", "value": 90}
            ],
            "logical_operator": "and"
        }
        result = get_metadata_filter_expression(filter_dict)
        assert ">" in result
        assert "90" in result

    def test_less_than_operator(self):
        """Test less than comparison."""
        filter_dict = {
            "conditions": [
                {"name": "age", "comparison_operator": "<", "value": 18}
            ],
            "logical_operator": "and"
        }
        result = get_metadata_filter_expression(filter_dict)
        assert "<" in result
        assert "18" in result

    def test_contains_operator(self):
        """Test contains operator."""
        filter_dict = {
            "conditions": [
                {"name": "title", "comparison_operator": "contains", "value": "Python"}
            ],
            "logical_operator": "and"
        }
        result = get_metadata_filter_expression(filter_dict)
        assert "JSON_EXTRACT(metadata, '$.title')" in result

    def test_empty_operator(self):
        """Test empty operator."""
        filter_dict = {
            "conditions": [
                {"name": "description", "comparison_operator": "empty", "value": None}
            ],
            "logical_operator": "and"
        }
        result = get_metadata_filter_expression(filter_dict)
        assert "JSON_EXTRACT(metadata, '$.description')" in result
        # Either SQL NULL test or empty-string comparison is acceptable.
        assert "IS NULL" in result or "= ''" in result

    def test_not_empty_operator(self):
        """Test not empty operator."""
        filter_dict = {
            "conditions": [
                {"name": "description", "comparison_operator": "not empty", "value": None}
            ],
            "logical_operator": "and"
        }
        result = get_metadata_filter_expression(filter_dict)
        assert "JSON_EXTRACT(metadata, '$.description')" in result

    def test_parentheses_wrapping(self):
        """Test that result is wrapped in parentheses."""
        filter_dict = {
            "conditions": [
                {"name": "field", "comparison_operator": "is", "value": "value"}
            ],
            "logical_operator": "and"
        }
        result = get_metadata_filter_expression(filter_dict)
        assert result.startswith("(")
        assert result.endswith(")")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/unit_test/utils/test_ob_conn.py",
"license": "Apache License 2.0",
"lines": 274,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
"""
CLI entry point for RAGFlow ES to OceanBase migration tool.
"""
import json
import logging
import sys
import click
from rich.console import Console
from rich.table import Table
from rich.logging import RichHandler
from .es_client import ESClient
from .ob_client import OBClient
from .migrator import ESToOceanBaseMigrator
from .verify import MigrationVerifier
console = Console()
def setup_logging(verbose: bool = False):
    """Configure root logging with a Rich handler (DEBUG when verbose)."""
    logging.basicConfig(
        level=logging.DEBUG if verbose else logging.INFO,
        format="%(message)s",
        datefmt="[%X]",
        handlers=[RichHandler(rich_tracebacks=True, console=console)],
    )
@click.group()
@click.option("-v", "--verbose", is_flag=True, help="Enable verbose logging")
@click.pass_context
def main(ctx, verbose):
    """RAGFlow ES to OceanBase Migration Tool.

    Migrate RAGFlow data from Elasticsearch 8+ to OceanBase with schema conversion,
    vector data mapping, batch import, and resume capability.

    This tool is specifically designed for RAGFlow's data structure.
    """
    # Stash the flag for subcommands and configure logging once at group level.
    # (The docstring above is click's help text — do not reword casually.)
    ctx.ensure_object(dict)
    ctx.obj["verbose"] = verbose
    setup_logging(verbose)
@main.command()
@click.option("--es-host", default="localhost", help="Elasticsearch host")
@click.option("--es-port", default=9200, type=int, help="Elasticsearch port")
@click.option("--es-user", default=None, help="Elasticsearch username")
@click.option("--es-password", default=None, help="Elasticsearch password")
@click.option("--es-api-key", default=None, help="Elasticsearch API key")
@click.option("--ob-host", default="localhost", help="OceanBase host")
@click.option("--ob-port", default=2881, type=int, help="OceanBase port")
@click.option("--ob-user", default="root@test", help="OceanBase user (format: user@tenant)")
@click.option("--ob-password", default="", help="OceanBase password")
@click.option("--ob-database", default="test", help="OceanBase database")
@click.option("--index", "-i", default=None, help="Source ES index name (omit to migrate all ragflow_* indices)")
@click.option("--table", "-t", default=None, help="Target OceanBase table name (omit to use same name as index)")
@click.option("--batch-size", default=1000, type=int, help="Batch size for migration")
@click.option("--resume", is_flag=True, help="Resume from previous progress")
@click.option("--verify/--no-verify", default=True, help="Verify after migration")
@click.option("--progress-dir", default=".migration_progress", help="Progress file directory")
@click.pass_context
def migrate(
    ctx,
    es_host,
    es_port,
    es_user,
    es_password,
    es_api_key,
    ob_host,
    ob_port,
    ob_user,
    ob_password,
    ob_database,
    index,
    table,
    batch_size,
    resume,
    verify,
    progress_dir,
):
    """Run RAGFlow data migration from Elasticsearch to OceanBase.
    If --index is omitted, all indices starting with 'ragflow_' will be migrated.
    If --table is omitted, the same name as the source index will be used.
    """
    # Exit codes: 0 when every index migrated successfully, 1 otherwise.
    console.print("[bold]RAGFlow ES to OceanBase Migration[/]")
    try:
        # Initialize ES client first to discover indices if needed
        es_client = ESClient(
            host=es_host,
            port=es_port,
            username=es_user,
            password=es_password,
            api_key=es_api_key,
        )
        ob_client = OBClient(
            host=ob_host,
            port=ob_port,
            user=ob_user,
            password=ob_password,
            database=ob_database,
        )
        # Determine indices to migrate
        if index:
            # Single index specified
            indices_to_migrate = [(index, table if table else index)]
        else:
            # Auto-discover all ragflow_* indices
            console.print("\n[cyan]Discovering RAGFlow indices...[/]")
            ragflow_indices = es_client.list_ragflow_indices()
            if not ragflow_indices:
                console.print("[yellow]No ragflow_* indices found in Elasticsearch[/]")
                # SystemExit is not caught by `except Exception` below;
                # the finally block still runs and closes both clients.
                sys.exit(0)
            # Each index maps to a table with the same name
            indices_to_migrate = [(idx, idx) for idx in ragflow_indices]
            console.print(f"[green]Found {len(indices_to_migrate)} RAGFlow indices:[/]")
            for idx, _ in indices_to_migrate:
                doc_count = es_client.count_documents(idx)
                console.print(f" - {idx} ({doc_count:,} documents)")
            console.print()
        # Initialize migrator
        migrator = ESToOceanBaseMigrator(
            es_client=es_client,
            ob_client=ob_client,
            progress_dir=progress_dir,
        )
        # Track overall results
        total_success = 0
        total_failed = 0
        results = []
        # Migrate each index
        for es_index, ob_table in indices_to_migrate:
            console.print(f"\n[bold blue]{'='*60}[/]")
            console.print(f"[bold]Migrating: {es_index} -> {ob_database}.{ob_table}[/]")
            console.print(f"[bold blue]{'='*60}[/]")
            # migrator.migrate returns a result dict with a "success" key;
            # a failure does not abort the remaining indices.
            result = migrator.migrate(
                es_index=es_index,
                ob_table=ob_table,
                batch_size=batch_size,
                resume=resume,
                verify_after=verify,
            )
            results.append(result)
            if result["success"]:
                total_success += 1
            else:
                total_failed += 1
        # Summary for multiple indices
        if len(indices_to_migrate) > 1:
            console.print(f"\n[bold]{'='*60}[/]")
            console.print("[bold]Migration Summary[/]")
            console.print(f"[bold]{'='*60}[/]")
            console.print(f" Total indices: {len(indices_to_migrate)}")
            console.print(f" [green]Successful: {total_success}[/]")
            if total_failed > 0:
                console.print(f" [red]Failed: {total_failed}[/]")
        # Exit code based on results
        if total_failed == 0:
            console.print("\n[bold green]All migrations completed successfully![/]")
            sys.exit(0)
        else:
            console.print(f"\n[bold red]{total_failed} migration(s) failed[/]")
            sys.exit(1)
    except Exception as e:
        console.print(f"[bold red]Error: {e}[/]")
        if ctx.obj.get("verbose"):
            console.print_exception()
        sys.exit(1)
    finally:
        # Cleanup
        # The locals() guard avoids NameError when a client constructor
        # raised before the variable was ever bound.
        if "es_client" in locals():
            es_client.close()
        if "ob_client" in locals():
            ob_client.close()
@main.command()
@click.option("--es-host", default="localhost", help="Elasticsearch host")
@click.option("--es-port", default=9200, type=int, help="Elasticsearch port")
@click.option("--es-user", default=None, help="Elasticsearch username")
@click.option("--es-password", default=None, help="Elasticsearch password")
@click.option("--index", "-i", required=True, help="ES index name")
@click.option("--output", "-o", default=None, help="Output file (JSON)")
@click.pass_context
def schema(ctx, es_host, es_port, es_user, es_password, index, output):
    """Preview RAGFlow schema analysis from ES mapping."""
    # Read-only preview: analyzes the ES mapping and prints the target
    # OceanBase column schema; no OceanBase connection is required.
    try:
        es_client = ESClient(
            host=es_host,
            port=es_port,
            username=es_user,
            password=es_password,
        )
        # Use the schema converter directly.  (A previous version built an
        # unused ESToOceanBaseMigrator around a half-initialized OBClient
        # created via OBClient.__new__ — dead code, now removed.)
        from .schema import RAGFlowSchemaConverter
        converter = RAGFlowSchemaConverter()
        es_mapping = es_client.get_index_mapping(index)
        analysis = converter.analyze_es_mapping(es_mapping)
        column_defs = converter.get_column_definitions()
        # Display analysis
        console.print(f"\n[bold]ES Index Analysis: {index}[/]\n")
        # Known RAGFlow fields
        console.print(f"[green]Known RAGFlow fields:[/] {len(analysis['known_fields'])}")
        # Vector fields
        if analysis['vector_fields']:
            console.print("\n[cyan]Vector fields detected:[/]")
            for vf in analysis['vector_fields']:
                console.print(f" - {vf['name']} (dimension: {vf['dimension']})")
        # Unknown fields
        if analysis['unknown_fields']:
            console.print("\n[yellow]Unknown fields (will be stored in 'extra'):[/]")
            for uf in analysis['unknown_fields']:
                console.print(f" - {uf}")
        # Display RAGFlow column schema
        console.print(f"\n[bold]RAGFlow OceanBase Schema ({len(column_defs)} columns):[/]\n")
        table = Table(title="Column Definitions")
        table.add_column("Column Name", style="cyan")
        table.add_column("OB Type", style="green")
        table.add_column("Nullable", style="yellow")
        table.add_column("Special", style="magenta")
        for col in column_defs[:20]:  # Show first 20
            # Collect badges for special column attributes.
            special = []
            if col.get("is_primary"):
                special.append("PK")
            if col.get("index"):
                special.append("IDX")
            if col.get("is_array"):
                special.append("ARRAY")
            if col.get("is_vector"):
                special.append("VECTOR")
            table.add_row(
                col["name"],
                col["ob_type"],
                "Yes" if col.get("nullable", True) else "No",
                ", ".join(special) if special else "-",
            )
        if len(column_defs) > 20:
            table.add_row("...", f"({len(column_defs) - 20} more)", "", "")
        console.print(table)
        # Save to file if requested
        if output:
            preview = {
                "es_index": index,
                "es_mapping": es_mapping,
                "analysis": analysis,
                "ob_columns": column_defs,
            }
            # default=str covers non-JSON-serializable values in the raw mapping.
            with open(output, "w") as f:
                json.dump(preview, f, indent=2, default=str)
            console.print(f"\nSchema saved to {output}")
    except Exception as e:
        console.print(f"[bold red]Error: {e}[/]")
        if ctx.obj.get("verbose"):
            console.print_exception()
        sys.exit(1)
    finally:
        if "es_client" in locals():
            es_client.close()
@main.command()
@click.option("--es-host", default="localhost", help="Elasticsearch host")
@click.option("--es-port", default=9200, type=int, help="Elasticsearch port")
@click.option("--ob-host", default="localhost", help="OceanBase host")
@click.option("--ob-port", default=2881, type=int, help="OceanBase port")
@click.option("--ob-user", default="root@test", help="OceanBase user")
@click.option("--ob-password", default="", help="OceanBase password")
@click.option("--ob-database", default="test", help="OceanBase database")
@click.option("--index", "-i", required=True, help="Source ES index name")
@click.option("--table", "-t", required=True, help="Target OceanBase table name")
@click.option("--sample-size", default=100, type=int, help="Sample size for verification")
@click.pass_context
def verify(
    ctx,
    es_host,
    es_port,
    ob_host,
    ob_port,
    ob_user,
    ob_password,
    ob_database,
    index,
    table,
    sample_size,
):
    """Verify migration data consistency."""
    # Exit code mirrors the verification outcome: 0 = passed, 1 = failed/error.
    es_client = None
    ob_client = None
    try:
        es_client = ESClient(host=es_host, port=es_port)
        ob_client = OBClient(
            host=ob_host,
            port=ob_port,
            user=ob_user,
            password=ob_password,
            database=ob_database,
        )
        checker = MigrationVerifier(es_client, ob_client)
        outcome = checker.verify(
            index, table,
            sample_size=sample_size,
        )
        console.print(checker.generate_report(outcome))
        sys.exit(0 if outcome.passed else 1)
    except Exception as exc:
        console.print(f"[bold red]Error: {exc}[/]")
        if ctx.obj.get("verbose"):
            console.print_exception()
        sys.exit(1)
    finally:
        # Close whichever clients were actually created.
        if es_client is not None:
            es_client.close()
        if ob_client is not None:
            ob_client.close()
@main.command("list-indices")
@click.option("--es-host", default="localhost", help="Elasticsearch host")
@click.option("--es-port", default=9200, type=int, help="Elasticsearch port")
@click.option("--es-user", default=None, help="Elasticsearch username")
@click.option("--es-password", default=None, help="Elasticsearch password")
@click.pass_context
def list_indices(ctx, es_host, es_port, es_user, es_password):
    """List all RAGFlow indices (ragflow_*) in Elasticsearch."""
    es_client = None
    try:
        es_client = ESClient(
            host=es_host,
            port=es_port,
            username=es_user,
            password=es_password,
        )
        console.print(f"\n[bold]RAGFlow Indices in Elasticsearch ({es_host}:{es_port})[/]\n")
        indices = es_client.list_ragflow_indices()
        if not indices:
            console.print("[yellow]No ragflow_* indices found[/]")
            return
        table = Table(title="RAGFlow Indices")
        table.add_column("Index Name", style="cyan")
        table.add_column("Document Count", style="green", justify="right")
        table.add_column("Type", style="yellow")
        total_docs = 0
        for name in indices:
            count = es_client.count_documents(name)
            total_docs += count
            # Classify each index by its RAGFlow naming convention.
            if name.startswith("ragflow_doc_meta_"):
                kind = "Metadata"
            elif name.startswith("ragflow_"):
                kind = "Document Chunks"
            else:
                kind = "Unknown"
            table.add_row(name, f"{count:,}", kind)
        table.add_row("", "", "")
        table.add_row("[bold]Total[/]", f"[bold]{total_docs:,}[/]", f"[bold]{len(indices)} indices[/]")
        console.print(table)
    except Exception as exc:
        console.print(f"[bold red]Error: {exc}[/]")
        if ctx.obj.get("verbose"):
            console.print_exception()
        sys.exit(1)
    finally:
        if es_client is not None:
            es_client.close()
@main.command("list-kb")
@click.option("--es-host", default="localhost", help="Elasticsearch host")
@click.option("--es-port", default=9200, type=int, help="Elasticsearch port")
@click.option("--es-user", default=None, help="Elasticsearch username")
@click.option("--es-password", default=None, help="Elasticsearch password")
@click.option("--index", "-i", required=True, help="ES index name")
@click.pass_context
def list_kb(ctx, es_host, es_port, es_user, es_password, index):
    """List all knowledge bases in an ES index."""
    es_client = None
    try:
        es_client = ESClient(
            host=es_host,
            port=es_port,
            username=es_user,
            password=es_password,
        )
        console.print(f"\n[bold]Knowledge Bases in index: {index}[/]\n")
        # Unique kb_id values come from a terms aggregation on the index.
        buckets = es_client.aggregate_field(index, "kb_id").get("buckets", [])
        if not buckets:
            console.print("[yellow]No knowledge bases found[/]")
            return
        table = Table(title="Knowledge Bases")
        table.add_column("KB ID", style="cyan")
        table.add_column("Document Count", style="green", justify="right")
        total_docs = sum(bucket["doc_count"] for bucket in buckets)
        for bucket in buckets:
            table.add_row(bucket["key"], f"{bucket['doc_count']:,}")
        table.add_row("", "")
        table.add_row("[bold]Total[/]", f"[bold]{total_docs:,}[/]")
        console.print(table)
        console.print(f"\nTotal knowledge bases: {len(buckets)}")
    except Exception as exc:
        console.print(f"[bold red]Error: {exc}[/]")
        if ctx.obj.get("verbose"):
            console.print_exception()
        sys.exit(1)
    finally:
        if es_client is not None:
            es_client.close()
@main.command()
@click.option("--es-host", default="localhost", help="Elasticsearch host")
@click.option("--es-port", default=9200, type=int, help="Elasticsearch port")
@click.option("--ob-host", default="localhost", help="OceanBase host")
@click.option("--ob-port", default=2881, type=int, help="OceanBase port")
@click.option("--ob-user", default="root@test", help="OceanBase user")
@click.option("--ob-password", default="", help="OceanBase password")
@click.pass_context
def status(ctx, es_host, es_port, ob_host, ob_port, ob_user, ob_password):
    """Check connection status to ES and OceanBase."""
    console.print("[bold]Connection Status[/]\n")
    # Probe Elasticsearch first; failures are reported, not fatal.
    try:
        es = ESClient(host=es_host, port=es_port)
        cluster_health = es.health_check()
        cluster_info = es.get_cluster_info()
        console.print(f"[green]Elasticsearch ({es_host}:{es_port}): Connected[/]")
        console.print(f" Cluster: {cluster_health.get('cluster_name')}")
        console.print(f" Status: {cluster_health.get('status')}")
        console.print(f" Version: {cluster_info.get('version', {}).get('number', 'unknown')}")
        # List indices
        all_indices = es.list_indices("*")
        console.print(f" Indices: {len(all_indices)}")
        es.close()
    except Exception as exc:
        console.print(f"[red]Elasticsearch ({es_host}:{es_port}): Failed[/]")
        console.print(f" Error: {exc}")
    console.print()
    # Then probe OceanBase.
    try:
        ob = OBClient(
            host=ob_host,
            port=ob_port,
            user=ob_user,
            password=ob_password,
        )
        if ob.health_check():
            server_version = ob.get_version()
            console.print(f"[green]OceanBase ({ob_host}:{ob_port}): Connected[/]")
            console.print(f" Version: {server_version}")
        else:
            console.print(f"[red]OceanBase ({ob_host}:{ob_port}): Health check failed[/]")
        ob.close()
    except Exception as exc:
        console.print(f"[red]OceanBase ({ob_host}:{ob_port}): Failed[/]")
        console.print(f" Error: {exc}")
@main.command()
@click.option("--es-host", default="localhost", help="Elasticsearch host")
@click.option("--es-port", default=9200, type=int, help="Elasticsearch port")
@click.option("--index", "-i", required=True, help="ES index name")
@click.option("--size", "-n", default=5, type=int, help="Number of samples")
@click.pass_context
def sample(ctx, es_host, es_port, index, size):
    """Show sample documents from ES index."""
    # Prints a short human-readable preview of each sampled document:
    # ids, vector-field dimensions, and a truncated content snippet.
    try:
        es_client = ESClient(host=es_host, port=es_port)
        docs = es_client.get_sample_documents(index, size)
        console.print(f"\n[bold]Sample documents from {index}[/]")
        console.print()
        for i, doc in enumerate(docs, 1):
            console.print(f"[bold cyan]Document {i}[/]")
            console.print(f" _id: {doc.get('_id')}")
            console.print(f" kb_id: {doc.get('kb_id')}")
            console.print(f" doc_id: {doc.get('doc_id')}")
            console.print(f" docnm_kwd: {doc.get('docnm_kwd')}")
            # Check for vector fields (RAGFlow names them q_<dim>_vec).
            vector_fields = [k for k in doc.keys() if k.startswith("q_") and k.endswith("_vec")]
            if vector_fields:
                for vf in vector_fields:
                    vec = doc.get(vf)
                    if vec:
                        console.print(f" {vf}: [{len(vec)} dimensions]")
            content = doc.get("content_with_weight", "")
            if content:
                # Normalize to a string before slicing: dicts are rendered
                # as JSON, anything else falls back to str().  The original
                # sliced `content` while measuring `len(str(content))`,
                # which crashed on non-sliceable values.
                if isinstance(content, dict):
                    content = json.dumps(content, ensure_ascii=False)
                elif not isinstance(content, str):
                    content = str(content)
                preview = content[:100] + "..." if len(content) > 100 else content
                console.print(f" content: {preview}")
            console.print()
    except Exception as e:
        console.print(f"[bold red]Error: {e}[/]")
        if ctx.obj.get("verbose"):
            console.print_exception()
        sys.exit(1)
    finally:
        # Close on every path (success, error, Ctrl+C) — previously close()
        # lived inside the try body and leaked the connection on exceptions.
        # Consistent with the other commands in this module.
        if "es_client" in locals():
            es_client.close()
# Entry point when the module is executed directly: `python cli.py <command>`.
if __name__ == "__main__":
    main()
| {
"repo_id": "infiniflow/ragflow",
"file_path": "tools/es-to-oceanbase-migration/src/es_ob_migration/cli.py",
"license": "Apache License 2.0",
"lines": 482,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:tools/es-to-oceanbase-migration/src/es_ob_migration/es_client.py | """
Elasticsearch 8+ Client for RAGFlow data migration.
"""
import logging
from typing import Any, Iterator
from elasticsearch import Elasticsearch
logger = logging.getLogger(__name__)
class ESClient:
    """Elasticsearch client wrapper for RAGFlow migration operations."""
    def __init__(
        self,
        host: str = "localhost",
        port: int = 9200,
        username: str | None = None,
        password: str | None = None,
        api_key: str | None = None,
        use_ssl: bool = False,
        verify_certs: bool = True,
    ):
        """
        Initialize ES client.
        Args:
            host: ES host address
            port: ES port
            username: Basic auth username
            password: Basic auth password
            api_key: API key for authentication
            use_ssl: Whether to use SSL
            verify_certs: Whether to verify SSL certificates
        """
        self.host = host
        self.port = port
        # Build connection URL
        scheme = "https" if use_ssl else "http"
        url = f"{scheme}://{host}:{port}"
        # Build connection arguments
        conn_args: dict[str, Any] = {
            "hosts": [url],
            "verify_certs": verify_certs,
        }
        # API key takes precedence over basic auth when both are supplied.
        if api_key:
            conn_args["api_key"] = api_key
        elif username and password:
            conn_args["basic_auth"] = (username, password)
        self.client = Elasticsearch(**conn_args)
        logger.info(f"Connected to Elasticsearch at {url}")
    def health_check(self) -> dict[str, Any]:
        """Check cluster health."""
        # .body unwraps the elasticsearch-py 8.x response object into a plain dict.
        return self.client.cluster.health().body
    def get_cluster_info(self) -> dict[str, Any]:
        """Get cluster information."""
        return self.client.info().body
    def list_indices(self, pattern: str = "*") -> list[str]:
        """List all indices matching pattern."""
        # Wildcard patterns are supported by the indices.get API.
        response = self.client.indices.get(index=pattern)
        return list(response.keys())
    def list_ragflow_indices(self) -> list[str]:
        """
        List all RAGFlow-related indices.
        Returns indices matching patterns:
        - ragflow_* (document chunks)
        - ragflow_doc_meta_* (document metadata)
        Returns:
            List of RAGFlow index names
        """
        try:
            # Get all ragflow_* indices
            ragflow_indices = self.list_indices("ragflow_*")
            return sorted(ragflow_indices)
        except Exception:
            # If no indices match, return empty list
            # NOTE(review): this also silently swallows connection errors,
            # not just "no match" — callers see an empty list either way.
            return []
    def get_index_mapping(self, index_name: str) -> dict[str, Any]:
        """
        Get index mapping.
        Args:
            index_name: Name of the index
        Returns:
            Index mapping dictionary
        """
        # Response is keyed by index name; return only the "mappings" section.
        response = self.client.indices.get_mapping(index=index_name)
        return response[index_name]["mappings"]
    def get_index_settings(self, index_name: str) -> dict[str, Any]:
        """Get index settings."""
        response = self.client.indices.get_settings(index=index_name)
        return response[index_name]["settings"]
    def count_documents(self, index_name: str) -> int:
        """Count documents in an index."""
        response = self.client.count(index=index_name)
        return response["count"]
    def count_documents_with_filter(
        self,
        index_name: str,
        filters: dict[str, Any]
    ) -> int:
        """
        Count documents with filter conditions.
        Args:
            index_name: Index name
            filters: Filter conditions (e.g., {"kb_id": "xxx"})
        Returns:
            Document count
        """
        # Build bool query with filters
        # List values become `terms` (any-of) clauses; scalars become `term`.
        must_clauses = []
        for field, value in filters.items():
            if isinstance(value, list):
                must_clauses.append({"terms": {field: value}})
            else:
                must_clauses.append({"term": {field: value}})
        query = {
            "bool": {
                "must": must_clauses
            }
        } if must_clauses else {"match_all": {}}
        response = self.client.count(index=index_name, query=query)
        return response["count"]
    def aggregate_field(
        self,
        index_name: str,
        field: str,
        size: int = 10000,
    ) -> dict[str, Any]:
        """
        Aggregate field values (like getting all unique kb_ids).
        Args:
            index_name: Index name
            field: Field to aggregate
            size: Max number of buckets
        Returns:
            Aggregation result with buckets
        """
        # size=0 suppresses document hits; only the aggregation is returned.
        response = self.client.search(
            index=index_name,
            size=0,
            aggs={
                "field_values": {
                    "terms": {
                        "field": field,
                        "size": size,
                    }
                }
            }
        )
        return response["aggregations"]["field_values"]
    def scroll_documents(
        self,
        index_name: str,
        batch_size: int = 1000,
        query: dict[str, Any] | None = None,
        sort_field: str = "_doc",
    ) -> Iterator[list[dict[str, Any]]]:
        """
        Scroll through all documents in an index using search_after (ES 8+).
        This is the recommended approach for ES 8+ instead of scroll API.
        Uses search_after for efficient deep pagination.
        Args:
            index_name: Name of the index
            batch_size: Number of documents per batch
            query: Optional query filter
            sort_field: Field to sort by (default: _doc for efficiency)
        Yields:
            Batches of documents
        """
        # _id is appended as a tiebreaker so the sort key is unique per doc,
        # which search_after requires for stable pagination.
        search_body: dict[str, Any] = {
            "size": batch_size,
            "sort": [{sort_field: "asc"}, {"_id": "asc"}],
        }
        if query:
            search_body["query"] = query
        else:
            search_body["query"] = {"match_all": {}}
        # Initial search
        response = self.client.search(index=index_name, body=search_body)
        hits = response["hits"]["hits"]
        while hits:
            # Extract documents with _id
            documents = []
            for hit in hits:
                doc = hit["_source"].copy()
                doc["_id"] = hit["_id"]
                if "_score" in hit:
                    doc["_score"] = hit["_score"]
                documents.append(doc)
            yield documents
            # Check if there are more results
            # A short page means the index is exhausted — avoids one extra query.
            if len(hits) < batch_size:
                break
            # Get search_after value from last hit
            search_after = hits[-1]["sort"]
            search_body["search_after"] = search_after
            response = self.client.search(index=index_name, body=search_body)
            hits = response["hits"]["hits"]
    def get_document(self, index_name: str, doc_id: str) -> dict[str, Any] | None:
        """Get a single document by ID."""
        try:
            response = self.client.get(index=index_name, id=doc_id)
            doc = response["_source"].copy()
            doc["_id"] = response["_id"]
            return doc
        except Exception:
            # Treats any failure (missing doc, connection error) as "not found".
            return None
    def get_sample_documents(
        self,
        index_name: str,
        size: int = 10,
        query: dict[str, Any] | None = None,
    ) -> list[dict[str, Any]]:
        """
        Get sample documents from an index.
        Args:
            index_name: Index name
            size: Number of samples
            query: Optional query filter
        """
        search_body = {
            "query": query if query else {"match_all": {}},
            "size": size
        }
        response = self.client.search(index=index_name, body=search_body)
        documents = []
        for hit in response["hits"]["hits"]:
            doc = hit["_source"].copy()
            doc["_id"] = hit["_id"]
            documents.append(doc)
        return documents
    def get_document_ids(
        self,
        index_name: str,
        size: int = 1000,
        query: dict[str, Any] | None = None,
    ) -> list[str]:
        """Get list of document IDs."""
        # _source=False skips document bodies; only ids are fetched.
        search_body = {
            "query": query if query else {"match_all": {}},
            "size": size,
            "_source": False,
        }
        response = self.client.search(index=index_name, body=search_body)
        return [hit["_id"] for hit in response["hits"]["hits"]]
    def close(self):
        """Close the ES client connection."""
        self.client.close()
        logger.info("Elasticsearch connection closed")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "tools/es-to-oceanbase-migration/src/es_ob_migration/es_client.py",
"license": "Apache License 2.0",
"lines": 244,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:tools/es-to-oceanbase-migration/src/es_ob_migration/migrator.py | """
RAGFlow-specific migration orchestrator from Elasticsearch to OceanBase.
"""
import logging
import time
from typing import Any, Callable
from rich.console import Console
from rich.progress import (
Progress,
SpinnerColumn,
TextColumn,
BarColumn,
TaskProgressColumn,
TimeRemainingColumn,
)
from .es_client import ESClient
from .ob_client import OBClient
from .schema import RAGFlowSchemaConverter, RAGFlowDataConverter
from .progress import ProgressManager, MigrationProgress
from .verify import MigrationVerifier
logger = logging.getLogger(__name__)
console = Console()
class ESToOceanBaseMigrator:
    """
    RAGFlow-specific migration orchestrator.
    This migrator is designed specifically for RAGFlow's data structure,
    handling the fixed schema and vector embeddings correctly.
    """
    def __init__(
        self,
        es_client: ESClient,
        ob_client: OBClient,
        progress_dir: str = ".migration_progress",
    ):
        """
        Initialize migrator.
        Args:
            es_client: Elasticsearch client
            ob_client: OceanBase client
            progress_dir: Directory for progress files
        """
        self.es_client = es_client
        self.ob_client = ob_client
        self.progress_manager = ProgressManager(progress_dir)
        self.schema_converter = RAGFlowSchemaConverter()
    def migrate(
        self,
        es_index: str,
        ob_table: str,
        batch_size: int = 1000,
        resume: bool = False,
        verify_after: bool = True,
        on_progress: Callable[[int, int], None] | None = None,
    ) -> dict[str, Any]:
        """
        Execute full migration from ES to OceanBase for RAGFlow data.
        Args:
            es_index: Source Elasticsearch index
            ob_table: Target OceanBase table
            batch_size: Documents per batch
            resume: Resume from previous progress
            verify_after: Run verification after migration
            on_progress: Progress callback (migrated, total)
        Returns:
            Migration result dictionary
        """
        start_time = time.time()
        # Result skeleton returned on every path (success, failure, interrupt).
        result = {
            "success": False,
            "es_index": es_index,
            "ob_table": ob_table,
            "total_documents": 0,
            "migrated_documents": 0,
            "failed_documents": 0,
            "duration_seconds": 0,
            "verification": None,
            "error": None,
        }
        progress: MigrationProgress | None = None
        try:
            # Step 1: Check connections
            console.print("[bold blue]Step 1: Checking connections...[/]")
            self._check_connections()
            # Step 2: Analyze ES index
            console.print("\n[bold blue]Step 2: Analyzing ES index...[/]")
            analysis = self._analyze_es_index(es_index)
            # Auto-detect vector size from ES mapping
            # Only the first detected vector field's dimension is used.
            vector_size = 768  # Default fallback
            if analysis["vector_fields"]:
                vector_size = analysis["vector_fields"][0]["dimension"]
                console.print(f" [green]Auto-detected vector dimension: {vector_size}[/]")
            else:
                console.print(f" [yellow]No vector fields found, using default: {vector_size}[/]")
            console.print(f" Known RAGFlow fields: {len(analysis['known_fields'])}")
            if analysis["unknown_fields"]:
                console.print(f" [yellow]Unknown fields (will be stored in 'extra'): {analysis['unknown_fields']}[/]")
            # Step 3: Get total document count
            total_docs = self.es_client.count_documents(es_index)
            console.print(f" Total documents: {total_docs:,}")
            result["total_documents"] = total_docs
            if total_docs == 0:
                console.print("[yellow]No documents to migrate[/]")
                result["success"] = True
                return result
            # Step 4: Handle resume or fresh start
            if resume and self.progress_manager.can_resume(es_index, ob_table):
                console.print("\n[bold yellow]Resuming from previous progress...[/]")
                progress = self.progress_manager.load_progress(es_index, ob_table)
                console.print(
                    f" Previously migrated: {progress.migrated_documents:,} documents"
                )
            else:
                # Fresh start - check if table already exists
                # Aborting (rather than appending/overwriting) prevents
                # accidental duplicate or mixed data in an existing table.
                if self.ob_client.table_exists(ob_table):
                    raise RuntimeError(
                        f"Table '{ob_table}' already exists in OceanBase. "
                        f"Migration aborted to prevent data conflicts. "
                        f"Please drop the table manually or use a different table name."
                    )
                progress = self.progress_manager.create_progress(
                    es_index, ob_table, total_docs
                )
            # Step 5: Create table if needed
            if not progress.table_created:
                console.print("\n[bold blue]Step 3: Creating OceanBase table...[/]")
                if not self.ob_client.table_exists(ob_table):
                    self.ob_client.create_ragflow_table(
                        table_name=ob_table,
                        vector_size=vector_size,
                        create_indexes=True,
                        create_fts_indexes=True,
                    )
                    console.print(f" Created table '{ob_table}' with RAGFlow schema")
                else:
                    console.print(f" Table '{ob_table}' already exists")
                    # Check and add vector column if needed
                    self.ob_client.add_vector_column(ob_table, vector_size)
                # Persist the table/index/schema flags so a resumed run
                # skips this step.
                progress.table_created = True
                progress.indexes_created = True
                progress.schema_converted = True
                self.progress_manager.save_progress(progress)
            # Step 6: Migrate data
            console.print("\n[bold blue]Step 4: Migrating data...[/]")
            data_converter = RAGFlowDataConverter()
            migrated = self._migrate_data(
                es_index=es_index,
                ob_table=ob_table,
                data_converter=data_converter,
                progress=progress,
                batch_size=batch_size,
                on_progress=on_progress,
            )
            result["migrated_documents"] = migrated
            result["failed_documents"] = progress.failed_documents
            # Step 7: Mark completed
            self.progress_manager.mark_completed(progress)
            # Step 8: Verify (optional)
            if verify_after:
                console.print("\n[bold blue]Step 5: Verifying migration...[/]")
                verifier = MigrationVerifier(self.es_client, self.ob_client)
                verification = verifier.verify(
                    es_index, ob_table,
                    primary_key="id"
                )
                result["verification"] = {
                    "passed": verification.passed,
                    "message": verification.message,
                    "es_count": verification.es_count,
                    "ob_count": verification.ob_count,
                    "sample_match_rate": verification.sample_match_rate,
                }
                console.print(verifier.generate_report(verification))
            result["success"] = True
            result["duration_seconds"] = time.time() - start_time
            console.print(
                f"\n[bold green]Migration completed successfully![/]"
                f"\n Total: {result['total_documents']:,} documents"
                f"\n Migrated: {result['migrated_documents']:,} documents"
                f"\n Failed: {result['failed_documents']:,} documents"
                f"\n Duration: {result['duration_seconds']:.1f} seconds"
            )
        except KeyboardInterrupt:
            # Ctrl+C marks the run as paused so --resume can pick it up later.
            console.print("\n[bold yellow]Migration interrupted by user[/]")
            if progress:
                self.progress_manager.mark_paused(progress)
            result["error"] = "Interrupted by user"
        except Exception as e:
            logger.exception("Migration failed")
            if progress:
                self.progress_manager.mark_failed(progress, str(e))
            result["error"] = str(e)
            console.print(f"\n[bold red]Migration failed: {e}[/]")
        return result
    def _check_connections(self):
        """Verify connections to both databases."""
        # Check ES
        # "yellow" is acceptable: the cluster serves reads/writes, only
        # replica allocation is degraded.
        es_health = self.es_client.health_check()
        if es_health.get("status") not in ("green", "yellow"):
            raise RuntimeError(f"ES cluster unhealthy: {es_health}")
        console.print(f" ES cluster status: {es_health.get('status')}")
        # Check OceanBase
        if not self.ob_client.health_check():
            raise RuntimeError("OceanBase connection failed")
        ob_version = self.ob_client.get_version()
        console.print(f" OceanBase connection: OK (version: {ob_version})")
    def _analyze_es_index(self, es_index: str) -> dict[str, Any]:
        """Analyze ES index structure for RAGFlow compatibility."""
        es_mapping = self.es_client.get_index_mapping(es_index)
        return self.schema_converter.analyze_es_mapping(es_mapping)
    def _migrate_data(
        self,
        es_index: str,
        ob_table: str,
        data_converter: RAGFlowDataConverter,
        progress: MigrationProgress,
        batch_size: int,
        on_progress: Callable[[int, int], None] | None,
    ) -> int:
        """Migrate data in batches."""
        # Returns the cumulative migrated count, including documents
        # migrated by a previous (resumed) run.
        total = progress.total_documents
        migrated = progress.migrated_documents
        with Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            BarColumn(),
            TaskProgressColumn(),
            TimeRemainingColumn(),
            console=console,
        ) as pbar:
            task = pbar.add_task(
                "Migrating...",
                total=total,
                completed=migrated,
            )
            batch_count = 0
            for batch in self.es_client.scroll_documents(es_index, batch_size):
                batch_count += 1
                # Convert batch to OceanBase format
                ob_rows = data_converter.convert_batch(batch)
                # Insert batch
                try:
                    inserted = self.ob_client.insert_batch(ob_table, ob_rows)
                    migrated += inserted
                    # Update progress
                    last_ids = [doc.get("_id", doc.get("id", "")) for doc in batch]
                    self.progress_manager.update_progress(
                        progress,
                        migrated_count=inserted,
                        last_batch_ids=last_ids,
                    )
                    # Update progress bar
                    pbar.update(task, completed=migrated)
                    # Callback
                    if on_progress:
                        on_progress(migrated, total)
                    # Log periodically
                    if batch_count % 10 == 0:
                        logger.info(f"Migrated {migrated:,}/{total:,} documents")
                except Exception as e:
                    # A failed batch is counted and skipped; migration
                    # deliberately keeps going rather than aborting.
                    logger.error(f"Batch insert failed: {e}")
                    progress.failed_documents += len(batch)
                    # Continue with next batch
        return migrated
    def get_schema_preview(self, es_index: str) -> dict[str, Any]:
        """
        Get a preview of schema analysis without executing migration.
        Args:
            es_index: Elasticsearch index name
        Returns:
            Schema analysis information
        """
        es_mapping = self.es_client.get_index_mapping(es_index)
        analysis = self.schema_converter.analyze_es_mapping(es_mapping)
        column_defs = self.schema_converter.get_column_definitions()
        return {
            "es_index": es_index,
            "es_mapping": es_mapping,
            "analysis": analysis,
            "ob_columns": column_defs,
            "vector_fields": self.schema_converter.get_vector_fields(),
            "total_columns": len(column_defs),
        }
    def get_data_preview(
        self,
        es_index: str,
        sample_size: int = 5,
        kb_id: str | None = None,
    ) -> list[dict[str, Any]]:
        """
        Get sample documents from ES for preview.
        Args:
            es_index: ES index name
            sample_size: Number of samples
            kb_id: Optional KB filter
        """
        query = None
        if kb_id:
            query = {"term": {"kb_id": kb_id}}
        return self.es_client.get_sample_documents(es_index, sample_size, query=query)
    def list_knowledge_bases(self, es_index: str) -> list[str]:
        """
        List all knowledge base IDs in an ES index.
        Args:
            es_index: ES index name
        Returns:
            List of kb_id values
        """
        try:
            agg_result = self.es_client.aggregate_field(es_index, "kb_id")
            return [bucket["key"] for bucket in agg_result.get("buckets", [])]
        except Exception as e:
            # Best effort: an unreachable index yields an empty list, not a crash.
            logger.warning(f"Failed to list knowledge bases: {e}")
            return []
| {
"repo_id": "infiniflow/ragflow",
"file_path": "tools/es-to-oceanbase-migration/src/es_ob_migration/migrator.py",
"license": "Apache License 2.0",
"lines": 313,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:tools/es-to-oceanbase-migration/src/es_ob_migration/ob_client.py | """
OceanBase Client for RAGFlow data migration.
This client is specifically designed for RAGFlow's data structure.
"""
import logging
from typing import Any
from pyobvector import ObVecClient, FtsIndexParam, FtsParser, VECTOR, ARRAY
from sqlalchemy import Column, String, Integer, Float, JSON, Double
from sqlalchemy.dialects.mysql import LONGTEXT, TEXT as MYSQL_TEXT
from .schema import RAGFLOW_COLUMNS, FTS_COLUMNS_TKS
logger = logging.getLogger(__name__)
# Index naming templates (from RAGFlow ob_conn.py)
INDEX_NAME_TEMPLATE = "ix_%s_%s"  # regular secondary index (two %s slots)
FULLTEXT_INDEX_NAME_TEMPLATE = "fts_idx_%s"  # fulltext index (one %s slot)
VECTOR_INDEX_NAME_TEMPLATE = "%s_idx"  # vector index (one %s slot)
# Columns that need regular indexes
INDEX_COLUMNS = [
    "kb_id",
    "doc_id",
    "available_int",
    "knowledge_graph_kwd",
    "entity_type_kwd",
    "removed_kwd",
]
class OBClient:
    """OceanBase client wrapper for RAGFlow migration operations.

    Thin convenience layer over pyobvector's ObVecClient: table/column/index
    DDL for the fixed RAGFlow schema, batch upserts, and simple row lookups.
    All methods talk to a live OceanBase server.
    """
    def __init__(
        self,
        host: str = "localhost",
        port: int = 2881,
        user: str = "root",
        password: str = "",
        database: str = "test",
        pool_size: int = 10,
    ):
        """
        Initialize OceanBase client.
        Args:
            host: OceanBase host address
            port: OceanBase port
            user: Database user (format: user@tenant for OceanBase)
            password: Database password
            database: Database name
            pool_size: Connection pool size
        """
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.database = database
        # Initialize pyobvector client
        self.uri = f"{host}:{port}"
        self.client = ObVecClient(
            uri=self.uri,
            user=user,
            password=password,
            db_name=database,
            pool_pre_ping=True,  # validate pooled connections before each use
            pool_recycle=3600,  # recycle hourly to dodge server-side idle timeouts
            pool_size=pool_size,
        )
        logger.info(f"Connected to OceanBase at {self.uri}, database: {database}")
    def health_check(self) -> bool:
        """Check database connectivity. Returns True when a trivial query succeeds."""
        try:
            result = self.client.perform_raw_text_sql("SELECT 1 FROM DUAL")
            result.fetchone()
            return True
        except Exception as e:
            logger.error(f"OceanBase health check failed: {e}")
            return False
    def get_version(self) -> str | None:
        """Get OceanBase version string, or None if the query fails."""
        try:
            result = self.client.perform_raw_text_sql("SELECT OB_VERSION() FROM DUAL")
            row = result.fetchone()
            return row[0] if row else None
        except Exception as e:
            logger.warning(f"Failed to get OceanBase version: {e}")
            return None
    def table_exists(self, table_name: str) -> bool:
        """Check if a table exists."""
        try:
            return self.client.check_table_exists(table_name)
        except Exception:
            # Any client error is treated as "not found" so callers can proceed.
            return False
    def create_ragflow_table(
        self,
        table_name: str,
        vector_size: int = 768,
        create_indexes: bool = True,
        create_fts_indexes: bool = True,
    ) -> None:
        """
        Create a RAGFlow-compatible table in OceanBase.
        This creates a table with the exact schema that RAGFlow expects,
        including all columns, indexes, and vector columns.
        Args:
            table_name: Name of the table (usually the ES index name)
            vector_size: Vector dimension (e.g., 768, 1024, 1536)
            create_indexes: Whether to create regular indexes
            create_fts_indexes: Whether to create fulltext indexes
        """
        # Build column definitions for the fixed RAGFlow schema
        columns = self._build_ragflow_columns()
        # Add vector column (RAGFlow names embedding columns q_{dim}_vec)
        vector_column_name = f"q_{vector_size}_vec"
        columns.append(
            Column(vector_column_name, VECTOR(vector_size), nullable=True,
                   comment=f"vector embedding ({vector_size} dimensions)")
        )
        # Table options (from RAGFlow)
        table_options = {
            "mysql_charset": "utf8mb4",
            "mysql_collate": "utf8mb4_unicode_ci",
            "mysql_organization": "heap",
        }
        # Create table
        self.client.create_table(
            table_name=table_name,
            columns=columns,
            **table_options,
        )
        logger.info(f"Created table: {table_name}")
        # Create regular indexes
        if create_indexes:
            self._create_regular_indexes(table_name)
        # Create fulltext indexes
        if create_fts_indexes:
            self._create_fulltext_indexes(table_name)
        # Create vector index (always, since the vector column is always added)
        self._create_vector_index(table_name, vector_column_name)
        # Refresh metadata so pyobvector sees the new table
        self.client.refresh_metadata([table_name])
    def _build_ragflow_columns(self) -> list[Column]:
        """Build SQLAlchemy Column objects for the fixed RAGFlow schema."""
        columns = []
        for col_name, col_def in RAGFLOW_COLUMNS.items():
            ob_type = col_def["ob_type"]
            nullable = col_def.get("nullable", True)
            default = col_def.get("default")
            is_primary = col_def.get("is_primary", False)
            is_array = col_def.get("is_array", False)
            # Parse type and create appropriate Column
            col = self._create_column(col_name, ob_type, nullable, default, is_primary, is_array)
            columns.append(col)
        return columns
    def _create_column(
        self,
        name: str,
        ob_type: str,
        nullable: bool,
        default: Any,
        is_primary: bool,
        is_array: bool,
    ) -> Column:
        """Create a SQLAlchemy Column object based on a type string like "String(256)"."""
        # Handle array types
        if is_array or ob_type.startswith("ARRAY"):
            # Extract inner type
            if "String" in ob_type:
                inner_type = String(256)
            elif "Integer" in ob_type:
                inner_type = Integer
            else:
                # Unknown inner type falls back to String(256)
                inner_type = String(256)
            # Nested array (e.g., ARRAY(ARRAY(Integer)))
            if ob_type.count("ARRAY") > 1:
                return Column(name, ARRAY(ARRAY(inner_type)), nullable=nullable)
            else:
                return Column(name, ARRAY(inner_type), nullable=nullable)
        # Handle String types with length
        if ob_type.startswith("String"):
            # Extract length: String(256) -> 256
            import re
            match = re.search(r'\((\d+)\)', ob_type)
            length = int(match.group(1)) if match else 256
            return Column(
                name, String(length),
                primary_key=is_primary,
                nullable=nullable,
                # String defaults must be quoted for the server-side DDL
                server_default=f"'{default}'" if default else None
            )
        # Map other types
        type_map = {
            "Integer": Integer,
            "Double": Double,
            "Float": Float,
            "JSON": JSON,
            "LONGTEXT": LONGTEXT,
            "TEXT": MYSQL_TEXT,
        }
        for type_name, type_class in type_map.items():
            if type_name in ob_type:
                return Column(
                    name, type_class,
                    primary_key=is_primary,
                    nullable=nullable,
                    server_default=str(default) if default is not None else None
                )
        # Default to String for anything unrecognized
        return Column(name, String(256), nullable=nullable)
    def _create_regular_indexes(self, table_name: str) -> None:
        """Create regular secondary indexes for the columns in INDEX_COLUMNS."""
        for col_name in INDEX_COLUMNS:
            index_name = INDEX_NAME_TEMPLATE % (table_name, col_name)
            try:
                self.client.create_index(
                    table_name=table_name,
                    is_vec_index=False,
                    index_name=index_name,
                    column_names=[col_name],
                )
                logger.debug(f"Created index: {index_name}")
            except Exception as e:
                # Re-running against an existing table is tolerated
                if "Duplicate" in str(e):
                    logger.debug(f"Index {index_name} already exists")
                else:
                    logger.warning(f"Failed to create index {index_name}: {e}")
    def _create_fulltext_indexes(self, table_name: str) -> None:
        """Create fulltext (IK parser) indexes for the tokenized text columns."""
        for fts_column in FTS_COLUMNS_TKS:
            col_name = fts_column.split("^")[0]  # Remove weight suffix
            index_name = FULLTEXT_INDEX_NAME_TEMPLATE % col_name
            try:
                self.client.create_fts_idx_with_fts_index_param(
                    table_name=table_name,
                    fts_idx_param=FtsIndexParam(
                        index_name=index_name,
                        field_names=[col_name],
                        parser_type=FtsParser.IK,
                    ),
                )
                logger.debug(f"Created fulltext index: {index_name}")
            except Exception as e:
                if "Duplicate" in str(e):
                    logger.debug(f"Fulltext index {index_name} already exists")
                else:
                    logger.warning(f"Failed to create fulltext index {index_name}: {e}")
    def _create_vector_index(self, table_name: str, vector_column_name: str) -> None:
        """Create an HNSW (cosine) vector index on the embedding column."""
        index_name = VECTOR_INDEX_NAME_TEMPLATE % vector_column_name
        try:
            self.client.create_index(
                table_name=table_name,
                is_vec_index=True,
                index_name=index_name,
                column_names=[vector_column_name],
                vidx_params="distance=cosine, type=hnsw, lib=vsag",
            )
            logger.info(f"Created vector index: {index_name}")
        except Exception as e:
            if "Duplicate" in str(e):
                logger.debug(f"Vector index {index_name} already exists")
            else:
                logger.warning(f"Failed to create vector index {index_name}: {e}")
    def add_vector_column(self, table_name: str, vector_size: int) -> None:
        """Add a q_{vector_size}_vec column (plus its index) to an existing table.

        No-op if the column already exists; re-raises on add failure.
        """
        vector_column_name = f"q_{vector_size}_vec"
        # Check if column exists
        if self._column_exists(table_name, vector_column_name):
            logger.info(f"Vector column {vector_column_name} already exists")
            return
        try:
            self.client.add_columns(
                table_name=table_name,
                columns=[Column(vector_column_name, VECTOR(vector_size), nullable=True)],
            )
            logger.info(f"Added vector column: {vector_column_name}")
            # Create index
            self._create_vector_index(table_name, vector_column_name)
        except Exception as e:
            logger.error(f"Failed to add vector column: {e}")
            raise
    def _column_exists(self, table_name: str, column_name: str) -> bool:
        """Check if a column exists in a table via INFORMATION_SCHEMA.

        NOTE(review): identifiers are interpolated directly into SQL; this is
        only safe for trusted, internally-generated names.
        """
        try:
            result = self.client.perform_raw_text_sql(
                f"SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS "
                f"WHERE TABLE_SCHEMA = '{self.database}' "
                f"AND TABLE_NAME = '{table_name}' "
                f"AND COLUMN_NAME = '{column_name}'"
            )
            count = result.fetchone()[0]
            return count > 0
        except Exception:
            return False
    def _index_exists(self, table_name: str, index_name: str) -> bool:
        """Check if an index exists via INFORMATION_SCHEMA.

        NOTE(review): identifiers are interpolated directly into SQL; this is
        only safe for trusted, internally-generated names.
        """
        try:
            result = self.client.perform_raw_text_sql(
                f"SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS "
                f"WHERE TABLE_SCHEMA = '{self.database}' "
                f"AND TABLE_NAME = '{table_name}' "
                f"AND INDEX_NAME = '{index_name}'"
            )
            count = result.fetchone()[0]
            return count > 0
        except Exception:
            return False
    def insert_batch(
        self,
        table_name: str,
        documents: list[dict[str, Any]],
    ) -> int:
        """
        Insert a batch of documents using upsert (idempotent on re-run).
        Args:
            table_name: Name of the table
            documents: List of documents to insert
        Returns:
            Number of documents inserted
        Raises:
            Exception: re-raised from the underlying client on failure.
        """
        if not documents:
            return 0
        try:
            self.client.upsert(table_name=table_name, data=documents)
            return len(documents)
        except Exception as e:
            logger.error(f"Batch insert failed: {e}")
            raise
    def count_rows(self, table_name: str, kb_id: str | None = None) -> int:
        """
        Count rows in a table; returns 0 on any error.
        NOTE(review): kb_id is interpolated directly into SQL — pass only
        trusted values, or switch to a parameterized query.
        Args:
            table_name: Table name
            kb_id: Optional knowledge base ID filter
        """
        try:
            sql = f"SELECT COUNT(*) FROM `{table_name}`"
            if kb_id:
                sql += f" WHERE kb_id = '{kb_id}'"
            result = self.client.perform_raw_text_sql(sql)
            return result.fetchone()[0]
        except Exception:
            return 0
    def get_sample_rows(
        self,
        table_name: str,
        limit: int = 10,
        kb_id: str | None = None,
    ) -> list[dict[str, Any]]:
        """Get sample rows from a table as dicts; empty list on error.

        NOTE(review): kb_id is interpolated directly into SQL — trusted
        values only.
        """
        try:
            sql = f"SELECT * FROM `{table_name}`"
            if kb_id:
                sql += f" WHERE kb_id = '{kb_id}'"
            sql += f" LIMIT {limit}"
            result = self.client.perform_raw_text_sql(sql)
            columns = result.keys()
            rows = []
            for row in result:
                # Zip column names with the row tuple to build a dict
                rows.append(dict(zip(columns, row)))
            return rows
        except Exception as e:
            logger.error(f"Failed to get sample rows: {e}")
            return []
    def get_row_by_id(self, table_name: str, doc_id: str) -> dict[str, Any] | None:
        """Get a single row by primary key, or None if absent / on error."""
        try:
            result = self.client.get(table_name=table_name, ids=[doc_id])
            row = result.fetchone()
            if row:
                columns = result.keys()
                return dict(zip(columns, row))
            return None
        except Exception as e:
            logger.error(f"Failed to get row: {e}")
            return None
    def drop_table(self, table_name: str) -> None:
        """Drop a table if exists (errors are logged, not raised)."""
        try:
            self.client.drop_table_if_exist(table_name)
            logger.info(f"Dropped table: {table_name}")
        except Exception as e:
            logger.warning(f"Failed to drop table: {e}")
    def execute_sql(self, sql: str) -> Any:
        """Execute raw SQL and return the underlying result object."""
        return self.client.perform_raw_text_sql(sql)
    def close(self) -> None:
        """Dispose the SQLAlchemy engine behind the OB client."""
        self.client.engine.dispose()
        logger.info("OceanBase connection closed")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "tools/es-to-oceanbase-migration/src/es_ob_migration/ob_client.py",
"license": "Apache License 2.0",
"lines": 383,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:tools/es-to-oceanbase-migration/src/es_ob_migration/progress.py | """
Progress tracking and resume capability for migration.
"""
import json
import logging
from dataclasses import dataclass, field, asdict
from datetime import datetime
from pathlib import Path
from typing import Any
logger = logging.getLogger(__name__)
@dataclass
class MigrationProgress:
    """Migration progress state.

    Serialized to/from JSON by ProgressManager so an interrupted migration
    can be resumed from the last saved batch.
    """
    # Basic info — identifies the migration (source index, target table)
    es_index: str
    ob_table: str
    # ISO-8601 timestamps; naive UTC via datetime.utcnow() in __post_init__
    started_at: str = ""
    updated_at: str = ""
    # Progress counters
    total_documents: int = 0
    migrated_documents: int = 0
    failed_documents: int = 0
    # State for resume — ES search_after cursor and last batch's document IDs
    last_sort_values: list[Any] = field(default_factory=list)
    last_batch_ids: list[str] = field(default_factory=list)
    # Status
    status: str = "pending"  # pending, running, completed, failed, paused
    error_message: str = ""
    # Schema info — which one-time setup steps have already been done
    schema_converted: bool = False
    table_created: bool = False
    indexes_created: bool = False
    def __post_init__(self):
        """Stamp creation/update times (naive UTC ISO-8601) on construction."""
        if not self.started_at:
            self.started_at = datetime.utcnow().isoformat()
        self.updated_at = datetime.utcnow().isoformat()
class ProgressManager:
    """Manage migration progress persistence.

    One JSON checkpoint file is kept per (es_index, ob_table) migration so an
    interrupted run can be resumed from the last saved batch. Checkpoints are
    written atomically (temp file + rename) so a crash mid-write can never
    corrupt an existing checkpoint.
    """

    def __init__(self, progress_dir: str = ".migration_progress"):
        """
        Initialize progress manager.

        Args:
            progress_dir: Directory to store progress files (created if missing)
        """
        self.progress_dir = Path(progress_dir)
        self.progress_dir.mkdir(parents=True, exist_ok=True)

    def _get_progress_file(self, es_index: str, ob_table: str) -> Path:
        """Return the checkpoint file path for one migration."""
        filename = f"{es_index}_to_{ob_table}.json"
        return self.progress_dir / filename

    def load_progress(
        self, es_index: str, ob_table: str
    ) -> MigrationProgress | None:
        """
        Load progress from file.

        Args:
            es_index: Elasticsearch index name
            ob_table: OceanBase table name

        Returns:
            MigrationProgress if a readable checkpoint exists, None otherwise
        """
        progress_file = self._get_progress_file(es_index, ob_table)
        if not progress_file.exists():
            return None
        try:
            with open(progress_file, "r") as f:
                data = json.load(f)
            progress = MigrationProgress(**data)
            logger.info(
                f"Loaded progress: {progress.migrated_documents}/{progress.total_documents} documents"
            )
            return progress
        except Exception as e:
            # A corrupt or incompatible checkpoint is treated as "no progress"
            # rather than aborting the migration.
            logger.warning(f"Failed to load progress: {e}")
            return None

    def save_progress(self, progress: MigrationProgress):
        """
        Save progress to file atomically.

        The state is serialized to a temporary sibling file first and then
        renamed over the real checkpoint. Path.replace is atomic on POSIX, so
        readers always see either the old or the new complete checkpoint —
        never a truncated one (the previous in-place write could corrupt the
        resume file if the process died mid-write).

        Args:
            progress: MigrationProgress instance
        """
        progress.updated_at = datetime.utcnow().isoformat()
        progress_file = self._get_progress_file(progress.es_index, progress.ob_table)
        tmp_file = progress_file.with_name(progress_file.name + ".tmp")
        try:
            with open(tmp_file, "w") as f:
                json.dump(asdict(progress), f, indent=2, default=str)
            tmp_file.replace(progress_file)
            logger.debug(f"Saved progress to {progress_file}")
        except Exception as e:
            logger.error(f"Failed to save progress: {e}")

    def delete_progress(self, es_index: str, ob_table: str):
        """Delete the checkpoint file if present."""
        progress_file = self._get_progress_file(es_index, ob_table)
        if progress_file.exists():
            progress_file.unlink()
            logger.info(f"Deleted progress file: {progress_file}")

    def create_progress(
        self,
        es_index: str,
        ob_table: str,
        total_documents: int,
    ) -> MigrationProgress:
        """
        Create new progress tracker.

        Args:
            es_index: Elasticsearch index name
            ob_table: OceanBase table name
            total_documents: Total documents to migrate

        Returns:
            New MigrationProgress instance (already persisted, status "running")
        """
        progress = MigrationProgress(
            es_index=es_index,
            ob_table=ob_table,
            total_documents=total_documents,
            status="running",
        )
        self.save_progress(progress)
        return progress

    def update_progress(
        self,
        progress: MigrationProgress,
        migrated_count: int,
        last_sort_values: list[Any] | None = None,
        last_batch_ids: list[str] | None = None,
    ):
        """
        Update progress after a batch.

        Args:
            progress: MigrationProgress instance
            migrated_count: Number of documents migrated in this batch
            last_sort_values: Sort values for search_after (resume cursor)
            last_batch_ids: IDs of documents in last batch
        """
        progress.migrated_documents += migrated_count
        # Only overwrite the resume cursor when the caller supplied one.
        if last_sort_values:
            progress.last_sort_values = last_sort_values
        if last_batch_ids:
            progress.last_batch_ids = last_batch_ids
        self.save_progress(progress)

    def mark_completed(self, progress: MigrationProgress):
        """Mark migration as completed and persist."""
        progress.status = "completed"
        progress.updated_at = datetime.utcnow().isoformat()
        self.save_progress(progress)
        logger.info(
            f"Migration completed: {progress.migrated_documents} documents"
        )

    def mark_failed(self, progress: MigrationProgress, error: str):
        """Mark migration as failed, record the error message, and persist."""
        progress.status = "failed"
        progress.error_message = error
        progress.updated_at = datetime.utcnow().isoformat()
        self.save_progress(progress)
        logger.error(f"Migration failed: {error}")

    def mark_paused(self, progress: MigrationProgress):
        """Mark migration as paused (for resume later) and persist."""
        progress.status = "paused"
        progress.updated_at = datetime.utcnow().isoformat()
        self.save_progress(progress)
        logger.info(
            f"Migration paused at {progress.migrated_documents}/{progress.total_documents}"
        )

    def can_resume(self, es_index: str, ob_table: str) -> bool:
        """Check if migration can be resumed (checkpoint exists with a resumable status)."""
        progress = self.load_progress(es_index, ob_table)
        if not progress:
            return False
        return progress.status in ("running", "paused", "failed")

    def get_resume_info(self, es_index: str, ob_table: str) -> dict[str, Any] | None:
        """Get information needed to resume migration, or None if no checkpoint."""
        progress = self.load_progress(es_index, ob_table)
        if not progress:
            return None
        return {
            "migrated_documents": progress.migrated_documents,
            "total_documents": progress.total_documents,
            "last_sort_values": progress.last_sort_values,
            "last_batch_ids": progress.last_batch_ids,
            "schema_converted": progress.schema_converted,
            "table_created": progress.table_created,
            "indexes_created": progress.indexes_created,
            "status": progress.status,
        }
| {
"repo_id": "infiniflow/ragflow",
"file_path": "tools/es-to-oceanbase-migration/src/es_ob_migration/progress.py",
"license": "Apache License 2.0",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:tools/es-to-oceanbase-migration/src/es_ob_migration/schema.py | """
RAGFlow-specific schema conversion from Elasticsearch to OceanBase.
This module handles the fixed RAGFlow table structure migration.
RAGFlow uses a predefined schema for both ES and OceanBase.
"""
import json
import logging
import re
from typing import Any
logger = logging.getLogger(__name__)
# RAGFlow fixed column definitions (from rag/utils/ob_conn.py)
# These are the actual columns used by RAGFlow.
# Each value describes one OceanBase column:
#   ob_type    - type expression: String(n), Integer, Double, JSON, LONGTEXT,
#                TEXT, or ARRAY(...) (possibly nested)
#   nullable   - whether NULL is allowed
#   is_primary - primary-key column
#   index      - needs a regular secondary index
#   is_array / is_json - triggers special value conversion
#   default    - server-side default value
RAGFLOW_COLUMNS = {
    # Primary identifiers
    "id": {"ob_type": "String(256)", "nullable": False, "is_primary": True},
    "kb_id": {"ob_type": "String(256)", "nullable": False, "index": True},
    "doc_id": {"ob_type": "String(256)", "nullable": True, "index": True},
    # Document metadata
    "docnm_kwd": {"ob_type": "String(256)", "nullable": True},  # document name
    "doc_type_kwd": {"ob_type": "String(256)", "nullable": True},  # document type
    # Title fields
    "title_tks": {"ob_type": "String(256)", "nullable": True},  # title tokens
    "title_sm_tks": {"ob_type": "String(256)", "nullable": True},  # fine-grained title tokens
    # Content fields
    "content_with_weight": {"ob_type": "LONGTEXT", "nullable": True},  # original content
    "content_ltks": {"ob_type": "LONGTEXT", "nullable": True},  # long text tokens
    "content_sm_ltks": {"ob_type": "LONGTEXT", "nullable": True},  # fine-grained tokens
    # Feature fields
    "pagerank_fea": {"ob_type": "Integer", "nullable": True},  # page rank priority
    # Array fields
    "important_kwd": {"ob_type": "ARRAY(String(256))", "nullable": True, "is_array": True},  # keywords
    "important_tks": {"ob_type": "TEXT", "nullable": True},  # keyword tokens
    "question_kwd": {"ob_type": "ARRAY(String(1024))", "nullable": True, "is_array": True},  # questions
    "question_tks": {"ob_type": "TEXT", "nullable": True},  # question tokens
    "tag_kwd": {"ob_type": "ARRAY(String(256))", "nullable": True, "is_array": True},  # tags
    "tag_feas": {"ob_type": "JSON", "nullable": True, "is_json": True},  # tag features
    # Status fields
    "available_int": {"ob_type": "Integer", "nullable": False, "default": 1},
    # Time fields
    "create_time": {"ob_type": "String(19)", "nullable": True},
    "create_timestamp_flt": {"ob_type": "Double", "nullable": True},
    # Image field
    "img_id": {"ob_type": "String(128)", "nullable": True},
    # Position fields (arrays)
    "position_int": {"ob_type": "ARRAY(ARRAY(Integer))", "nullable": True, "is_array": True},
    "page_num_int": {"ob_type": "ARRAY(Integer)", "nullable": True, "is_array": True},
    "top_int": {"ob_type": "ARRAY(Integer)", "nullable": True, "is_array": True},
    # Knowledge graph fields
    "knowledge_graph_kwd": {"ob_type": "String(256)", "nullable": True, "index": True},
    "source_id": {"ob_type": "ARRAY(String(256))", "nullable": True, "is_array": True},
    "entity_kwd": {"ob_type": "String(256)", "nullable": True},
    "entity_type_kwd": {"ob_type": "String(256)", "nullable": True, "index": True},
    "from_entity_kwd": {"ob_type": "String(256)", "nullable": True},
    "to_entity_kwd": {"ob_type": "String(256)", "nullable": True},
    "weight_int": {"ob_type": "Integer", "nullable": True},
    "weight_flt": {"ob_type": "Double", "nullable": True},
    "entities_kwd": {"ob_type": "ARRAY(String(256))", "nullable": True, "is_array": True},
    "rank_flt": {"ob_type": "Double", "nullable": True},
    # Status
    "removed_kwd": {"ob_type": "String(256)", "nullable": True, "index": True, "default": "N"},
    # JSON fields
    "metadata": {"ob_type": "JSON", "nullable": True, "is_json": True},
    "extra": {"ob_type": "JSON", "nullable": True, "is_json": True},
    # New columns
    "_order_id": {"ob_type": "Integer", "nullable": True},
    "group_id": {"ob_type": "String(256)", "nullable": True},
    "mom_id": {"ob_type": "String(256)", "nullable": True},
}
# Array column names for special handling (serialized as JSON strings)
ARRAY_COLUMNS = [
    "important_kwd", "question_kwd", "tag_kwd", "source_id",
    "entities_kwd", "position_int", "page_num_int", "top_int"
]
# JSON column names (serialized as JSON strings)
JSON_COLUMNS = ["tag_feas", "metadata", "extra"]
# Fulltext search columns (for reference)
FTS_COLUMNS_ORIGIN = ["docnm_kwd", "content_with_weight", "important_tks", "question_tks"]
FTS_COLUMNS_TKS = ["title_tks", "title_sm_tks", "important_tks", "question_tks", "content_ltks", "content_sm_ltks"]
# Vector field pattern: q_{vector_size}_vec (dimension captured as "vector_size")
VECTOR_FIELD_PATTERN = re.compile(r"q_(?P<vector_size>\d+)_vec")
class RAGFlowSchemaConverter:
    """
    Convert RAGFlow Elasticsearch mappings to OceanBase schema information.

    RAGFlow uses a fixed schema, so this converter knows exactly what fields
    to expect and how to map them; only the vector column dimensions need to
    be discovered from the ES mapping.
    """

    def __init__(self):
        # Vector fields found in the mapping: [{"name": ..., "dimension": ...}]
        self.vector_fields: list[dict[str, Any]] = []
        # Dimension of the first vector field seen (None until detected)
        self.detected_vector_size: int | None = None

    def analyze_es_mapping(self, es_mapping: dict[str, Any]) -> dict[str, Any]:
        """
        Analyze ES mapping to extract vector field dimensions.

        Args:
            es_mapping: Elasticsearch index mapping

        Returns:
            Dict with "known_fields", "vector_fields" and "unknown_fields" lists
        """
        result = {
            "known_fields": [],
            "vector_fields": [],
            "unknown_fields": [],
        }
        properties = es_mapping.get("properties", {})
        # Field definitions are irrelevant here; classification is by name only.
        for field_name in properties:
            if field_name in RAGFLOW_COLUMNS:
                # Known RAGFlow field
                result["known_fields"].append(field_name)
            elif (match := VECTOR_FIELD_PATTERN.match(field_name)) is not None:
                # Vector field like "q_768_vec"; run the regex only once and
                # reuse the match (the original code matched twice per field).
                vec_size = int(match.group("vector_size"))
                result["vector_fields"].append({
                    "name": field_name,
                    "dimension": vec_size,
                })
                self.vector_fields.append({
                    "name": field_name,
                    "dimension": vec_size,
                })
                if self.detected_vector_size is None:
                    self.detected_vector_size = vec_size
            else:
                # Unknown field - might be custom field stored in 'extra'
                result["unknown_fields"].append(field_name)
        logger.info(
            f"Analyzed ES mapping: {len(result['known_fields'])} known fields, "
            f"{len(result['vector_fields'])} vector fields, "
            f"{len(result['unknown_fields'])} unknown fields"
        )
        return result

    def get_column_definitions(self) -> list[dict[str, Any]]:
        """
        Get RAGFlow column definitions for OceanBase table creation.

        Returns:
            List of column definitions: the fixed schema followed by any
            vector columns detected by analyze_es_mapping()
        """
        columns = []
        for col_name, col_def in RAGFLOW_COLUMNS.items():
            columns.append({
                "name": col_name,
                "ob_type": col_def["ob_type"],
                "nullable": col_def.get("nullable", True),
                "is_primary": col_def.get("is_primary", False),
                "index": col_def.get("index", False),
                "is_array": col_def.get("is_array", False),
                "is_json": col_def.get("is_json", False),
                "default": col_def.get("default"),
            })
        # Append the detected vector columns
        for vec_field in self.vector_fields:
            columns.append({
                "name": vec_field["name"],
                "ob_type": f"VECTOR({vec_field['dimension']})",
                "nullable": True,
                "is_vector": True,
                "dimension": vec_field["dimension"],
            })
        return columns

    def get_vector_fields(self) -> list[dict[str, Any]]:
        """Get list of vector fields for index creation."""
        return self.vector_fields
class RAGFlowDataConverter:
"""
Convert RAGFlow ES documents to OceanBase row format.
This converter handles the specific data transformations needed
for RAGFlow's data structure.
"""
def __init__(self):
"""Initialize data converter."""
self.vector_fields: set[str] = set()
def detect_vector_fields(self, doc: dict[str, Any]) -> None:
"""Detect vector fields from a sample document."""
for key in doc.keys():
if VECTOR_FIELD_PATTERN.match(key):
self.vector_fields.add(key)
def convert_document(self, es_doc: dict[str, Any]) -> dict[str, Any]:
"""
Convert an ES document to OceanBase row format.
Args:
es_doc: Elasticsearch document (with _id and _source)
Returns:
Dictionary ready for OceanBase insertion
"""
# Extract _id and _source
doc_id = es_doc.get("_id")
source = es_doc.get("_source", es_doc)
row = {}
# Set document ID
if doc_id:
row["id"] = str(doc_id)
elif "id" in source:
row["id"] = str(source["id"])
# Process each field
for field_name, field_def in RAGFLOW_COLUMNS.items():
if field_name == "id":
continue # Already handled
value = source.get(field_name)
if value is None:
# Use default if available
default = field_def.get("default")
if default is not None:
row[field_name] = default
continue
# Convert based on field type
row[field_name] = self._convert_field_value(
field_name, value, field_def
)
# Handle vector fields
for key, value in source.items():
if VECTOR_FIELD_PATTERN.match(key):
if isinstance(value, list):
row[key] = value
self.vector_fields.add(key)
# Handle unknown fields -> store in 'extra'
extra_fields = {}
for key, value in source.items():
if key not in RAGFLOW_COLUMNS and not VECTOR_FIELD_PATTERN.match(key):
extra_fields[key] = value
if extra_fields:
existing_extra = row.get("extra")
if existing_extra and isinstance(existing_extra, dict):
existing_extra.update(extra_fields)
else:
row["extra"] = json.dumps(extra_fields, ensure_ascii=False)
return row
def _convert_field_value(
self,
field_name: str,
value: Any,
field_def: dict[str, Any]
) -> Any:
"""
Convert a field value to the appropriate format for OceanBase.
Args:
field_name: Field name
value: Original value from ES
field_def: Field definition from RAGFLOW_COLUMNS
Returns:
Converted value
"""
if value is None:
return None
ob_type = field_def.get("ob_type", "")
is_array = field_def.get("is_array", False)
is_json = field_def.get("is_json", False)
# Handle array fields
if is_array:
return self._convert_array_value(value)
# Handle JSON fields
if is_json:
return self._convert_json_value(value)
# Handle specific types
if "Integer" in ob_type:
return self._convert_integer(value)
if "Double" in ob_type or "Float" in ob_type:
return self._convert_float(value)
if "LONGTEXT" in ob_type or "TEXT" in ob_type:
return self._convert_text(value)
if "String" in ob_type:
return self._convert_string(value, field_name)
# Default: convert to string
return str(value) if value is not None else None
def _convert_array_value(self, value: Any) -> str | None:
"""Convert array value to JSON string for OceanBase."""
if value is None:
return None
if isinstance(value, str):
# Already a JSON string
try:
# Validate it's valid JSON
json.loads(value)
return value
except json.JSONDecodeError:
# Not valid JSON, wrap in array
return json.dumps([value], ensure_ascii=False)
if isinstance(value, list):
# Clean array values
cleaned = []
for item in value:
if isinstance(item, str):
# Clean special characters
cleaned_str = item.strip()
cleaned_str = cleaned_str.replace('\\', '\\\\')
cleaned_str = cleaned_str.replace('\n', '\\n')
cleaned_str = cleaned_str.replace('\r', '\\r')
cleaned_str = cleaned_str.replace('\t', '\\t')
cleaned.append(cleaned_str)
else:
cleaned.append(item)
return json.dumps(cleaned, ensure_ascii=False)
# Single value - wrap in array
return json.dumps([value], ensure_ascii=False)
def _convert_json_value(self, value: Any) -> str | None:
"""Convert JSON value to string for OceanBase."""
if value is None:
return None
if isinstance(value, str):
# Already a string, validate JSON
try:
json.loads(value)
return value
except json.JSONDecodeError:
# Not valid JSON, return as-is
return value
if isinstance(value, (dict, list)):
return json.dumps(value, ensure_ascii=False)
return str(value)
def _convert_integer(self, value: Any) -> int | None:
"""Convert to integer."""
if value is None:
return None
if isinstance(value, bool):
return 1 if value else 0
try:
return int(value)
except (ValueError, TypeError):
return None
def _convert_float(self, value: Any) -> float | None:
"""Convert to float."""
if value is None:
return None
try:
return float(value)
except (ValueError, TypeError):
return None
def _convert_text(self, value: Any) -> str | None:
"""Convert to text/longtext."""
if value is None:
return None
if isinstance(value, dict):
# content_with_weight might be stored as dict
return json.dumps(value, ensure_ascii=False)
if isinstance(value, list):
return json.dumps(value, ensure_ascii=False)
return str(value)
def _convert_string(self, value: Any, field_name: str) -> str | None:
"""Convert to string with length considerations."""
if value is None:
return None
# Handle kb_id which might be a list in ES
if field_name == "kb_id" and isinstance(value, list):
return str(value[0]) if value else None
if isinstance(value, (dict, list)):
return json.dumps(value, ensure_ascii=False)
return str(value)
def convert_batch(self, es_docs: list[dict[str, Any]]) -> list[dict[str, Any]]:
"""
Convert a batch of ES documents.
Args:
es_docs: List of Elasticsearch documents
Returns:
List of dictionaries ready for OceanBase insertion
"""
return [self.convert_document(doc) for doc in es_docs]
# Backwards compatibility aliases — older call sites import the generic names.
SchemaConverter = RAGFlowSchemaConverter  # prefer RAGFlowSchemaConverter in new code
DataConverter = RAGFlowDataConverter  # prefer RAGFlowDataConverter in new code
| {
"repo_id": "infiniflow/ragflow",
"file_path": "tools/es-to-oceanbase-migration/src/es_ob_migration/schema.py",
"license": "Apache License 2.0",
"lines": 357,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:tools/es-to-oceanbase-migration/src/es_ob_migration/verify.py | """
Data verification for RAGFlow migration.
"""
import json
import logging
from dataclasses import dataclass, field
from typing import Any
from .es_client import ESClient
from .ob_client import OBClient
from .schema import ARRAY_COLUMNS, JSON_COLUMNS
logger = logging.getLogger(__name__)
@dataclass
class VerificationResult:
    """Migration verification result.

    Filled in by MigrationVerifier.verify(); compares ES source counts and
    sampled documents against the migrated OceanBase table.
    """
    # Identifies the verified migration (source index, target table)
    es_index: str
    ob_table: str
    # Counts — total documents on each side
    es_count: int = 0
    ob_count: int = 0
    count_match: bool = False
    count_diff: int = 0  # absolute difference between es_count and ob_count
    # Sample verification
    sample_size: int = 0  # number of documents requested for sampling
    samples_verified: int = 0  # samples actually checked
    samples_matched: int = 0  # samples whose compared fields all matched
    sample_match_rate: float = 0.0  # samples_matched / samples_verified
    # Mismatches
    missing_in_ob: list[str] = field(default_factory=list)  # sampled IDs absent from OB
    data_mismatches: list[dict[str, Any]] = field(default_factory=list)  # per-doc field diffs
    # Overall
    passed: bool = False
    message: str = ""  # human-readable verdict summary
class MigrationVerifier:
"""Verify RAGFlow migration data consistency."""
# Fields to compare for verification
VERIFY_FIELDS = [
"id", "kb_id", "doc_id", "docnm_kwd", "content_with_weight",
"available_int", "create_time",
]
def __init__(
self,
es_client: ESClient,
ob_client: OBClient,
):
"""
Initialize verifier.
Args:
es_client: Elasticsearch client
ob_client: OceanBase client
"""
self.es_client = es_client
self.ob_client = ob_client
def verify(
self,
es_index: str,
ob_table: str,
sample_size: int = 100,
primary_key: str = "id",
verify_fields: list[str] | None = None,
) -> VerificationResult:
"""
Verify migration by comparing ES and OceanBase data.
Args:
es_index: Elasticsearch index name
ob_table: OceanBase table name
sample_size: Number of documents to sample for verification
primary_key: Primary key column name
verify_fields: Fields to verify (None = use defaults)
Returns:
VerificationResult with details
"""
result = VerificationResult(
es_index=es_index,
ob_table=ob_table,
)
if verify_fields is None:
verify_fields = self.VERIFY_FIELDS
# Step 1: Verify document counts
logger.info("Verifying document counts...")
result.es_count = self.es_client.count_documents(es_index)
result.ob_count = self.ob_client.count_rows(ob_table)
result.count_diff = abs(result.es_count - result.ob_count)
result.count_match = result.count_diff == 0
logger.info(
f"Document counts - ES: {result.es_count}, OB: {result.ob_count}, "
f"Diff: {result.count_diff}"
)
# Step 2: Sample verification
result.sample_size = min(sample_size, result.es_count)
if result.sample_size > 0:
logger.info(f"Verifying {result.sample_size} sample documents...")
self._verify_samples(
es_index, ob_table, result, primary_key, verify_fields
)
# Step 3: Determine overall result
self._determine_result(result)
logger.info(result.message)
return result
def _verify_samples(
self,
es_index: str,
ob_table: str,
result: VerificationResult,
primary_key: str,
verify_fields: list[str],
):
"""Verify sample documents."""
# Get sample documents from ES
es_samples = self.es_client.get_sample_documents(
es_index, result.sample_size
)
for es_doc in es_samples:
result.samples_verified += 1
doc_id = es_doc.get("_id") or es_doc.get("id")
if not doc_id:
logger.warning("Document without ID found")
continue
# Get corresponding document from OceanBase
ob_doc = self.ob_client.get_row_by_id(ob_table, doc_id)
if ob_doc is None:
result.missing_in_ob.append(doc_id)
continue
# Compare documents
match, differences = self._compare_documents(
es_doc, ob_doc, verify_fields
)
if match:
result.samples_matched += 1
else:
result.data_mismatches.append({
"id": doc_id,
"differences": differences,
})
# Calculate match rate
if result.samples_verified > 0:
result.sample_match_rate = result.samples_matched / result.samples_verified
def _compare_documents(
self,
es_doc: dict[str, Any],
ob_doc: dict[str, Any],
verify_fields: list[str],
) -> tuple[bool, list[dict[str, Any]]]:
"""
Compare ES document with OceanBase row.
Returns:
Tuple of (match: bool, differences: list)
"""
differences = []
for field_name in verify_fields:
es_value = es_doc.get(field_name)
ob_value = ob_doc.get(field_name)
# Skip if both are None/null
if es_value is None and ob_value is None:
continue
# Handle special comparisons
if not self._values_equal(field_name, es_value, ob_value):
differences.append({
"field": field_name,
"es_value": es_value,
"ob_value": ob_value,
})
return len(differences) == 0, differences
def _values_equal(
self,
field_name: str,
es_value: Any,
ob_value: Any
) -> bool:
"""Compare two values with type-aware logic."""
if es_value is None and ob_value is None:
return True
if es_value is None or ob_value is None:
# One is None, the other isn't
# For optional fields, this might be acceptable
return False
# Handle array fields (stored as JSON strings in OB)
if field_name in ARRAY_COLUMNS:
if isinstance(ob_value, str):
try:
ob_value = json.loads(ob_value)
except json.JSONDecodeError:
pass
if isinstance(es_value, list) and isinstance(ob_value, list):
return set(str(x) for x in es_value) == set(str(x) for x in ob_value)
# Handle JSON fields
if field_name in JSON_COLUMNS:
if isinstance(ob_value, str):
try:
ob_value = json.loads(ob_value)
except json.JSONDecodeError:
pass
if isinstance(es_value, str):
try:
es_value = json.loads(es_value)
except json.JSONDecodeError:
pass
return es_value == ob_value
# Handle content_with_weight which might be dict or string
if field_name == "content_with_weight":
if isinstance(ob_value, str) and isinstance(es_value, dict):
try:
ob_value = json.loads(ob_value)
except json.JSONDecodeError:
pass
# Handle kb_id which might be list in ES
if field_name == "kb_id":
if isinstance(es_value, list) and len(es_value) > 0:
es_value = es_value[0]
# Standard comparison
return str(es_value) == str(ob_value)
def _determine_result(self, result: VerificationResult):
"""Determine overall verification result."""
# Allow small count differences (e.g., documents added during migration)
count_tolerance = 0.01 # 1% tolerance
count_ok = (
result.count_match or
(result.es_count > 0 and result.count_diff / result.es_count <= count_tolerance)
)
if count_ok and result.sample_match_rate >= 0.99:
result.passed = True
result.message = (
f"Verification PASSED. "
f"ES: {result.es_count:,}, OB: {result.ob_count:,}. "
f"Sample match rate: {result.sample_match_rate:.2%}"
)
elif count_ok and result.sample_match_rate >= 0.95:
result.passed = True
result.message = (
f"Verification PASSED with warnings. "
f"ES: {result.es_count:,}, OB: {result.ob_count:,}. "
f"Sample match rate: {result.sample_match_rate:.2%}"
)
else:
result.passed = False
issues = []
if not count_ok:
issues.append(
f"Count mismatch (ES: {result.es_count}, OB: {result.ob_count}, diff: {result.count_diff})"
)
if result.sample_match_rate < 0.95:
issues.append(f"Low sample match rate: {result.sample_match_rate:.2%}")
if result.missing_in_ob:
issues.append(f"{len(result.missing_in_ob)} documents missing in OB")
result.message = f"Verification FAILED: {'; '.join(issues)}"
def generate_report(self, result: VerificationResult) -> str:
"""Generate a verification report."""
lines = [
"",
"=" * 60,
"Migration Verification Report",
"=" * 60,
f"ES Index: {result.es_index}",
f"OB Table: {result.ob_table}",
]
lines.extend([
"",
"Document Counts:",
f" Elasticsearch: {result.es_count:,}",
f" OceanBase: {result.ob_count:,}",
f" Difference: {result.count_diff:,}",
f" Match: {'Yes' if result.count_match else 'No'}",
"",
"Sample Verification:",
f" Sample Size: {result.sample_size}",
f" Verified: {result.samples_verified}",
f" Matched: {result.samples_matched}",
f" Match Rate: {result.sample_match_rate:.2%}",
"",
])
if result.missing_in_ob:
lines.append(f"Missing in OceanBase ({len(result.missing_in_ob)}):")
for doc_id in result.missing_in_ob[:5]:
lines.append(f" - {doc_id}")
if len(result.missing_in_ob) > 5:
lines.append(f" ... and {len(result.missing_in_ob) - 5} more")
lines.append("")
if result.data_mismatches:
lines.append(f"Data Mismatches ({len(result.data_mismatches)}):")
for mismatch in result.data_mismatches[:3]:
lines.append(f" - ID: {mismatch['id']}")
for diff in mismatch.get("differences", [])[:2]:
lines.append(f" {diff['field']}: ES={diff['es_value']}, OB={diff['ob_value']}")
if len(result.data_mismatches) > 3:
lines.append(f" ... and {len(result.data_mismatches) - 3} more")
lines.append("")
lines.extend([
"=" * 60,
f"Result: {'PASSED' if result.passed else 'FAILED'}",
result.message,
"=" * 60,
"",
])
return "\n".join(lines)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "tools/es-to-oceanbase-migration/src/es_ob_migration/verify.py",
"license": "Apache License 2.0",
"lines": 293,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:tools/es-to-oceanbase-migration/tests/test_progress.py | """
Tests for progress tracking and resume capability.
"""
import json
import os
import tempfile
import pytest
from pathlib import Path
from es_ob_migration.progress import MigrationProgress, ProgressManager
class TestMigrationProgress:
    """Unit tests for the MigrationProgress dataclass."""

    def test_create_basic_progress(self):
        """A freshly built progress record starts empty and pending."""
        prog = MigrationProgress(es_index="ragflow_test", ob_table="ragflow_test")
        assert prog.es_index == "ragflow_test"
        assert prog.ob_table == "ragflow_test"
        assert prog.total_documents == 0
        assert prog.migrated_documents == 0
        assert prog.status == "pending"
        assert prog.started_at != ""
        assert prog.updated_at != ""

    def test_create_progress_with_counts(self):
        """Document counters can be supplied at construction time."""
        prog = MigrationProgress(
            es_index="ragflow_test",
            ob_table="ragflow_test",
            total_documents=1000,
            migrated_documents=500,
        )
        assert prog.total_documents == 1000
        assert prog.migrated_documents == 500

    def test_progress_default_values(self):
        """All optional fields fall back to their documented defaults."""
        prog = MigrationProgress(es_index="test_index", ob_table="test_table")
        assert prog.failed_documents == 0
        assert prog.last_sort_values == []
        assert prog.last_batch_ids == []
        assert prog.error_message == ""
        assert prog.schema_converted is False
        assert prog.table_created is False
        assert prog.indexes_created is False

    def test_progress_status_values(self):
        """Every lifecycle status string is accepted verbatim."""
        for state in ("pending", "running", "completed", "failed", "paused"):
            prog = MigrationProgress(es_index="test", ob_table="test", status=state)
            assert prog.status == state
class TestProgressManager:
    """Unit tests for ProgressManager persistence and resume logic."""

    @pytest.fixture
    def temp_dir(self):
        """Yield a throwaway directory that is removed after the test."""
        with tempfile.TemporaryDirectory() as tmpdir:
            yield tmpdir

    @pytest.fixture
    def manager(self, temp_dir):
        """Build a ProgressManager rooted at the temporary directory."""
        return ProgressManager(progress_dir=temp_dir)

    def test_create_progress_manager(self, temp_dir):
        """Constructing a manager exposes an existing progress directory."""
        mgr = ProgressManager(progress_dir=temp_dir)
        assert mgr.progress_dir.exists()

    def test_create_progress_manager_creates_dir(self, temp_dir):
        """A missing progress directory is created on construction."""
        target = os.path.join(temp_dir, "new_progress")
        ProgressManager(progress_dir=target)
        assert Path(target).exists()

    def test_create_progress(self, manager):
        """create_progress returns a running record with the given totals."""
        prog = manager.create_progress(
            es_index="ragflow_abc123",
            ob_table="ragflow_abc123",
            total_documents=1000,
        )
        assert prog.es_index == "ragflow_abc123"
        assert prog.ob_table == "ragflow_abc123"
        assert prog.total_documents == 1000
        assert prog.status == "running"

    def test_save_and_load_progress(self, manager):
        """A saved record round-trips through disk unchanged."""
        prog = manager.create_progress(
            es_index="ragflow_test",
            ob_table="ragflow_test",
            total_documents=500,
        )
        prog.migrated_documents = 250
        prog.last_sort_values = ["doc_250", 1234567890]
        manager.save_progress(prog)
        restored = manager.load_progress("ragflow_test", "ragflow_test")
        assert restored is not None
        assert restored.es_index == "ragflow_test"
        assert restored.total_documents == 500
        assert restored.migrated_documents == 250
        assert restored.last_sort_values == ["doc_250", 1234567890]

    def test_load_nonexistent_progress(self, manager):
        """Loading an unknown index/table pair yields None."""
        assert manager.load_progress("nonexistent", "nonexistent") is None

    def test_delete_progress(self, manager):
        """delete_progress removes a previously saved record."""
        manager.create_progress(
            es_index="ragflow_delete_test",
            ob_table="ragflow_delete_test",
            total_documents=100,
        )
        assert manager.load_progress("ragflow_delete_test", "ragflow_delete_test") is not None
        manager.delete_progress("ragflow_delete_test", "ragflow_delete_test")
        assert manager.load_progress("ragflow_delete_test", "ragflow_delete_test") is None

    def test_update_progress(self, manager):
        """update_progress applies counters and checkpoint data in place."""
        prog = manager.create_progress(
            es_index="ragflow_update",
            ob_table="ragflow_update",
            total_documents=1000,
        )
        manager.update_progress(
            prog,
            migrated_count=100,
            last_sort_values=["doc_100", 9999],
            last_batch_ids=["id1", "id2", "id3"],
        )
        assert prog.migrated_documents == 100
        assert prog.last_sort_values == ["doc_100", 9999]
        assert prog.last_batch_ids == ["id1", "id2", "id3"]

    def test_update_progress_multiple_batches(self, manager):
        """Repeated updates accumulate the migrated-document counter."""
        prog = manager.create_progress(
            es_index="ragflow_multi",
            ob_table="ragflow_multi",
            total_documents=1000,
        )
        for _ in range(10):
            manager.update_progress(prog, migrated_count=100)
        assert prog.migrated_documents == 1000

    def test_mark_completed(self, manager):
        """mark_completed flips the status to 'completed'."""
        prog = manager.create_progress(
            es_index="ragflow_complete",
            ob_table="ragflow_complete",
            total_documents=100,
        )
        prog.migrated_documents = 100
        manager.mark_completed(prog)
        assert prog.status == "completed"

    def test_mark_failed(self, manager):
        """mark_failed records both the status and the error message."""
        prog = manager.create_progress(
            es_index="ragflow_fail",
            ob_table="ragflow_fail",
            total_documents=100,
        )
        manager.mark_failed(prog, "Connection timeout")
        assert prog.status == "failed"
        assert prog.error_message == "Connection timeout"

    def test_mark_paused(self, manager):
        """mark_paused flips the status to 'paused'."""
        prog = manager.create_progress(
            es_index="ragflow_pause",
            ob_table="ragflow_pause",
            total_documents=1000,
        )
        prog.migrated_documents = 500
        manager.mark_paused(prog)
        assert prog.status == "paused"

    def test_can_resume_running(self, manager):
        """A running migration is resumable."""
        manager.create_progress(
            es_index="ragflow_resume_running",
            ob_table="ragflow_resume_running",
            total_documents=1000,
        )
        assert manager.can_resume("ragflow_resume_running", "ragflow_resume_running") is True

    def test_can_resume_paused(self, manager):
        """A paused migration is resumable."""
        prog = manager.create_progress(
            es_index="ragflow_resume_paused",
            ob_table="ragflow_resume_paused",
            total_documents=1000,
        )
        manager.mark_paused(prog)
        assert manager.can_resume("ragflow_resume_paused", "ragflow_resume_paused") is True

    def test_can_resume_completed(self, manager):
        """A completed migration must not be resumed again."""
        prog = manager.create_progress(
            es_index="ragflow_resume_complete",
            ob_table="ragflow_resume_complete",
            total_documents=100,
        )
        prog.migrated_documents = 100
        manager.mark_completed(prog)
        assert manager.can_resume("ragflow_resume_complete", "ragflow_resume_complete") is False

    def test_can_resume_nonexistent(self, manager):
        """Unknown migrations are never resumable."""
        assert manager.can_resume("nonexistent", "nonexistent") is False

    def test_get_resume_info(self, manager):
        """get_resume_info surfaces checkpoint data and schema flags."""
        prog = manager.create_progress(
            es_index="ragflow_info",
            ob_table="ragflow_info",
            total_documents=1000,
        )
        prog.migrated_documents = 500
        prog.last_sort_values = ["doc_500", 12345]
        prog.schema_converted = True
        prog.table_created = True
        manager.save_progress(prog)
        info = manager.get_resume_info("ragflow_info", "ragflow_info")
        assert info is not None
        assert info["migrated_documents"] == 500
        assert info["total_documents"] == 1000
        assert info["last_sort_values"] == ["doc_500", 12345]
        assert info["schema_converted"] is True
        assert info["table_created"] is True
        assert info["status"] == "running"

    def test_get_resume_info_nonexistent(self, manager):
        """Resume info for an unknown migration is None."""
        assert manager.get_resume_info("nonexistent", "nonexistent") is None

    def test_progress_file_path(self, manager):
        """Progress files are named '<es_index>_to_<ob_table>.json'."""
        manager.create_progress(
            es_index="ragflow_abc123",
            ob_table="ragflow_abc123",
            total_documents=100,
        )
        assert (manager.progress_dir / "ragflow_abc123_to_ragflow_abc123.json").exists()

    def test_progress_file_content(self, manager):
        """The on-disk JSON mirrors the in-memory record."""
        prog = manager.create_progress(
            es_index="ragflow_json",
            ob_table="ragflow_json",
            total_documents=100,
        )
        prog.migrated_documents = 50
        manager.save_progress(prog)
        payload = json.loads(
            (manager.progress_dir / "ragflow_json_to_ragflow_json.json").read_text()
        )
        assert payload["es_index"] == "ragflow_json"
        assert payload["ob_table"] == "ragflow_json"
        assert payload["total_documents"] == 100
        assert payload["migrated_documents"] == 50
| {
"repo_id": "infiniflow/ragflow",
"file_path": "tools/es-to-oceanbase-migration/tests/test_progress.py",
"license": "Apache License 2.0",
"lines": 262,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:tools/es-to-oceanbase-migration/tests/test_schema.py | """
Tests for RAGFlow schema conversion.
This module tests:
- RAGFlowSchemaConverter: Analyzes ES mappings and generates OB column definitions
- RAGFlowDataConverter: Converts ES documents to OceanBase row format
- Vector field pattern matching
- Schema constants
"""
import json
from es_ob_migration.schema import (
RAGFlowSchemaConverter,
RAGFlowDataConverter,
RAGFLOW_COLUMNS,
ARRAY_COLUMNS,
JSON_COLUMNS,
VECTOR_FIELD_PATTERN,
FTS_COLUMNS_ORIGIN,
FTS_COLUMNS_TKS,
)
class TestRAGFlowSchemaConverter:
    """Unit tests for RAGFlowSchemaConverter mapping analysis."""

    def test_analyze_ragflow_mapping(self):
        """Known and vector fields are classified from a RAGFlow ES mapping."""
        conv = RAGFlowSchemaConverter()
        mapping = {
            "properties": {
                "id": {"type": "keyword"},
                "kb_id": {"type": "keyword"},
                "doc_id": {"type": "keyword"},
                "docnm_kwd": {"type": "keyword"},
                "content_with_weight": {"type": "text"},
                "content_ltks": {"type": "text"},
                "available_int": {"type": "integer"},
                "important_kwd": {"type": "keyword"},
                "q_768_vec": {"type": "dense_vector", "dims": 768},
            }
        }
        report = conv.analyze_es_mapping(mapping)
        for name in ("id", "kb_id", "content_with_weight"):
            assert name in report["known_fields"]
        vectors = report["vector_fields"]
        assert len(vectors) == 1
        assert vectors[0]["name"] == "q_768_vec"
        assert vectors[0]["dimension"] == 768

    def test_detect_vector_size(self):
        """The embedding dimension is detected from the mapping."""
        conv = RAGFlowSchemaConverter()
        conv.analyze_es_mapping(
            {"properties": {"q_1536_vec": {"type": "dense_vector", "dims": 1536}}}
        )
        assert conv.detected_vector_size == 1536

    def test_unknown_fields(self):
        """Fields outside the RAGFlow schema land in unknown_fields."""
        conv = RAGFlowSchemaConverter()
        report = conv.analyze_es_mapping({
            "properties": {
                "id": {"type": "keyword"},
                "custom_field": {"type": "text"},
                "another_field": {"type": "integer"},
            }
        })
        assert "custom_field" in report["unknown_fields"]
        assert "another_field" in report["unknown_fields"]

    def test_get_column_definitions(self):
        """Column definitions cover every RAGFlow column plus detected vectors."""
        conv = RAGFlowSchemaConverter()
        # Analyze first so the vector column is known
        conv.analyze_es_mapping(
            {"properties": {"q_768_vec": {"type": "dense_vector", "dims": 768}}}
        )
        names = [col["name"] for col in conv.get_column_definitions()]
        for expected in RAGFLOW_COLUMNS:
            assert expected in names, f"Missing column: {expected}"
        assert "q_768_vec" in names
class TestRAGFlowDataConverter:
    """Unit tests for RAGFlowDataConverter document conversion."""

    def test_convert_basic_document(self):
        """Scalar RAGFlow fields are copied through unchanged."""
        conv = RAGFlowDataConverter()
        source = {
            "id": "test-id-123",
            "kb_id": "kb-001",
            "doc_id": "doc-001",
            "docnm_kwd": "test_document.pdf",
            "content_with_weight": "This is test content",
            "available_int": 1,
        }
        row = conv.convert_document({"_id": "test-id-123", "_source": source})
        for key, value in source.items():
            assert row[key] == value

    def test_convert_with_vector(self):
        """Dense vectors are preserved and the field name is registered."""
        conv = RAGFlowDataConverter()
        embedding = [0.1] * 768
        row = conv.convert_document({
            "_id": "vec-doc-001",
            "_source": {"id": "vec-doc-001", "kb_id": "kb-001", "q_768_vec": embedding},
        })
        assert row["id"] == "vec-doc-001"
        assert row["q_768_vec"] == embedding
        assert "q_768_vec" in conv.vector_fields

    def test_convert_array_fields(self):
        """Array fields are serialized to JSON strings."""
        conv = RAGFlowDataConverter()
        row = conv.convert_document({
            "_id": "array-doc",
            "_source": {
                "id": "array-doc",
                "kb_id": "kb-001",
                "important_kwd": ["keyword1", "keyword2", "keyword3"],
                "question_kwd": ["What is this?", "How does it work?"],
                "tag_kwd": ["tag1", "tag2"],
            },
        })
        assert isinstance(row["important_kwd"], str)
        assert json.loads(row["important_kwd"]) == ["keyword1", "keyword2", "keyword3"]

    def test_convert_json_fields(self):
        """Dict-valued fields are serialized to JSON strings."""
        conv = RAGFlowDataConverter()
        row = conv.convert_document({
            "_id": "json-doc",
            "_source": {
                "id": "json-doc",
                "kb_id": "kb-001",
                "tag_feas": {"tag1": 0.8, "tag2": 0.5},
                "metadata": {"author": "John", "date": "2024-01-01"},
            },
        })
        assert isinstance(row["tag_feas"], str)
        assert isinstance(row["metadata"], str)
        assert json.loads(row["tag_feas"]) == {"tag1": 0.8, "tag2": 0.5}

    def test_convert_unknown_fields_to_extra(self):
        """Fields outside the schema are collected into 'extra'."""
        conv = RAGFlowDataConverter()
        row = conv.convert_document({
            "_id": "extra-doc",
            "_source": {
                "id": "extra-doc",
                "kb_id": "kb-001",
                "custom_field": "custom_value",
                "another_custom": 123,
            },
        })
        assert "extra" in row
        extra = json.loads(row["extra"])
        assert extra["custom_field"] == "custom_value"
        assert extra["another_custom"] == 123

    def test_convert_kb_id_list(self):
        """A list-valued kb_id (ES format) collapses to its first element."""
        conv = RAGFlowDataConverter()
        row = conv.convert_document({
            "_id": "kb-list-doc",
            "_source": {"id": "kb-list-doc", "kb_id": ["kb-001", "kb-002"]},
        })
        assert row["kb_id"] == "kb-001"

    def test_convert_content_with_weight_dict(self):
        """A dict-valued content_with_weight is JSON-serialized."""
        conv = RAGFlowDataConverter()
        row = conv.convert_document({
            "_id": "content-dict-doc",
            "_source": {
                "id": "content-dict-doc",
                "kb_id": "kb-001",
                "content_with_weight": {"text": "Some content", "weight": 1.0},
            },
        })
        assert isinstance(row["content_with_weight"], str)
        assert json.loads(row["content_with_weight"])["text"] == "Some content"

    def test_convert_batch(self):
        """convert_batch converts every document, preserving order."""
        conv = RAGFlowDataConverter()
        docs = [
            {"_id": f"doc-{i}", "_source": {"id": f"doc-{i}", "kb_id": "kb-001"}}
            for i in range(5)
        ]
        rows = conv.convert_batch(docs)
        assert len(rows) == 5
        assert [row["id"] for row in rows] == [f"doc-{i}" for i in range(5)]
class TestVectorFieldPattern:
    """Tests for the q_<dim>_vec field-name pattern."""

    def test_valid_patterns(self):
        """Names of the form q_<digits>_vec are accepted."""
        for candidate in ("q_768_vec", "q_1024_vec", "q_1536_vec", "q_3072_vec"):
            assert VECTOR_FIELD_PATTERN.match(candidate) is not None, f"Should match: {candidate}"

    def test_invalid_patterns(self):
        """Anything that is not exactly q_<digits>_vec is rejected."""
        for candidate in ("q_vec", "768_vec", "q_768", "vector_768", "content_with_weight"):
            assert VECTOR_FIELD_PATTERN.match(candidate) is None, f"Should not match: {candidate}"

    def test_extract_dimension(self):
        """The 'vector_size' group captures the embedding dimension."""
        m = VECTOR_FIELD_PATTERN.match("q_1536_vec")
        assert m is not None
        assert int(m.group("vector_size")) == 1536
class TestConstants:
    """Sanity checks on the schema constant tables."""

    def test_array_columns(self):
        """Every documented array column is present in ARRAY_COLUMNS."""
        for col in (
            "important_kwd", "question_kwd", "tag_kwd", "source_id",
            "entities_kwd", "position_int", "page_num_int", "top_int",
        ):
            assert col in ARRAY_COLUMNS, f"Missing array column: {col}"

    def test_json_columns(self):
        """Every documented JSON column is present in JSON_COLUMNS."""
        for col in ("tag_feas", "metadata", "extra"):
            assert col in JSON_COLUMNS, f"Missing JSON column: {col}"

    def test_ragflow_columns_completeness(self):
        """RAGFLOW_COLUMNS contains all required field definitions."""
        for field in (
            "id", "kb_id", "doc_id", "content_with_weight",
            "available_int", "metadata", "extra",
        ):
            assert field in RAGFLOW_COLUMNS, f"Missing required field: {field}"

    def test_fts_columns(self):
        """Fulltext-search column lists carry the expected members."""
        assert "content_with_weight" in FTS_COLUMNS_ORIGIN
        assert "content_ltks" in FTS_COLUMNS_TKS

    def test_ragflow_columns_types(self):
        """Column definitions carry the expected type flags."""
        # Primary key constraints
        id_col = RAGFLOW_COLUMNS["id"]
        assert id_col["is_primary"] is True
        assert id_col["nullable"] is False
        # Secondary indexes
        assert RAGFLOW_COLUMNS["kb_id"]["index"] is True
        assert RAGFLOW_COLUMNS["doc_id"]["index"] is True
        # Array-typed columns
        assert RAGFLOW_COLUMNS["important_kwd"]["is_array"] is True
        assert RAGFLOW_COLUMNS["question_kwd"]["is_array"] is True
        # JSON-typed columns
        assert RAGFLOW_COLUMNS["metadata"]["is_json"] is True
        assert RAGFLOW_COLUMNS["extra"]["is_json"] is True
class TestRAGFlowSchemaConverterEdgeCases:
    """Edge-case coverage for RAGFlowSchemaConverter."""

    def test_empty_mapping(self):
        """An empty mapping yields empty classification lists."""
        conv = RAGFlowSchemaConverter()
        report = conv.analyze_es_mapping({})
        assert report["known_fields"] == []
        assert report["vector_fields"] == []
        assert report["unknown_fields"] == []

    def test_mapping_without_properties(self):
        """A mapping missing the 'properties' key is treated as empty."""
        conv = RAGFlowSchemaConverter()
        report = conv.analyze_es_mapping({"some_other_key": {}})
        assert report["known_fields"] == []

    def test_multiple_vector_fields(self):
        """All vector fields are reported; one dimension gets recorded."""
        conv = RAGFlowSchemaConverter()
        report = conv.analyze_es_mapping({
            "properties": {
                "q_768_vec": {"type": "dense_vector", "dims": 768},
                "q_1024_vec": {"type": "dense_vector", "dims": 1024},
            }
        })
        assert len(report["vector_fields"]) == 2
        assert conv.detected_vector_size in (768, 1024)

    def test_get_column_definitions_without_analysis(self):
        """Column definitions are available before any mapping analysis."""
        conv = RAGFlowSchemaConverter()
        names = [col["name"] for col in conv.get_column_definitions()]
        assert "id" in names
        assert "kb_id" in names

    def test_get_vector_fields(self):
        """get_vector_fields reflects the analyzed mapping."""
        conv = RAGFlowSchemaConverter()
        conv.analyze_es_mapping(
            {"properties": {"q_1536_vec": {"type": "dense_vector", "dims": 1536}}}
        )
        vectors = conv.get_vector_fields()
        assert len(vectors) == 1
        assert vectors[0]["name"] == "q_1536_vec"
        assert vectors[0]["dimension"] == 1536
class TestRAGFlowDataConverterEdgeCases:
    """Edge-case coverage for RAGFlowDataConverter."""

    def test_convert_empty_document(self):
        """An empty _source still yields a row keyed by _id."""
        conv = RAGFlowDataConverter()
        row = conv.convert_document({"_id": "empty_doc", "_source": {}})
        assert row["id"] == "empty_doc"

    def test_convert_document_without_source(self):
        """Documents without _source are read from the top level."""
        conv = RAGFlowDataConverter()
        row = conv.convert_document({"_id": "no_source", "id": "no_source", "kb_id": "kb_001"})
        assert row["id"] == "no_source"
        assert row["kb_id"] == "kb_001"

    def test_convert_boolean_to_integer(self):
        """Booleans in integer columns are coerced to 0/1."""
        conv = RAGFlowDataConverter()
        row = conv.convert_document({
            "_id": "bool_doc",
            "_source": {"id": "bool_doc", "kb_id": "kb_001", "available_int": True},
        })
        assert row["available_int"] == 1

    def test_convert_invalid_integer(self):
        """Unparseable integer values become None."""
        conv = RAGFlowDataConverter()
        row = conv.convert_document({
            "_id": "invalid_int",
            "_source": {"id": "invalid_int", "kb_id": "kb_001", "available_int": "not_a_number"},
        })
        assert row["available_int"] is None

    def test_convert_float_field(self):
        """Float columns accept both floats and numeric strings."""
        conv = RAGFlowDataConverter()
        row = conv.convert_document({
            "_id": "float_doc",
            "_source": {
                "id": "float_doc",
                "kb_id": "kb_001",
                "weight_flt": 0.85,
                "rank_flt": "0.95",  # numeric string should be coerced to float
            },
        })
        assert row["weight_flt"] == 0.85
        assert row["rank_flt"] == 0.95

    def test_convert_array_with_special_characters(self):
        """Array JSON round-trips entries containing control characters."""
        conv = RAGFlowDataConverter()
        row = conv.convert_document({
            "_id": "special_array",
            "_source": {
                "id": "special_array",
                "kb_id": "kb_001",
                "important_kwd": ["key\nwith\nnewlines", "key\twith\ttabs"],
            },
        })
        assert isinstance(row["important_kwd"], str)
        assert len(json.loads(row["important_kwd"])) == 2

    def test_convert_already_json_array(self):
        """A value that is already a JSON string passes through untouched."""
        conv = RAGFlowDataConverter()
        row = conv.convert_document({
            "_id": "json_array",
            "_source": {
                "id": "json_array",
                "kb_id": "kb_001",
                "important_kwd": '["already", "json"]',
            },
        })
        assert row["important_kwd"] == '["already", "json"]'

    def test_convert_single_value_to_array(self):
        """A scalar in an array column is wrapped in a one-element list."""
        conv = RAGFlowDataConverter()
        row = conv.convert_document({
            "_id": "single_to_array",
            "_source": {
                "id": "single_to_array",
                "kb_id": "kb_001",
                "important_kwd": "single_keyword",
            },
        })
        assert json.loads(row["important_kwd"]) == ["single_keyword"]

    def test_detect_vector_fields_from_document(self):
        """detect_vector_fields registers every q_*_vec key it sees."""
        conv = RAGFlowDataConverter()
        conv.detect_vector_fields({"q_768_vec": [0.1] * 768, "q_1024_vec": [0.2] * 1024})
        assert "q_768_vec" in conv.vector_fields
        assert "q_1024_vec" in conv.vector_fields

    def test_convert_with_default_values(self):
        """Missing columns pick up their schema defaults (available_int=1)."""
        conv = RAGFlowDataConverter()
        row = conv.convert_document({
            "_id": "default_test",
            "_source": {"id": "default_test", "kb_id": "kb_001"},
        })
        assert row.get("available_int") == 1

    def test_convert_list_content(self):
        """A list-valued content_with_weight is serialized to JSON."""
        conv = RAGFlowDataConverter()
        row = conv.convert_document({
            "_id": "list_content",
            "_source": {
                "id": "list_content",
                "kb_id": "kb_001",
                "content_with_weight": ["part1", "part2", "part3"],
            },
        })
        assert isinstance(row["content_with_weight"], str)
        assert json.loads(row["content_with_weight"]) == ["part1", "part2", "part3"]

    def test_convert_batch_empty(self):
        """An empty batch converts to an empty list."""
        conv = RAGFlowDataConverter()
        assert conv.convert_batch([]) == []

    def test_existing_extra_field_merged(self):
        """Unknown fields merge into an already-present 'extra' dict."""
        conv = RAGFlowDataConverter()
        row = conv.convert_document({
            "_id": "merge_extra",
            "_source": {
                "id": "merge_extra",
                "kb_id": "kb_001",
                "extra": {"existing_key": "existing_value"},
                "custom_field": "custom_value",
            },
        })
        assert "custom_field" in json.loads(row["extra"])
| {
"repo_id": "infiniflow/ragflow",
"file_path": "tools/es-to-oceanbase-migration/tests/test_schema.py",
"license": "Apache License 2.0",
"lines": 500,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:tools/es-to-oceanbase-migration/tests/test_verify.py | """
Tests for migration verification.
"""
import pytest
from unittest.mock import Mock
from es_ob_migration.verify import MigrationVerifier, VerificationResult
class TestVerificationResult:
    """Unit tests for the VerificationResult dataclass."""

    def test_create_basic_result(self):
        """A new result starts with zero counts and passed=False."""
        res = VerificationResult(es_index="ragflow_test", ob_table="ragflow_test")
        assert res.es_index == "ragflow_test"
        assert res.ob_table == "ragflow_test"
        assert res.es_count == 0
        assert res.ob_count == 0
        assert res.passed is False

    def test_result_default_values(self):
        """All optional fields fall back to their documented defaults."""
        res = VerificationResult(es_index="test", ob_table="test")
        assert res.count_match is False
        assert res.count_diff == 0
        assert res.sample_size == 0
        assert res.samples_verified == 0
        assert res.samples_matched == 0
        assert res.sample_match_rate == 0.0
        assert res.missing_in_ob == []
        assert res.data_mismatches == []
        assert res.message == ""

    def test_result_with_counts(self):
        """Count fields can be populated at construction time."""
        res = VerificationResult(
            es_index="test",
            ob_table="test",
            es_count=1000,
            ob_count=1000,
            count_match=True,
        )
        assert res.es_count == 1000
        assert res.ob_count == 1000
        assert res.count_match is True
class TestMigrationVerifier:
    """Test MigrationVerifier class.

    The ES and OB clients are replaced by Mock objects, so these tests
    exercise only the verifier's counting / sampling / comparison logic.
    """

    @pytest.fixture
    def mock_es_client(self):
        """Create mock ES client."""
        client = Mock()
        # Defaults used by tests that don't override them explicitly.
        client.count_documents = Mock(return_value=100)
        client.get_sample_documents = Mock(return_value=[])
        return client

    @pytest.fixture
    def mock_ob_client(self):
        """Create mock OB client."""
        client = Mock()
        client.count_rows = Mock(return_value=100)
        # None means "row not found in OceanBase".
        client.get_row_by_id = Mock(return_value=None)
        return client

    @pytest.fixture
    def verifier(self, mock_es_client, mock_ob_client):
        """Create verifier with mock clients."""
        return MigrationVerifier(mock_es_client, mock_ob_client)

    def test_verify_counts_match(self, mock_es_client, mock_ob_client):
        """Test verification when counts match."""
        mock_es_client.count_documents.return_value = 1000
        mock_ob_client.count_rows.return_value = 1000
        mock_es_client.get_sample_documents.return_value = []
        verifier = MigrationVerifier(mock_es_client, mock_ob_client)
        # sample_size=0 restricts the run to the count check only.
        result = verifier.verify("ragflow_test", "ragflow_test", sample_size=0)
        assert result.es_count == 1000
        assert result.ob_count == 1000
        assert result.count_match is True
        assert result.count_diff == 0

    def test_verify_counts_mismatch(self, mock_es_client, mock_ob_client):
        """Test verification when counts don't match."""
        mock_es_client.count_documents.return_value = 1000
        mock_ob_client.count_rows.return_value = 950
        mock_es_client.get_sample_documents.return_value = []
        verifier = MigrationVerifier(mock_es_client, mock_ob_client)
        result = verifier.verify("ragflow_test", "ragflow_test", sample_size=0)
        assert result.es_count == 1000
        assert result.ob_count == 950
        assert result.count_match is False
        # count_diff is expected to be the absolute difference (1000 - 950).
        assert result.count_diff == 50

    def test_verify_samples_all_match(self, mock_es_client, mock_ob_client):
        """Test sample verification when all samples match."""
        # Setup ES samples
        es_samples = [
            {"_id": f"doc_{i}", "id": f"doc_{i}", "kb_id": "kb_001", "content_with_weight": f"content_{i}"}
            for i in range(10)
        ]
        mock_es_client.count_documents.return_value = 100
        mock_es_client.get_sample_documents.return_value = es_samples
        # Setup OB to return matching documents
        def get_row(table, doc_id):
            # `table` is unused by the fake; reconstruct the ES content from
            # the numeric suffix of the doc id so every field compares equal.
            return {"id": doc_id, "kb_id": "kb_001", "content_with_weight": f"content_{doc_id.split('_')[1]}"}
        mock_ob_client.count_rows.return_value = 100
        mock_ob_client.get_row_by_id.side_effect = get_row
        verifier = MigrationVerifier(mock_es_client, mock_ob_client)
        result = verifier.verify("ragflow_test", "ragflow_test", sample_size=10)
        assert result.samples_verified == 10
        assert result.samples_matched == 10
        assert result.sample_match_rate == 1.0

    def test_verify_samples_some_missing(self, mock_es_client, mock_ob_client):
        """Test sample verification when some documents are missing."""
        es_samples = [
            {"_id": f"doc_{i}", "id": f"doc_{i}", "kb_id": "kb_001"}
            for i in range(10)
        ]
        mock_es_client.count_documents.return_value = 100
        mock_es_client.get_sample_documents.return_value = es_samples
        # Only return some documents
        def get_row(table, doc_id):
            idx = int(doc_id.split("_")[1])
            if idx < 7:  # Only return first 7
                return {"id": doc_id, "kb_id": "kb_001"}
            return None
        mock_ob_client.count_rows.return_value = 100
        mock_ob_client.get_row_by_id.side_effect = get_row
        verifier = MigrationVerifier(mock_es_client, mock_ob_client)
        result = verifier.verify("ragflow_test", "ragflow_test", sample_size=10)
        assert result.samples_verified == 10
        assert result.samples_matched == 7
        # The 3 docs the fake returned None for must be reported as missing.
        assert len(result.missing_in_ob) == 3

    def test_verify_samples_data_mismatch(self, mock_es_client, mock_ob_client):
        """Test sample verification when data doesn't match."""
        es_samples = [
            {"_id": "doc_1", "id": "doc_1", "kb_id": "kb_001", "available_int": 1}
        ]
        mock_es_client.count_documents.return_value = 100
        mock_es_client.get_sample_documents.return_value = es_samples
        # Return document with different data
        mock_ob_client.count_rows.return_value = 100
        mock_ob_client.get_row_by_id.return_value = {
            "id": "doc_1", "kb_id": "kb_002", "available_int": 0  # Different values
        }
        verifier = MigrationVerifier(mock_es_client, mock_ob_client)
        result = verifier.verify("ragflow_test", "ragflow_test", sample_size=1)
        assert result.samples_verified == 1
        assert result.samples_matched == 0
        assert len(result.data_mismatches) == 1

    def test_values_equal_none_values(self, verifier):
        """Test value comparison with None values."""
        # None only equals None; any real value vs None is a mismatch.
        assert verifier._values_equal("field", None, None) is True
        assert verifier._values_equal("field", "value", None) is False
        assert verifier._values_equal("field", None, "value") is False

    def test_values_equal_array_columns(self, verifier):
        """Test value comparison for array columns."""
        # Array stored as JSON string in OB
        assert verifier._values_equal(
            "important_kwd",
            ["key1", "key2"],
            '["key1", "key2"]'
        ) is True
        # Order shouldn't matter for arrays
        assert verifier._values_equal(
            "important_kwd",
            ["key2", "key1"],
            '["key1", "key2"]'
        ) is True

    def test_values_equal_json_columns(self, verifier):
        """Test value comparison for JSON columns."""
        # OB stores JSON columns as strings; comparison must parse them.
        assert verifier._values_equal(
            "metadata",
            {"author": "John"},
            '{"author": "John"}'
        ) is True

    def test_values_equal_kb_id_list(self, verifier):
        """Test kb_id comparison when ES has list."""
        # ES sometimes stores kb_id as list
        assert verifier._values_equal(
            "kb_id",
            ["kb_001", "kb_002"],
            "kb_001"
        ) is True

    def test_values_equal_content_with_weight_dict(self, verifier):
        """Test content_with_weight comparison when OB has JSON string."""
        assert verifier._values_equal(
            "content_with_weight",
            {"text": "content", "weight": 1.0},
            '{"text": "content", "weight": 1.0}'
        ) is True

    def test_determine_result_passed(self, mock_es_client, mock_ob_client):
        """Test result determination for passed verification."""
        mock_es_client.count_documents.return_value = 1000
        mock_ob_client.count_rows.return_value = 1000
        es_samples = [{"_id": f"doc_{i}", "id": f"doc_{i}", "kb_id": "kb_001"} for i in range(100)]
        mock_es_client.get_sample_documents.return_value = es_samples
        # Every sampled doc resolves to an identical OB row.
        mock_ob_client.get_row_by_id.side_effect = lambda t, d: {"id": d, "kb_id": "kb_001"}
        verifier = MigrationVerifier(mock_es_client, mock_ob_client)
        result = verifier.verify("test", "test", sample_size=100)
        assert result.passed is True
        assert "PASSED" in result.message

    def test_determine_result_failed_count(self, mock_es_client, mock_ob_client):
        """Test result determination when count verification fails."""
        mock_es_client.count_documents.return_value = 1000
        mock_ob_client.count_rows.return_value = 500  # Big difference
        mock_es_client.get_sample_documents.return_value = []
        verifier = MigrationVerifier(mock_es_client, mock_ob_client)
        result = verifier.verify("test", "test", sample_size=0)
        assert result.passed is False
        assert "FAILED" in result.message

    def test_determine_result_failed_samples(self, mock_es_client, mock_ob_client):
        """Test result determination when sample verification fails."""
        mock_es_client.count_documents.return_value = 100
        mock_ob_client.count_rows.return_value = 100
        es_samples = [{"_id": f"doc_{i}", "id": f"doc_{i}"} for i in range(10)]
        mock_es_client.get_sample_documents.return_value = es_samples
        mock_ob_client.get_row_by_id.return_value = None  # All missing
        verifier = MigrationVerifier(mock_es_client, mock_ob_client)
        result = verifier.verify("test", "test", sample_size=10)
        # Counts agree, but zero matched samples must still fail the run.
        assert result.passed is False

    def test_generate_report(self, verifier):
        """Test report generation."""
        result = VerificationResult(
            es_index="ragflow_test",
            ob_table="ragflow_test",
            es_count=1000,
            ob_count=1000,
            count_match=True,
            count_diff=0,
            sample_size=100,
            samples_verified=100,
            samples_matched=100,
            sample_match_rate=1.0,
            passed=True,
            message="Verification PASSED",
        )
        report = verifier.generate_report(result)
        assert "ragflow_test" in report
        # Counts are expected to be rendered with thousands separators.
        assert "1,000" in report
        assert "PASSED" in report
        assert "100.00%" in report

    def test_generate_report_with_missing(self, verifier):
        """Test report generation with missing documents."""
        result = VerificationResult(
            es_index="test",
            ob_table="test",
            es_count=100,
            ob_count=95,
            count_match=False,
            count_diff=5,
            sample_size=10,
            samples_verified=10,
            samples_matched=8,
            sample_match_rate=0.8,
            missing_in_ob=["doc_1", "doc_2"],
            passed=False,
            message="Verification FAILED",
        )
        report = verifier.generate_report(result)
        assert "Missing in OceanBase" in report
        assert "doc_1" in report
        assert "FAILED" in report

    def test_generate_report_with_mismatches(self, verifier):
        """Test report generation with data mismatches."""
        result = VerificationResult(
            es_index="test",
            ob_table="test",
            es_count=100,
            ob_count=100,
            count_match=True,
            sample_size=10,
            samples_verified=10,
            samples_matched=8,
            sample_match_rate=0.8,
            data_mismatches=[
                {
                    "id": "doc_1",
                    "differences": [
                        {"field": "kb_id", "es_value": "kb_001", "ob_value": "kb_002"}
                    ]
                }
            ],
            passed=False,
            message="Verification FAILED",
        )
        report = verifier.generate_report(result)
        assert "Data Mismatches" in report
        assert "doc_1" in report
        assert "kb_id" in report
class TestValueComparison:
    """Edge cases for MigrationVerifier._values_equal."""

    @pytest.fixture
    def verifier(self):
        """Verifier wired to throwaway mock clients; only _values_equal is used."""
        return MigrationVerifier(Mock(), Mock())

    def test_string_comparison(self, verifier):
        """Identical strings match; different strings do not."""
        assert verifier._values_equal("field", "value", "value") is True
        assert verifier._values_equal("field", "value1", "value2") is False

    def test_integer_comparison(self, verifier):
        """Int and its string form compare equal in either direction."""
        assert verifier._values_equal("field", 123, "123") is True
        assert verifier._values_equal("field", "123", 123) is True

    def test_float_comparison(self, verifier):
        """A float matches its string representation."""
        assert verifier._values_equal("field", 1.5, "1.5") is True

    def test_boolean_comparison(self, verifier):
        """Booleans match their str() spellings."""
        assert verifier._values_equal("field", True, "True") is True
        assert verifier._values_equal("field", False, "False") is True

    def test_empty_array_comparison(self, verifier):
        """An empty list equals the JSON-encoded empty array."""
        assert verifier._values_equal("important_kwd", [], "[]") is True

    def test_nested_json_comparison(self, verifier):
        """A nested dict equals its JSON-string encoding."""
        es_side = {"nested": {"key": "value"}}
        ob_side = '{"nested": {"key": "value"}}'
        assert verifier._values_equal("metadata", es_side, ob_side) is True
| {
"repo_id": "infiniflow/ragflow",
"file_path": "tools/es-to-oceanbase-migration/tests/test_verify.py",
"license": "Apache License 2.0",
"lines": 314,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/unit_test/utils/test_oceanbase_health.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for OceanBase health check and performance monitoring functionality.
"""
import inspect
import os
import types
import pytest
from unittest.mock import Mock, patch
from api.utils.health_utils import get_oceanbase_status, check_oceanbase_health
class TestOceanBaseHealthCheck:
    """Test cases for OceanBase health check functionality.

    NOTE: decorators are applied bottom-up, so the `@patch.dict` on the
    environment runs innermost and injects no argument; the single mock
    parameter always corresponds to the `OBConnection` patch.
    """

    @patch('api.utils.health_utils.OBConnection')
    @patch.dict(os.environ, {'DOC_ENGINE': 'oceanbase'})
    def test_get_oceanbase_status_success(self, mock_ob_class):
        """Test successful OceanBase status retrieval."""
        # Setup mock
        mock_ob_connection = Mock()
        mock_ob_connection.uri = "localhost:2881"
        mock_ob_connection.health.return_value = {
            "uri": "localhost:2881",
            "version_comment": "OceanBase 4.3.5.1",
            "status": "healthy",
            "connection": "connected"
        }
        mock_ob_connection.get_performance_metrics.return_value = {
            "connection": "connected",
            "latency_ms": 5.2,
            "storage_used": "1.2MB",
            "storage_total": "100GB",
            "query_per_second": 150,
            "slow_queries": 2,
            "active_connections": 10,
            "max_connections": 300
        }
        mock_ob_class.return_value = mock_ob_connection
        # Execute
        result = get_oceanbase_status()
        # Assert: status payload nests health and performance under "message".
        assert result["status"] == "alive"
        assert "message" in result
        assert "health" in result["message"]
        assert "performance" in result["message"]
        assert result["message"]["health"]["status"] == "healthy"
        assert result["message"]["performance"]["latency_ms"] == 5.2

    @patch.dict(os.environ, {'DOC_ENGINE': 'elasticsearch'})
    def test_get_oceanbase_status_not_configured(self):
        """Test OceanBase status when not configured."""
        # With a non-oceanbase DOC_ENGINE the status call is expected to raise.
        with pytest.raises(Exception) as exc_info:
            get_oceanbase_status()
        assert "OceanBase is not in use" in str(exc_info.value)

    @patch('api.utils.health_utils.OBConnection')
    @patch.dict(os.environ, {'DOC_ENGINE': 'oceanbase'})
    def test_get_oceanbase_status_connection_error(self, mock_ob_class):
        """Test OceanBase status when connection fails."""
        # Raise from the constructor itself so no connection object exists.
        mock_ob_class.side_effect = Exception("Connection failed")
        result = get_oceanbase_status()
        assert result["status"] == "timeout"
        assert "error" in result["message"]

    @patch('api.utils.health_utils.OBConnection')
    @patch.dict(os.environ, {'DOC_ENGINE': 'oceanbase'})
    def test_check_oceanbase_health_healthy(self, mock_ob_class):
        """Test OceanBase health check returns healthy status."""
        mock_ob_connection = Mock()
        mock_ob_connection.health.return_value = {
            "uri": "localhost:2881",
            "version_comment": "OceanBase 4.3.5.1",
            "status": "healthy",
            "connection": "connected"
        }
        mock_ob_connection.get_performance_metrics.return_value = {
            "connection": "connected",
            "latency_ms": 5.2,
            "storage_used": "1.2MB",
            "storage_total": "100GB",
            "query_per_second": 150,
            "slow_queries": 0,
            "active_connections": 10,
            "max_connections": 300
        }
        mock_ob_class.return_value = mock_ob_connection
        result = check_oceanbase_health()
        assert result["status"] == "healthy"
        assert result["details"]["connection"] == "connected"
        assert result["details"]["latency_ms"] == 5.2
        assert result["details"]["query_per_second"] == 150

    @patch('api.utils.health_utils.OBConnection')
    @patch.dict(os.environ, {'DOC_ENGINE': 'oceanbase'})
    def test_check_oceanbase_health_degraded(self, mock_ob_class):
        """Test OceanBase health check returns degraded status for high latency."""
        mock_ob_connection = Mock()
        mock_ob_connection.health.return_value = {
            "uri": "localhost:2881",
            "version_comment": "OceanBase 4.3.5.1",
            "status": "healthy",
            "connection": "connected"
        }
        mock_ob_connection.get_performance_metrics.return_value = {
            "connection": "connected",
            "latency_ms": 1500.0,  # High latency > 1000ms
            "storage_used": "1.2MB",
            "storage_total": "100GB",
            "query_per_second": 50,
            "slow_queries": 5,
            "active_connections": 10,
            "max_connections": 300
        }
        mock_ob_class.return_value = mock_ob_connection
        result = check_oceanbase_health()
        # Healthy connection + latency over the threshold => "degraded".
        assert result["status"] == "degraded"
        assert result["details"]["latency_ms"] == 1500.0

    @patch('api.utils.health_utils.OBConnection')
    @patch.dict(os.environ, {'DOC_ENGINE': 'oceanbase'})
    def test_check_oceanbase_health_unhealthy(self, mock_ob_class):
        """Test OceanBase health check returns unhealthy status."""
        mock_ob_connection = Mock()
        mock_ob_connection.health.return_value = {
            "uri": "localhost:2881",
            "status": "unhealthy",
            "connection": "disconnected",
            "error": "Connection timeout"
        }
        mock_ob_connection.get_performance_metrics.return_value = {
            "connection": "disconnected",
            "error": "Connection timeout"
        }
        mock_ob_class.return_value = mock_ob_connection
        result = check_oceanbase_health()
        assert result["status"] == "unhealthy"
        assert result["details"]["connection"] == "disconnected"
        assert "error" in result["details"]

    @patch.dict(os.environ, {'DOC_ENGINE': 'elasticsearch'})
    def test_check_oceanbase_health_not_configured(self):
        """Test OceanBase health check when not configured."""
        # Unlike get_oceanbase_status, the health check reports rather than raises.
        result = check_oceanbase_health()
        assert result["status"] == "not_configured"
        assert result["details"]["connection"] == "not_configured"
        assert "not configured" in result["details"]["message"].lower()
class TestOBConnectionPerformanceMetrics:
    """Test cases for OBConnection performance metrics methods.

    These tests run the real method implementations against a bare object,
    extracting the wrapped class out of the @singleton decorator's closure
    so no actual database connection is created.
    """

    def _create_mock_connection(self):
        """Create a mock OBConnection with actual methods."""
        # Create a simple object and bind the real methods to it
        class MockConn:
            pass
        conn = MockConn()
        # Get the actual class from the singleton wrapper's closure
        from rag.utils import ob_conn
        # OBConnection is wrapped by @singleton decorator, so it's a function
        # The original class is stored in the closure of the singleton function
        # Find the class by checking all closure cells
        # NOTE(review): this assumes @singleton keeps exactly one class in its
        # closure cells — verify against rag.utils.ob_conn if it changes.
        ob_connection_class = None
        if hasattr(ob_conn.OBConnection, '__closure__') and ob_conn.OBConnection.__closure__:
            for cell in ob_conn.OBConnection.__closure__:
                cell_value = cell.cell_contents
                if inspect.isclass(cell_value):
                    ob_connection_class = cell_value
                    break
        if ob_connection_class is None:
            raise ValueError("Could not find OBConnection class in closure")
        # Bind the actual methods to our mock object
        conn.get_performance_metrics = types.MethodType(ob_connection_class.get_performance_metrics, conn)
        conn._get_storage_info = types.MethodType(ob_connection_class._get_storage_info, conn)
        conn._get_connection_pool_stats = types.MethodType(ob_connection_class._get_connection_pool_stats, conn)
        conn._get_slow_query_count = types.MethodType(ob_connection_class._get_slow_query_count, conn)
        conn._estimate_qps = types.MethodType(ob_connection_class._estimate_qps, conn)
        return conn

    def test_get_performance_metrics_success(self):
        """Test successful retrieval of performance metrics."""
        # Create mock connection with actual methods
        conn = self._create_mock_connection()
        mock_client = Mock()
        conn.client = mock_client
        conn.uri = "localhost:2881"
        conn.db_name = "test"
        # Mock client methods - create separate mock results for each call
        mock_result1 = Mock()
        mock_result1.fetchone.return_value = (1,)
        mock_result2 = Mock()
        mock_result2.fetchone.return_value = (100.5,)
        mock_result3 = Mock()
        mock_result3.fetchone.return_value = (100.0,)
        mock_result4 = Mock()
        mock_result4.fetchall.return_value = [
            (1, 'user', 'host', 'db', 'Query', 0, 'executing', 'SELECT 1')
        ]
        mock_result4.fetchone.return_value = ('max_connections', '300')
        mock_result5 = Mock()
        mock_result5.fetchone.return_value = (0,)
        mock_result6 = Mock()
        mock_result6.fetchone.return_value = (5,)
        # Setup side_effect to return different mocks for different queries
        # (dispatch is by substring match on the SQL text, so branch order matters)
        def sql_side_effect(query):
            if "SELECT 1" in query:
                return mock_result1
            elif "information_schema.tables" in query:
                return mock_result2
            elif "__all_disk_stat" in query:
                return mock_result3
            elif "SHOW PROCESSLIST" in query:
                return mock_result4
            elif "SHOW VARIABLES LIKE 'max_connections'" in query:
                return mock_result4
            elif "information_schema.processlist" in query and "time >" in query:
                return mock_result5
            elif "information_schema.processlist" in query and "COUNT" in query:
                return mock_result6
            return Mock()
        mock_client.perform_raw_text_sql.side_effect = sql_side_effect
        mock_client.pool_size = 300
        # Mock logger
        import logging
        conn.logger = logging.getLogger('test')
        result = conn.get_performance_metrics()
        assert result["connection"] == "connected"
        # Latency is measured around the real SELECT 1 round-trip, so only
        # non-negativity can be asserted.
        assert result["latency_ms"] >= 0
        assert "storage_used" in result
        assert "storage_total" in result

    def test_get_performance_metrics_connection_error(self):
        """Test performance metrics when connection fails."""
        # Create mock connection with actual methods
        conn = self._create_mock_connection()
        mock_client = Mock()
        conn.client = mock_client
        conn.uri = "localhost:2881"
        conn.logger = Mock()
        # Every SQL call fails, simulating a dead connection.
        mock_client.perform_raw_text_sql.side_effect = Exception("Connection failed")
        result = conn.get_performance_metrics()
        assert result["connection"] == "disconnected"
        assert "error" in result

    def test_get_storage_info_success(self):
        """Test successful retrieval of storage information."""
        # Create mock connection with actual methods
        conn = self._create_mock_connection()
        mock_client = Mock()
        conn.client = mock_client
        conn.db_name = "test"
        conn.logger = Mock()
        mock_result1 = Mock()
        mock_result1.fetchone.return_value = (100.5,)
        mock_result2 = Mock()
        mock_result2.fetchone.return_value = (100.0,)
        def sql_side_effect(query):
            if "information_schema.tables" in query:
                return mock_result1
            elif "__all_disk_stat" in query:
                return mock_result2
            return Mock()
        mock_client.perform_raw_text_sql.side_effect = sql_side_effect
        result = conn._get_storage_info()
        assert "storage_used" in result
        assert "storage_total" in result
        assert "MB" in result["storage_used"]

    def test_get_storage_info_fallback(self):
        """Test storage info with fallback when total space unavailable."""
        # Create mock connection with actual methods
        conn = self._create_mock_connection()
        mock_client = Mock()
        conn.client = mock_client
        conn.db_name = "test"
        conn.logger = Mock()
        # First query succeeds, second fails
        def side_effect(query):
            if "information_schema.tables" in query:
                mock_result = Mock()
                mock_result.fetchone.return_value = (100.5,)
                return mock_result
            else:
                raise Exception("Table not found")
        mock_client.perform_raw_text_sql.side_effect = side_effect
        result = conn._get_storage_info()
        # Even when the disk-stat query fails, both keys must be present.
        assert "storage_used" in result
        assert "storage_total" in result

    def test_get_connection_pool_stats(self):
        """Test retrieval of connection pool statistics."""
        # Create mock connection with actual methods
        conn = self._create_mock_connection()
        mock_client = Mock()
        conn.client = mock_client
        conn.logger = Mock()
        mock_client.pool_size = 300
        mock_result1 = Mock()
        mock_result1.fetchall.return_value = [
            (1, 'user', 'host', 'db', 'Query', 0, 'executing', 'SELECT 1'),
            (2, 'user', 'host', 'db', 'Sleep', 10, None, None)
        ]
        mock_result2 = Mock()
        mock_result2.fetchone.return_value = ('max_connections', '300')
        def sql_side_effect(query):
            if "SHOW PROCESSLIST" in query:
                return mock_result1
            elif "SHOW VARIABLES LIKE 'max_connections'" in query:
                return mock_result2
            return Mock()
        mock_client.perform_raw_text_sql.side_effect = sql_side_effect
        result = conn._get_connection_pool_stats()
        assert "active_connections" in result
        assert "max_connections" in result
        assert result["active_connections"] >= 0

    def test_get_slow_query_count(self):
        """Test retrieval of slow query count."""
        # Create mock connection with actual methods
        conn = self._create_mock_connection()
        mock_client = Mock()
        conn.client = mock_client
        conn.logger = Mock()
        mock_result = Mock()
        mock_result.fetchone.return_value = (5,)
        mock_client.perform_raw_text_sql.return_value = mock_result
        result = conn._get_slow_query_count(threshold_seconds=1)
        assert isinstance(result, int)
        assert result >= 0

    def test_estimate_qps(self):
        """Test QPS estimation."""
        # Create mock connection with actual methods
        conn = self._create_mock_connection()
        mock_client = Mock()
        conn.client = mock_client
        conn.logger = Mock()
        mock_result = Mock()
        mock_result.fetchone.return_value = (10,)
        mock_client.perform_raw_text_sql.return_value = mock_result
        result = conn._estimate_qps()
        assert isinstance(result, int)
        assert result >= 0
# Allow running this test module directly (python test_oceanbase_health.py)
# instead of via the pytest CLI.
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/unit_test/utils/test_oceanbase_health.py",
"license": "Apache License 2.0",
"lines": 341,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/unit_test/common/test_metadata_filter_operators.py | from common.metadata_utils import meta_filter
def test_contains():
    """'contains' keeps docs whose metadata value contains the substring."""
    metas = {"version": {"hello earth": ["doc1"], "hello mars": ["doc2"]}}
    assert meta_filter(metas, [{"key": "version", "op": "contains", "value": "earth"}]) == ["doc1"]

def test_not_contains():
    """'not contains' keeps docs whose metadata value lacks the substring."""
    metas = {"version": {"hello earth": ["doc1"], "hello mars": ["doc2"]}}
    assert meta_filter(metas, [{"key": "version", "op": "not contains", "value": "earth"}]) == ["doc2"]

def test_in_operator():
    """'in' keeps docs whose value is one of the comma-separated options."""
    metas = {"status": {"active": ["doc1"], "pending": ["doc2"], "done": ["doc3"]}}
    hits = meta_filter(metas, [{"key": "status", "op": "in", "value": "active,pending"}])
    assert set(hits) == {"doc1", "doc2"}

def test_not_in_operator():
    """'not in' keeps docs whose value matches none of the options."""
    metas = {"status": {"active": ["doc1"], "pending": ["doc2"], "done": ["doc3"]}}
    assert meta_filter(metas, [{"key": "status", "op": "not in", "value": "active,pending"}]) == ["doc3"]

def test_start_with():
    """'start with' keeps docs whose value begins with the prefix."""
    metas = {"name": {"prefix_value": ["doc1"], "other": ["doc2"]}}
    assert meta_filter(metas, [{"key": "name", "op": "start with", "value": "pre"}]) == ["doc1"]

def test_end_with():
    """'end with' keeps docs whose value ends with the suffix."""
    metas = {"file": {"report.pdf": ["doc1"], "image.png": ["doc2"]}}
    assert meta_filter(metas, [{"key": "file", "op": "end with", "value": ".pdf"}]) == ["doc1"]

def test_empty():
    """'empty' keeps docs whose value is the empty string."""
    metas = {"notes": {"": ["doc1"], "non-empty": ["doc2"]}}
    assert meta_filter(metas, [{"key": "notes", "op": "empty", "value": ""}]) == ["doc1"]

def test_not_empty():
    """'not empty' keeps docs whose value is non-empty."""
    metas = {"notes": {"": ["doc1"], "non-empty": ["doc2"]}}
    assert meta_filter(metas, [{"key": "notes", "op": "not empty", "value": ""}]) == ["doc2"]

def test_equal():
    """'=' keeps docs whose value equals the filter value."""
    metas = {"score": {"5": ["doc1"], "6": ["doc2"]}}
    assert meta_filter(metas, [{"key": "score", "op": "=", "value": "5"}]) == ["doc1"]

def test_not_equal():
    """'≠' keeps docs whose value differs from the filter value."""
    metas = {"score": {"5": ["doc1"], "6": ["doc2"]}}
    assert meta_filter(metas, [{"key": "score", "op": "≠", "value": "5"}]) == ["doc2"]

def test_greater_than():
    """'>' orders numerically ("10" > "5" even though it sorts lower as text)."""
    metas = {"score": {"10": ["doc1"], "2": ["doc2"]}}
    assert meta_filter(metas, [{"key": "score", "op": ">", "value": "5"}]) == ["doc1"]

def test_less_than():
    """'<' orders numerically."""
    metas = {"score": {"10": ["doc1"], "2": ["doc2"]}}
    assert meta_filter(metas, [{"key": "score", "op": "<", "value": "5"}]) == ["doc2"]

def test_greater_than_or_equal():
    """'≥' includes the boundary value itself."""
    metas = {"score": {"5": ["doc1"], "6": ["doc2"], "4": ["doc3"]}}
    hits = meta_filter(metas, [{"key": "score", "op": "≥", "value": "5"}])
    assert set(hits) == {"doc1", "doc2"}

def test_less_than_or_equal():
    """'≤' includes the boundary value itself."""
    metas = {"score": {"5": ["doc1"], "6": ["doc2"], "4": ["doc3"]}}
    hits = meta_filter(metas, [{"key": "score", "op": "≤", "value": "5"}])
    assert set(hits) == {"doc1", "doc3"}
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/unit_test/common/test_metadata_filter_operators.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:api/db/services/doc_metadata_service.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Document Metadata Service
Manages document-level metadata storage in ES/Infinity.
This is the SOLE source of truth for document metadata - MySQL meta_fields column has been removed.
"""
import json
import logging
import re
from copy import deepcopy
from typing import Dict, List, Optional
from api.db.db_models import DB, Document
from common import settings
from common.metadata_utils import dedupe_list
from api.db.db_models import Knowledgebase
from common.doc_store.doc_store_base import OrderByExpr
class DocMetadataService:
"""Service for managing document metadata in ES/Infinity"""
@staticmethod
def _get_doc_meta_index_name(tenant_id: str) -> str:
"""
Get the index name for document metadata.
Args:
tenant_id: Tenant ID
Returns:
Index name for document metadata
"""
return f"ragflow_doc_meta_{tenant_id}"
@staticmethod
def _extract_metadata(flat_meta: Dict) -> Dict:
"""
Extract metadata from ES/Infinity document format.
Args:
flat_meta: Raw document from ES/Infinity with meta_fields field
Returns:
Simple metadata dictionary
"""
if not flat_meta or not isinstance(flat_meta, dict):
return {}
meta_fields = flat_meta.get('meta_fields')
if not meta_fields:
return {}
# Parse JSON string if needed
if isinstance(meta_fields, str):
import json
try:
return json.loads(meta_fields)
except json.JSONDecodeError:
return {}
# Already a dict, return as-is
if isinstance(meta_fields, dict):
return meta_fields
return {}
@staticmethod
def _extract_doc_id(doc: Dict, hit: Dict = None) -> str:
"""
Extract document ID from various formats.
Args:
doc: Document dictionary (from DataFrame or list format)
hit: Hit dictionary (from ES format with _id field)
Returns:
Document ID or empty string
"""
if hit:
# ES format: doc is in _source, id is in _id
return hit.get('_id', '')
# DataFrame or list format: check multiple possible fields
return doc.get("doc_id") or doc.get("_id") or doc.get("id", "")
@classmethod
def _iter_search_results(cls, results):
    """
    Iterate over search results in various formats (DataFrame, ES, OceanBase, list).

    Branch order is load-bearing: the tuple unwrap must run first (pandas
    DataFrames are also subscriptable), and the ES check must precede the
    list check. Results matching no branch yield nothing.

    Args:
        results: Search results from ES/Infinity/OceanBase in any format

    Yields:
        Tuple of (doc_id, doc_dict) for each document
    """
    # Handle tuple return from Infinity: (DataFrame, int)
    # Check this FIRST because pandas DataFrames also have __getitem__
    if isinstance(results, tuple) and len(results) == 2:
        results = results[0]  # Extract DataFrame from tuple
    # Check if results is a pandas DataFrame (from Infinity)
    if hasattr(results, 'iterrows'):
        # Handle pandas DataFrame - use iterrows() to iterate over rows
        for _, row in results.iterrows():
            doc = dict(row)  # Convert Series to dict
            doc_id = cls._extract_doc_id(doc)
            # Documents without a resolvable ID are silently skipped.
            if doc_id:
                yield doc_id, doc
    # Check if ES format (has 'hits' key)
    # Note: ES returns ObjectApiResponse which is dict-like but not isinstance(dict)
    elif hasattr(results, 'get') and 'hits' in results:
        # ES format: {"hits": {"hits": [{"_source": {...}, "_id": "..."}]}}
        hits = results.get('hits', {}).get('hits', [])
        for hit in hits:
            doc = hit.get('_source', {})
            # Pass the hit wrapper too so the ID is taken from hit['_id'].
            doc_id = cls._extract_doc_id(doc, hit)
            if doc_id:
                yield doc_id, doc
    # Handle list of dicts or other formats
    elif isinstance(results, list):
        for res in results:
            # A bare dict is a single document; anything else is assumed
            # to be an iterable of documents.
            if isinstance(res, dict):
                docs = [res]
            else:
                docs = res
            for doc in docs:
                doc_id = cls._extract_doc_id(doc)
                if doc_id:
                    yield doc_id, doc
    # Check if OceanBase SearchResult format
    elif hasattr(results, 'chunks') and hasattr(results, 'total'):
        # OceanBase format: SearchResult(total=int, chunks=[{...}, {...}])
        for doc in results.chunks:
            doc_id = cls._extract_doc_id(doc)
            if doc_id:
                yield doc_id, doc
    @classmethod
    def _search_metadata(cls, kb_id: str, condition: Dict = None):
        """
        Common search logic for metadata queries.

        Uses pagination internally (1000 rows per page) to retrieve ALL data
        from the metadata index, so callers never hit a backend page limit.
        Creates the per-tenant metadata index on demand.

        Args:
            kb_id: Knowledge base ID
            condition: Optional search condition (defaults to {"kb_id": kb_id})

        Returns:
            List of metadata documents aggregated across all pages, or an
            empty list if the KB does not exist or the index cannot be created.
        """
        kb = Knowledgebase.get_by_id(kb_id)
        if not kb:
            return []
        tenant_id = kb.tenant_id
        index_name = cls._get_doc_meta_index_name(tenant_id)
        # Check if metadata index exists, create if it doesn't
        if not settings.docStoreConn.index_exist(index_name, ""):
            logging.debug(f"Metadata index {index_name} does not exist, creating it")
            result = settings.docStoreConn.create_doc_meta_idx(index_name)
            if result is False:
                logging.error(f"Failed to create metadata index {index_name}")
                return []
            logging.debug(f"Successfully created metadata index {index_name}")
        if condition is None:
            condition = {"kb_id": kb_id}
        order_by = OrderByExpr()
        page_size = 1000
        all_results = []
        page = 0
        while True:
            results = settings.docStoreConn.search(
                select_fields=["*"],
                highlight_fields=[],
                condition=condition,
                match_expressions=[],
                order_by=order_by,
                offset=page * page_size,
                limit=page_size,
                index_names=index_name,
                knowledgebase_ids=[kb_id]
            )
            # Handle different result formats
            if results is None:
                break
            # Extract docs from results
            page_docs = []
            total_count = None  # Used for Infinity to determine if more results exist
            # Check for Infinity format first (DataFrame, total) tuple
            if isinstance(results, tuple) and len(results) == 2:
                df, total_count = results
                if hasattr(df, 'iterrows'):
                    # Pandas DataFrame from Infinity
                    page_docs = df.to_dict('records')
                else:
                    page_docs = list(df) if df else []
            # Check for ES format (dict with 'hits' key)
            elif hasattr(results, 'get') and 'hits' in results:
                hits_obj = results.get('hits', {})
                hits = hits_obj.get('hits', [])
                page_docs = []
                for hit in hits:
                    doc = hit.get('_source', {})
                    doc['id'] = hit.get('_id', '')  # Add _id as 'id' for _extract_doc_id to work
                    page_docs.append(doc)
                # Extract total count from ES response
                total_hits = hits_obj.get('total', {})
                if isinstance(total_hits, dict):
                    # ES 7.x+: total is {'value': N, 'relation': 'eq'}
                    total_count = total_hits.get('value', len(page_docs))
                else:
                    # ES 6.x: total is a plain int (0/None falls back to page length)
                    total_count = total_hits if total_hits else len(page_docs)
            # Handle list/iterable results
            elif hasattr(results, '__iter__') and not isinstance(results, dict):
                page_docs = list(results)
            else:
                page_docs = []
            if not page_docs:
                break
            all_results.extend(page_docs)
            page += 1
            # Determine if there are more results to fetch
            # For Infinity: use total_count if available
            if total_count is not None:
                if len(all_results) >= total_count:
                    break
            else:
                # For ES or other: check if we got fewer than page_size
                if len(page_docs) < page_size:
                    break
        logging.debug(f"[_search_metadata] Retrieved {len(all_results)} total results for kb_id: {kb_id}")
        return all_results
@classmethod
def _split_combined_values(cls, meta_fields: Dict) -> Dict:
"""
Post-process metadata to split combined values by common delimiters.
For example: "关羽、孙权、张辽" -> ["关羽", "孙权", "张辽"]
This fixes LLM extraction where multiple values are extracted as one combined value.
Also removes duplicates after splitting.
Args:
meta_fields: Metadata dictionary
Returns:
Processed metadata with split values
"""
if not meta_fields or not isinstance(meta_fields, dict):
return meta_fields
processed = {}
for key, value in meta_fields.items():
if isinstance(value, list):
# Process each item in the list
new_values = []
for item in value:
if isinstance(item, str):
# Split by common delimiters: Chinese comma (、), regular comma (,), pipe (|), semicolon (;), Chinese semicolon (;)
# Also handle mixed delimiters and spaces
split_items = re.split(r'[、,,;;|]+', item.strip())
# Trim whitespace and filter empty strings
split_items = [s.strip() for s in split_items if s.strip()]
if split_items:
new_values.extend(split_items)
else:
# Keep original if no split happened
new_values.append(item)
else:
new_values.append(item)
# Remove duplicates while preserving order.
# Use string-based dedupe to support unhashable values (e.g. dict entries).
processed[key] = dedupe_list(new_values)
else:
processed[key] = value
if processed != meta_fields:
logging.debug(f"[METADATA SPLIT] Split combined values: {meta_fields} -> {processed}")
return processed
    @classmethod
    @DB.connection_context()
    def insert_document_metadata(cls, doc_id: str, meta_fields: Dict) -> bool:
        """
        Insert document metadata into ES/Infinity.

        Resolves the owning tenant via the Knowledgebase join, creates the
        per-tenant metadata index/table on demand, splits delimiter-joined
        values, and (for non-Infinity engines) forces an ES refresh so the
        new metadata is immediately searchable.

        Args:
            doc_id: Document ID
            meta_fields: Metadata dictionary

        Returns:
            True if successful, False otherwise
        """
        try:
            # Get document with tenant_id (need to join with Knowledgebase)
            doc_query = Document.select(Document, Knowledgebase.tenant_id).join(
                Knowledgebase, on=(Knowledgebase.id == Document.kb_id)
            ).where(Document.id == doc_id)
            doc = doc_query.first()
            if not doc:
                logging.warning(f"Document {doc_id} not found for metadata insertion")
                return False
            # Extract document fields
            doc_obj = doc  # This is the Document object
            tenant_id = doc.knowledgebase.tenant_id  # Get tenant_id from joined Knowledgebase
            kb_id = doc_obj.kb_id
            # Prepare metadata document
            doc_meta = {
                "id": doc_obj.id,
                "kb_id": kb_id,
            }
            # Store metadata as JSON object in meta_fields column (same as MySQL structure)
            if meta_fields:
                # Post-process to split combined values by common delimiters
                meta_fields = cls._split_combined_values(meta_fields)
                doc_meta["meta_fields"] = meta_fields
            else:
                doc_meta["meta_fields"] = {}
            # Ensure index/table exists (per-tenant for both ES and Infinity)
            index_name = cls._get_doc_meta_index_name(tenant_id)
            # Check if table exists
            table_exists = settings.docStoreConn.index_exist(index_name, kb_id)
            logging.debug(f"Metadata table exists check: {index_name} -> {table_exists}")
            # Create index if it doesn't exist
            if not table_exists:
                logging.debug(f"Creating metadata table: {index_name}")
                # Both ES and Infinity now use per-tenant metadata tables
                result = settings.docStoreConn.create_doc_meta_idx(index_name)
                logging.debug(f"Table creation result: {result}")
                if result is False:
                    logging.error(f"Failed to create metadata table {index_name}")
                    return False
            else:
                logging.debug(f"Metadata table already exists: {index_name}")
            # Insert into ES/Infinity
            result = settings.docStoreConn.insert(
                [doc_meta],
                index_name,
                kb_id
            )
            # NOTE(review): a truthy return is treated as insertion errors
            # (falsy/empty == success) — confirm against the docStoreConn.insert
            # contract before relying on this elsewhere.
            if result:
                logging.error(f"Failed to insert metadata for document {doc_id}: {result}")
                return False
            # Force ES refresh to make metadata immediately available for search
            if not settings.DOC_ENGINE_INFINITY:
                try:
                    # Direct ES client access; wrapped in try/except since other
                    # non-Infinity engines may not expose `.es`.
                    settings.docStoreConn.es.indices.refresh(index=index_name)
                    logging.debug(f"Refreshed metadata index: {index_name}")
                except Exception as e:
                    logging.warning(f"Failed to refresh metadata index {index_name}: {e}")
            logging.debug(f"Successfully inserted metadata for document {doc_id}")
            return True
        except Exception as e:
            logging.error(f"Error inserting metadata for document {doc_id}: {e}")
            return False
    @classmethod
    @DB.connection_context()
    def update_document_metadata(cls, doc_id: str, meta_fields: Dict) -> bool:
        """
        Update document metadata in ES/Infinity.

        For Elasticsearch: Uses partial update to directly update the
        meta_fields field, creating the index and/or inserting the document
        when missing.
        For Infinity/OceanBase: Falls back to delete+insert (Infinity doesn't
        support partial updates well).

        Args:
            doc_id: Document ID
            meta_fields: Metadata dictionary (combined values are split first)

        Returns:
            True if successful, False otherwise
        """
        try:
            # Get document with tenant_id
            doc_query = Document.select(Document, Knowledgebase.tenant_id).join(
                Knowledgebase, on=(Knowledgebase.id == Document.kb_id)
            ).where(Document.id == doc_id)
            doc = doc_query.first()
            if not doc:
                logging.warning(f"Document {doc_id} not found for metadata update")
                return False
            # Extract fields
            doc_obj = doc
            tenant_id = doc.knowledgebase.tenant_id
            kb_id = doc_obj.kb_id
            index_name = cls._get_doc_meta_index_name(tenant_id)
            # Post-process to split combined values (e.g. "a、b" -> ["a", "b"])
            processed_meta = cls._split_combined_values(meta_fields)
            logging.debug(f"[update_document_metadata] Updating doc_id: {doc_id}, kb_id: {kb_id}, meta_fields: {processed_meta}")
            # For Elasticsearch, use efficient partial update
            if not settings.DOC_ENGINE_INFINITY and not settings.DOC_ENGINE_OCEANBASE:
                # Check if index exists first
                index_exists = settings.docStoreConn.index_exist(index_name, "")
                if not index_exists:
                    # Index doesn't exist - create it and insert directly
                    logging.debug(f"[update_document_metadata] Index {index_name} does not exist, creating and inserting")
                    result = settings.docStoreConn.create_doc_meta_idx(index_name)
                    if result is False:
                        logging.error(f"Failed to create metadata index {index_name}")
                        return False
                    return cls.insert_document_metadata(doc_id, processed_meta)
                # Index exists - check if document exists
                try:
                    doc_exists = settings.docStoreConn.get(
                        index_name=index_name,
                        id=doc_id,
                        kb_id=kb_id
                    )
                    if doc_exists:
                        # Document exists - use partial update
                        # refresh=True makes the change immediately searchable
                        settings.docStoreConn.es.update(
                            index=index_name,
                            id=doc_id,
                            refresh=True,
                            doc={"meta_fields": processed_meta}
                        )
                        logging.debug(f"Successfully updated metadata for document {doc_id} using ES partial update")
                        return True
                except Exception as e:
                    # A raising get() is treated as "document missing": fall
                    # through to the insert path below.
                    logging.debug(f"Document {doc_id} not found in index, will insert: {e}")
                # Document doesn't exist - insert new
                logging.debug(f"[update_document_metadata] Document {doc_id} not found, inserting new")
                return cls.insert_document_metadata(doc_id, processed_meta)
            # For Infinity or as fallback: use delete+insert
            # skip_empty_check avoids dropping the table between delete and insert.
            logging.debug(f"[update_document_metadata] Using delete+insert method for doc_id: {doc_id}")
            cls.delete_document_metadata(doc_id, skip_empty_check=True)
            return cls.insert_document_metadata(doc_id, processed_meta)
        except Exception as e:
            logging.error(f"Error updating metadata for document {doc_id}: {e}")
            return False
    @classmethod
    @DB.connection_context()
    def delete_document_metadata(cls, doc_id: str, skip_empty_check: bool = False) -> bool:
        """
        Delete document metadata from ES/Infinity.

        Also drops the metadata table if it becomes empty (efficiently).
        If the document has no metadata in the table, this is a no-op that
        still returns True.

        Args:
            doc_id: Document ID
            skip_empty_check: If True, skip checking/dropping an empty table
                (for bulk deletions, where the caller handles cleanup once)

        Returns:
            True if successful (or no metadata to delete), False otherwise
        """
        try:
            logging.debug(f"[METADATA DELETE] Starting metadata deletion for document: {doc_id}")
            # Get document with tenant_id
            doc_query = Document.select(Document, Knowledgebase.tenant_id).join(
                Knowledgebase, on=(Knowledgebase.id == Document.kb_id)
            ).where(Document.id == doc_id)
            doc = doc_query.first()
            if not doc:
                logging.warning(f"Document {doc_id} not found for metadata deletion")
                return False
            tenant_id = doc.knowledgebase.tenant_id
            kb_id = doc.kb_id
            index_name = cls._get_doc_meta_index_name(tenant_id)
            logging.debug(f"[delete_document_metadata] Deleting doc_id: {doc_id}, kb_id: {kb_id}, index: {index_name}")
            # Check if metadata table exists before attempting deletion
            # This is the key optimization - no table = no metadata = nothing to delete
            if not settings.docStoreConn.index_exist(index_name, ""):
                logging.debug(f"Metadata table {index_name} does not exist, skipping metadata deletion for document {doc_id}")
                return True  # No metadata to delete is considered success
            # Try to get the metadata to confirm it exists before deleting
            # This is more efficient than attempting delete on non-existent records
            try:
                existing_metadata = settings.docStoreConn.get(
                    doc_id,
                    index_name,
                    [""]  # Empty list for metadata tables
                )
                logging.debug(f"[METADATA DELETE] Get result: {existing_metadata is not None}")
                if not existing_metadata:
                    logging.debug(f"[METADATA DELETE] Document {doc_id} has no metadata in table, skipping deletion")
                    # Only check/drop table if not skipped (tenant deletion will handle it)
                    if not skip_empty_check:
                        cls._drop_empty_metadata_table(index_name, tenant_id)
                    return True  # No metadata to delete is success
            except Exception as e:
                # If get fails, document might not exist in metadata table, which is fine
                logging.error(f"[METADATA DELETE] Get failed: {e}")
                # Continue to check/drop table if needed
            # Delete from ES/Infinity (only if metadata exists)
            # For metadata tables, pass kb_id for the delete operation
            # The delete() method will detect it's a metadata table and skip the kb_id filter
            logging.debug(f"[METADATA DELETE] Deleting metadata with condition: {{'id': '{doc_id}'}}")
            deleted_count = settings.docStoreConn.delete(
                {"id": doc_id},
                index_name,
                kb_id  # Pass actual kb_id (delete() will handle metadata tables correctly)
            )
            logging.debug(f"[METADATA DELETE] Deleted count: {deleted_count}")
            # Only check if table should be dropped if not skipped (for bulk operations)
            # Note: delete operation already uses refresh=True, so data is immediately available
            if not skip_empty_check:
                # Check by querying the actual metadata table (not MySQL)
                cls._drop_empty_metadata_table(index_name, tenant_id)
            logging.debug(f"Successfully deleted metadata for document {doc_id}")
            return True
        except Exception as e:
            logging.error(f"Error deleting metadata for document {doc_id}: {e}")
            return False
    @classmethod
    def _drop_empty_metadata_table(cls, index_name: str, tenant_id: str) -> None:
        """
        Check if the metadata table is empty and drop it if so.

        Uses the ES count API when available (cheap) and falls back to a
        limit-1 search otherwise. This prevents accumulation of empty
        metadata tables. Errors are logged, never raised.

        Args:
            index_name: Metadata table/index name
            tenant_id: Tenant ID (not used in the body; kept for call symmetry)
        """
        try:
            logging.debug(f"[DROP EMPTY TABLE] Starting empty table check for: {index_name}")
            # Check if table exists first (cheap operation)
            if not settings.docStoreConn.index_exist(index_name, ""):
                logging.debug(f"[DROP EMPTY TABLE] Metadata table {index_name} does not exist, skipping")
                return
            logging.debug(f"[DROP EMPTY TABLE] Table {index_name} exists, checking if empty...")
            # Use ES count API for accurate count
            # Note: No need to refresh since delete operation already uses refresh=True
            try:
                # Direct ES client access; raises on non-ES engines and
                # falls through to the search-based check below.
                count_response = settings.docStoreConn.es.count(index=index_name)
                total_count = count_response['count']
                logging.debug(f"[DROP EMPTY TABLE] ES count API result: {total_count} documents")
                is_empty = (total_count == 0)
            except Exception as e:
                logging.warning(f"[DROP EMPTY TABLE] Count API failed, falling back to search: {e}")
                # Fallback to search if count fails
                results = settings.docStoreConn.search(
                    select_fields=["id"],
                    highlight_fields=[],
                    condition={},
                    match_expressions=[],
                    order_by=OrderByExpr(),
                    offset=0,
                    limit=1,  # Only need 1 result to know if table is non-empty
                    index_names=index_name,
                    knowledgebase_ids=[""]  # Metadata tables don't filter by KB
                )
                logging.debug(f"[DROP EMPTY TABLE] Search results type: {type(results)}, results: {results}")
                # Check if empty based on return type (fallback search only)
                if isinstance(results, tuple) and len(results) == 2:
                    # Infinity returns (DataFrame, int)
                    df, total = results
                    logging.debug(f"[DROP EMPTY TABLE] Infinity format - total: {total}, df length: {len(df) if hasattr(df, '__len__') else 'N/A'}")
                    is_empty = (total == 0 or (hasattr(df, '__len__') and len(df) == 0))
                elif hasattr(results, 'get') and 'hits' in results:
                    # ES format - MUST check this before hasattr(results, '__len__')
                    # because ES response objects also have __len__
                    total = results.get('hits', {}).get('total', {})
                    hits = results.get('hits', {}).get('hits', [])
                    # ES 7.x+: total is a dict like {'value': 0, 'relation': 'eq'}
                    # ES 6.x: total is an int
                    if isinstance(total, dict):
                        total_count = total.get('value', 0)
                    else:
                        total_count = total
                    logging.debug(f"[DROP EMPTY TABLE] ES format - total: {total_count}, hits count: {len(hits)}")
                    is_empty = (total_count == 0 or len(hits) == 0)
                elif hasattr(results, '__len__'):
                    # DataFrame or list (check this AFTER ES format)
                    result_len = len(results)
                    logging.debug(f"[DROP EMPTY TABLE] List/DataFrame format - length: {result_len}")
                    is_empty = result_len == 0
                else:
                    logging.warning(f"[DROP EMPTY TABLE] Unknown result format: {type(results)}")
                    # Conservative: never drop a table we could not inspect
                    is_empty = False
            if is_empty:
                logging.debug(f"[DROP EMPTY TABLE] Metadata table {index_name} is empty, dropping it")
                drop_result = settings.docStoreConn.delete_idx(index_name, "")
                logging.debug(f"[DROP EMPTY TABLE] Drop result: {drop_result}")
            else:
                logging.debug(f"[DROP EMPTY TABLE] Metadata table {index_name} still has documents, keeping it")
        except Exception as e:
            # Log but don't fail - metadata deletion was successful
            logging.error(f"[DROP EMPTY TABLE] Failed to check/drop empty metadata table {index_name}: {e}")
@classmethod
@DB.connection_context()
def get_document_metadata(cls, doc_id: str) -> Dict:
"""
Get document metadata from ES/Infinity.
Args:
doc_id: Document ID
Returns:
Metadata dictionary, empty dict if not found
"""
try:
# Get document with tenant_id
doc_query = Document.select(Document, Knowledgebase.tenant_id).join(
Knowledgebase, on=(Knowledgebase.id == Document.kb_id)
).where(Document.id == doc_id)
doc = doc_query.first()
if not doc:
logging.warning(f"Document {doc_id} not found")
return {}
# Extract fields
doc_obj = doc
tenant_id = doc.knowledgebase.tenant_id
kb_id = doc_obj.kb_id
index_name = cls._get_doc_meta_index_name(tenant_id)
# Try to get metadata from ES/Infinity
metadata_doc = settings.docStoreConn.get(
doc_id,
index_name,
[kb_id]
)
if metadata_doc:
# Extract and unflatten metadata
return cls._extract_metadata(metadata_doc)
return {}
except Exception as e:
logging.error(f"Error getting metadata for document {doc_id}: {e}")
return {}
@classmethod
@DB.connection_context()
def get_meta_by_kbs(cls, kb_ids: List[str]) -> Dict:
"""
Get metadata for documents in knowledge bases (Legacy).
Legacy metadata aggregator (backward-compatible).
- Does NOT expand list values and a list is kept as one string key.
Example: {"tags": ["foo","bar"]} -> meta["tags"]["['foo', 'bar']"] = [doc_id]
- Expects meta_fields is a dict.
Use when existing callers rely on the old list-as-string semantics.
Args:
kb_ids: List of knowledge base IDs
Returns:
Metadata dictionary in format: {field_name: {value: [doc_ids]}}
"""
try:
# Get tenant_id from first KB
kb = Knowledgebase.get_by_id(kb_ids[0])
if not kb:
return {}
tenant_id = kb.tenant_id
index_name = cls._get_doc_meta_index_name(tenant_id)
condition = {"kb_id": kb_ids}
order_by = OrderByExpr()
# Query with large limit
results = settings.docStoreConn.search(
select_fields=["*"],
highlight_fields=[],
condition=condition,
match_expressions=[],
order_by=order_by,
offset=0,
limit=10000,
index_names=index_name,
knowledgebase_ids=kb_ids
)
logging.debug(f"[get_meta_by_kbs] index_name: {index_name}, kb_ids: {kb_ids}")
# Aggregate metadata (legacy: keeps lists as string keys)
meta = {}
# Use helper to iterate over results in any format
for doc_id, doc in cls._iter_search_results(results):
# Extract metadata fields (exclude system fields)
doc_meta = cls._extract_metadata(doc)
# Legacy: Keep lists as string keys (do NOT expand)
for k, v in doc_meta.items():
if k not in meta:
meta[k] = {}
# If not list, make it a list
if not isinstance(v, list):
v = [v]
# Legacy: Use the entire list as a string key
# Skip nested lists/dicts
if isinstance(v, list) and any(isinstance(x, (list, dict)) for x in v):
continue
list_key = str(v)
if list_key not in meta[k]:
meta[k][list_key] = []
meta[k][list_key].append(doc_id)
logging.debug(f"[get_meta_by_kbs] KBs: {kb_ids}, Returning metadata: {meta}")
return meta
except Exception as e:
logging.error(f"Error getting metadata for KBs {kb_ids}: {e}")
return {}
@classmethod
@DB.connection_context()
def get_flatted_meta_by_kbs(cls, kb_ids: List[str]) -> Dict:
"""
Get flattened metadata for documents in knowledge bases.
- Parses stringified JSON meta_fields when possible and skips non-dict or unparsable values.
- Expands list values into individual entries.
Example: {"tags": ["foo","bar"], "author": "alice"} ->
meta["tags"]["foo"] = [doc_id], meta["tags"]["bar"] = [doc_id], meta["author"]["alice"] = [doc_id]
Prefer for metadata_condition filtering and scenarios that must respect list semantics.
Args:
kb_ids: List of knowledge base IDs
Returns:
Metadata dictionary in format: {field_name: {value: [doc_ids]}}
"""
try:
# Get tenant_id from first KB
kb = Knowledgebase.get_by_id(kb_ids[0])
if not kb:
return {}
tenant_id = kb.tenant_id
index_name = cls._get_doc_meta_index_name(tenant_id)
condition = {"kb_id": kb_ids}
order_by = OrderByExpr()
# Query with large limit
results = settings.docStoreConn.search(
select_fields=["*"], # Get all fields
highlight_fields=[],
condition=condition,
match_expressions=[],
order_by=order_by,
offset=0,
limit=10000,
index_names=index_name,
knowledgebase_ids=kb_ids
)
logging.debug(f"[get_flatted_meta_by_kbs] index_name: {index_name}, kb_ids: {kb_ids}")
logging.debug(f"[get_flatted_meta_by_kbs] results type: {type(results)}")
# Aggregate metadata
meta = {}
# Use helper to iterate over results in any format
for doc_id, doc in cls._iter_search_results(results):
# Extract metadata fields (exclude system fields)
doc_meta = cls._extract_metadata(doc)
for k, v in doc_meta.items():
if k not in meta:
meta[k] = {}
values = v if isinstance(v, list) else [v]
for vv in values:
if vv is None:
continue
sv = str(vv)
if sv not in meta[k]:
meta[k][sv] = []
meta[k][sv].append(doc_id)
logging.debug(f"[get_flatted_meta_by_kbs] KBs: {kb_ids}, Returning metadata: {meta}")
return meta
except Exception as e:
logging.error(f"Error getting flattened metadata for KBs {kb_ids}: {e}")
return {}
@classmethod
def get_metadata_for_documents(cls, doc_ids: Optional[List[str]], kb_id: str) -> Dict[str, Dict]:
"""
Get metadata fields for specific documents.
Returns a mapping of doc_id -> meta_fields
Args:
doc_ids: List of document IDs (if None, gets all documents with metadata for the KB)
kb_id: Knowledge base ID
Returns:
Dictionary mapping doc_id to meta_fields dict
"""
try:
results = cls._search_metadata(kb_id, condition={"kb_id": kb_id})
if not results:
return {}
# Build mapping: doc_id -> meta_fields
meta_mapping = {}
# If doc_ids is provided, create a set for efficient lookup
doc_ids_set = set(doc_ids) if doc_ids else None
# Use helper to iterate over results in any format
for doc_id, doc in cls._iter_search_results(results):
# Filter by doc_ids if provided
if doc_ids_set is not None and doc_id not in doc_ids_set:
continue
# Extract metadata (handles both JSON strings and dicts)
doc_meta = cls._extract_metadata(doc)
if doc_meta:
meta_mapping[doc_id] = doc_meta
logging.debug(f"[get_metadata_for_documents] Found metadata for {len(meta_mapping)}/{len(doc_ids) if doc_ids else 'all'} documents")
return meta_mapping
except Exception as e:
logging.error(f"Error getting metadata for documents: {e}")
return {}
    @classmethod
    @DB.connection_context()
    def get_metadata_summary(cls, kb_id: str, doc_ids=None) -> Dict:
        """
        Get metadata summary for documents in a knowledge base.

        Args:
            kb_id: Knowledge base ID
            doc_ids: Optional list of document IDs to filter by
                (an empty list disables filtering, same as None)

        Returns:
            Dictionary with metadata field statistics in format:
            {
                "field_name": {
                    "type": "string" | "number" | "list" | "time",
                    "values": [("value1", count1), ("value2", count2), ...]  # sorted by count desc
                }
            }
            The per-field "type" is the most frequent type observed across
            documents; booleans count as "string".
        """
        def _is_time_string(value: str) -> bool:
            """Check if a string value is an ISO 8601 datetime (e.g., '2026-02-03T00:00:00')."""
            if not isinstance(value, str):
                return False
            return bool(re.match(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$', value))
        def _meta_value_type(value):
            """Determine the type of a metadata value (None means 'skip')."""
            if value is None:
                return None
            if isinstance(value, list):
                return "list"
            # bool is a subclass of int, so it must be checked before (int, float)
            if isinstance(value, bool):
                return "string"
            if isinstance(value, (int, float)):
                return "number"
            if isinstance(value, str) and _is_time_string(value):
                return "time"
            return "string"
        try:
            results = cls._search_metadata(kb_id, condition={"kb_id": kb_id})
            if not results:
                return {}
            # If doc_ids are provided, we'll filter after the search
            doc_ids_set = set(doc_ids) if doc_ids else None
            # Aggregate metadata
            summary = {}
            type_counter = {}
            logging.debug(f"[METADATA SUMMARY] KB: {kb_id}, doc_ids: {doc_ids}")
            # Use helper to iterate over results in any format
            for doc_id, doc in cls._iter_search_results(results):
                # Check doc_ids filter
                if doc_ids_set and doc_id not in doc_ids_set:
                    continue
                doc_meta = cls._extract_metadata(doc)
                for k, v in doc_meta.items():
                    # Track type counts for this field
                    value_type = _meta_value_type(v)
                    if value_type:
                        if k not in type_counter:
                            type_counter[k] = {}
                        type_counter[k][value_type] = type_counter[k].get(value_type, 0) + 1
                    # Aggregate value counts (each list element counts separately)
                    values = v if isinstance(v, list) else [v]
                    for vv in values:
                        if vv is None:
                            continue
                        sv = str(vv)
                        if k not in summary:
                            summary[k] = {}
                        summary[k][sv] = summary[k].get(sv, 0) + 1
            # Build result with type information and sorted values
            result = {}
            for k, v in summary.items():
                values = sorted([(val, cnt) for val, cnt in v.items()], key=lambda x: x[1], reverse=True)
                type_counts = type_counter.get(k, {})
                value_type = "string"
                if type_counts:
                    # Majority vote over the observed types for this field
                    value_type = max(type_counts.items(), key=lambda item: item[1])[0]
                result[k] = {"type": value_type, "values": values}
            logging.debug(f"[METADATA SUMMARY] Final result: {result}")
            return result
        except Exception as e:
            logging.error(f"Error getting metadata summary for KB {kb_id}: {e}")
            return {}
    @classmethod
    @DB.connection_context()
    def batch_update_metadata(cls, kb_id: str, doc_ids: List[str], updates=None, deletes=None) -> int:
        """
        Batch update metadata for documents in a knowledge base.

        Documents that already have a metadata row are updated in place;
        requested doc_ids without a metadata row get a fresh row when the
        updates produce any metadata.

        Args:
            kb_id: Knowledge base ID
            doc_ids: List of document IDs to update
            updates: List of update operations, each with:
                - key: field name to update
                - value: new value
                - match (optional): only update if current value matches this
            deletes: List of delete operations, each with:
                - key: field name to delete from
                - value (optional): specific value to delete (if not provided, deletes the entire field)

        Returns:
            Number of documents updated (0 on error or when nothing changed)

        Examples:
            updates = [{"key": "author", "value": "John"}]
            updates = [{"key": "tags", "value": "new", "match": "old"}]  # Replace "old" with "new" in tags list
            deletes = [{"key": "author"}]  # Delete entire author field
            deletes = [{"key": "tags", "value": "obsolete"}]  # Remove "obsolete" from tags list
        """
        updates = updates or []
        deletes = deletes or []
        if not doc_ids:
            return 0
        def _normalize_meta(meta):
            """Normalize metadata to a dict (parse JSON strings; deep-copy to avoid aliasing)."""
            if isinstance(meta, str):
                try:
                    meta = json.loads(meta)
                except Exception:
                    return {}
            if not isinstance(meta, dict):
                return {}
            return deepcopy(meta)
        def _str_equal(a, b):
            """Compare two values by their string form (tolerates mixed types)."""
            return str(a) == str(b)
        def _apply_updates(meta):
            """Apply update operations to metadata in place; return True if anything changed."""
            changed = False
            for upd in updates:
                key = upd.get("key")
                if not key:
                    continue
                new_value = upd.get("value")
                match_value = upd.get("match", None)
                # Empty string counts as "no match filter"
                match_provided = match_value is not None and match_value != ""
                if key not in meta:
                    if match_provided:
                        # Nothing to match against on a missing field
                        continue
                    meta[key] = dedupe_list(new_value) if isinstance(new_value, list) else new_value
                    changed = True
                    continue
                if isinstance(meta[key], list):
                    if not match_provided:
                        # No match provided, append new_value to the list
                        if isinstance(new_value, list):
                            meta[key] = dedupe_list(meta[key] + new_value)
                        else:
                            meta[key] = dedupe_list(meta[key] + [new_value])
                        changed = True
                    else:
                        # Replace items matching match_value with new_value
                        replaced = False
                        new_list = []
                        for item in meta[key]:
                            if _str_equal(item, match_value):
                                new_list.append(new_value)
                                replaced = True
                            else:
                                new_list.append(item)
                        if replaced:
                            meta[key] = dedupe_list(new_list)
                            changed = True
                else:
                    if not match_provided:
                        meta[key] = new_value
                        changed = True
                    else:
                        if _str_equal(meta[key], match_value):
                            meta[key] = new_value
                            changed = True
            return changed
        def _apply_deletes(meta):
            """Apply delete operations to metadata in place; return True if anything changed."""
            changed = False
            for d in deletes:
                key = d.get("key")
                if not key or key not in meta:
                    continue
                value = d.get("value", None)
                if isinstance(meta[key], list):
                    if value is None:
                        # No value given: drop the whole field
                        del meta[key]
                        changed = True
                        continue
                    new_list = [item for item in meta[key] if not _str_equal(item, value)]
                    if len(new_list) != len(meta[key]):
                        if new_list:
                            meta[key] = new_list
                        else:
                            # Removing the last element removes the field entirely
                            del meta[key]
                        changed = True
                else:
                    if value is None or _str_equal(meta[key], value):
                        del meta[key]
                        changed = True
            return changed
        try:
            results = cls._search_metadata(kb_id, condition=None)
            if not results:
                results = []  # Treat as empty list if None
            updated_docs = 0
            doc_ids_set = set(doc_ids)
            found_doc_ids = set()
            logging.debug(f"[batch_update_metadata] Searching for doc_ids: {doc_ids}")
            # Use helper to iterate over results in any format
            for doc_id, doc in cls._iter_search_results(results):
                # Filter to only process requested doc_ids
                if doc_id not in doc_ids_set:
                    continue
                found_doc_ids.add(doc_id)
                # Get current metadata
                current_meta = cls._extract_metadata(doc)
                meta = _normalize_meta(current_meta)
                original_meta = deepcopy(meta)
                logging.debug(f"[batch_update_metadata] Doc {doc_id}: current_meta={current_meta}, meta={meta}")
                logging.debug(f"[batch_update_metadata] Updates to apply: {updates}, Deletes: {deletes}")
                # Apply updates and deletes
                changed = _apply_updates(meta)
                logging.debug(f"[batch_update_metadata] After _apply_updates: changed={changed}, meta={meta}")
                changed = _apply_deletes(meta) or changed
                logging.debug(f"[batch_update_metadata] After _apply_deletes: changed={changed}, meta={meta}")
                # Update if changed
                if changed and meta != original_meta:
                    logging.debug(f"[batch_update_metadata] Updating doc_id: {doc_id}, meta: {meta}")
                    # If metadata is empty, delete the row entirely instead of keeping empty metadata
                    if not meta:
                        cls.delete_document_metadata(doc_id, skip_empty_check=True)
                    else:
                        cls.update_document_metadata(doc_id, meta)
                    updated_docs += 1
            # Handle documents that don't have metadata rows yet
            # These documents weren't in the search results, so we need to insert new metadata for them
            missing_doc_ids = doc_ids_set - found_doc_ids
            if missing_doc_ids and updates:
                logging.debug(f"[batch_update_metadata] Inserting new metadata for documents without metadata rows: {missing_doc_ids}")
                for doc_id in missing_doc_ids:
                    # Apply updates to create new metadata (deletes are no-ops on {})
                    meta = {}
                    _apply_updates(meta)
                    if meta:
                        # Only insert if there's actual metadata to add
                        cls.update_document_metadata(doc_id, meta)
                        updated_docs += 1
                        logging.debug(f"[batch_update_metadata] Inserted metadata for doc_id: {doc_id}, meta: {meta}")
            logging.debug(f"[batch_update_metadata] KB: {kb_id}, doc_ids: {doc_ids}, updated: {updated_docs}")
            return updated_docs
        except Exception as e:
            logging.error(f"Error in batch_update_metadata for KB {kb_id}: {e}")
            return 0
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/db/services/doc_metadata_service.py",
"license": "Apache License 2.0",
"lines": 991,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:test/testcases/test_http_api/test_file_management_within_dataset/test_metadata_retrieval.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
End-to-end test for metadata filtering during retrieval.
Tests that chunks are only retrieved from documents matching the metadata condition.
"""
import pytest
import logging
from common import (
create_dataset,
delete_datasets,
list_documents,
update_document,
)
from utils import wait_for
@wait_for(30, 1, "Document parsing timeout")
def _condition_parsing_complete(_auth, dataset_id):
    """Return True once every document in the dataset has parsed successfully.

    Fails the test immediately if any document reports a FAILED run state;
    wait_for keeps polling while this returns False.
    """
    res = list_documents(_auth, dataset_id)
    if res["code"] != 0:
        return False
    for doc in res["data"]["docs"]:
        run_state = doc.get("run", "UNKNOWN")
        if run_state == "DONE":
            continue
        if run_state == "FAILED":
            pytest.fail(f"Document parsing failed: {doc}")
        return False
    return True
@pytest.fixture(scope="function")
def add_dataset_with_metadata(HttpApiAuth):
    """
    Create a dataset, attach a metadata schema via the
    ``kb/update_metadata_setting`` endpoint, and yield its ID.

    The dataset is deleted again on teardown.
    """
    # Idiom fix: use a plain import instead of the __import__('time') hack
    # that was previously embedded in the f-string.
    import time

    # First create the dataset (timestamp keeps names unique across runs)
    res = create_dataset(HttpApiAuth, {
        "name": f"test_metadata_{int(time.time())}",
        "chunk_method": "naive"
    })
    assert res["code"] == 0, f"Failed to create dataset: {res}"
    dataset_id = res["data"]["id"]

    # Then configure metadata via the update_metadata_setting endpoint
    import requests
    from configs import HOST_ADDRESS, VERSION

    metadata_config = {
        "type": "object",
        "properties": {
            "character": {
                "description": "Historical figure name",
                "type": "string"
            },
            "era": {
                "description": "Historical era",
                "type": "string"
            },
            "achievements": {
                "description": "Major achievements",
                "type": "array",
                "items": {
                    "type": "string"
                }
            }
        }
    }
    res = requests.post(
        url=f"{HOST_ADDRESS}/{VERSION}/kb/update_metadata_setting",
        headers={"Content-Type": "application/json"},
        auth=HttpApiAuth,
        json={
            "kb_id": dataset_id,
            "metadata": metadata_config,
            "enable_metadata": False
        }
    ).json()
    assert res["code"] == 0, f"Failed to configure metadata: {res}"

    yield dataset_id

    # Cleanup: remove the dataset so runs stay isolated
    delete_datasets(HttpApiAuth, {"ids": [dataset_id]})
@pytest.mark.p2
class TestMetadataWithRetrieval:
    """Test retrieval with metadata filtering."""
    def test_retrieval_with_metadata_filter(self, HttpApiAuth, add_dataset_with_metadata, tmp_path):
        """
        Test that retrieval respects metadata filters.
        Verifies that chunks are only retrieved from documents matching the metadata condition.

        End-to-end flow: upload two docs -> attach distinct meta_fields ->
        parse -> retrieve with a metadata_condition and check which documents
        the returned chunks came from.
        """
        from common import upload_documents, parse_documents, retrieval_chunks
        dataset_id = add_dataset_with_metadata
        # Create two documents with different metadata
        content_doc1 = "Document about Zhuge Liang who lived in Three Kingdoms period."
        content_doc2 = "Document about Cao Cao who lived in Late Eastern Han Dynasty."
        fp1 = tmp_path / "doc1_zhuge_liang.txt"
        fp2 = tmp_path / "doc2_cao_cao.txt"
        with open(fp1, "w", encoding="utf-8") as f:
            f.write(content_doc1)
        with open(fp2, "w", encoding="utf-8") as f:
            f.write(content_doc2)
        # Upload both documents
        res = upload_documents(HttpApiAuth, dataset_id, [fp1, fp2])
        assert res["code"] == 0, f"Failed to upload documents: {res}"
        doc1_id = res["data"][0]["id"]
        doc2_id = res["data"][1]["id"]
        # Add different metadata to each document
        res = update_document(HttpApiAuth, dataset_id, doc1_id, {
            "meta_fields": {"character": "Zhuge Liang", "era": "Three Kingdoms"}
        })
        assert res["code"] == 0, f"Failed to update doc1 metadata: {res}"
        res = update_document(HttpApiAuth, dataset_id, doc2_id, {
            "meta_fields": {"character": "Cao Cao", "era": "Late Eastern Han"}
        })
        assert res["code"] == 0, f"Failed to update doc2 metadata: {res}"
        # Parse both documents
        res = parse_documents(HttpApiAuth, dataset_id, {"document_ids": [doc1_id, doc2_id]})
        assert res["code"] == 0, f"Failed to trigger parsing: {res}"
        # Wait for parsing to complete
        assert _condition_parsing_complete(HttpApiAuth, dataset_id), "Parsing timeout"
        # Test retrieval WITH metadata filter for "Zhuge Liang"
        res = retrieval_chunks(HttpApiAuth, {
            "question": "Zhuge Liang",
            "dataset_ids": [dataset_id],
            "metadata_condition": {
                "logic": "and",
                "conditions": [
                    {
                        "name": "character",
                        "comparison_operator": "is",
                        "value": "Zhuge Liang"
                    }
                ]
            }
        })
        assert res["code"] == 0, f"Retrieval with metadata filter failed: {res}"
        chunks_with_filter = res["data"]["chunks"]
        doc_ids_with_filter = set(chunk.get("document_id", "") for chunk in chunks_with_filter)
        logging.info(f"✓ Retrieved {len(chunks_with_filter)} chunks from documents: {doc_ids_with_filter}")
        # Verify that filtered results only contain doc1 (Zhuge Liang)
        if len(chunks_with_filter) > 0:
            assert doc1_id in doc_ids_with_filter, f"Filtered results should contain doc1 (Zhuge Liang), but got: {doc_ids_with_filter}"
            assert doc2_id not in doc_ids_with_filter, f"Filtered results should NOT contain doc2 (Cao Cao), but got: {doc_ids_with_filter}"
            logging.info("Metadata filter correctly excluded chunks from non-matching documents")
        else:
            # NOTE(review): an empty result is tolerated rather than failed —
            # presumably because test environments without an embedding model
            # return no chunks at all; confirm this is intentional.
            logging.warning("No chunks retrieved with filter - this might be due to embedding model not configured")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_file_management_within_dataset/test_metadata_retrieval.py",
"license": "Apache License 2.0",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:agent/sandbox/client.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Sandbox client for agent components.
This module provides a unified interface for agent components to interact
with the configured sandbox provider.
"""
import json
import logging
from typing import Dict, Any, Optional
from api.db.services.system_settings_service import SystemSettingsService
from agent.sandbox.providers import ProviderManager
from agent.sandbox.providers.base import ExecutionResult
logger = logging.getLogger(__name__)
# Global provider manager instance
_provider_manager: Optional[ProviderManager] = None
def get_provider_manager() -> ProviderManager:
    """Return the process-wide ProviderManager, creating it lazily.

    On first use a new manager is constructed and the active provider is
    loaded from system settings; subsequent calls return the cached instance.

    Returns:
        ProviderManager instance with active provider loaded
    """
    global _provider_manager
    if _provider_manager is None:
        # Lazy initialization: build the manager, then wire up the provider.
        _provider_manager = ProviderManager()
        _load_provider_from_settings()
    return _provider_manager
def _load_provider_from_settings() -> None:
    """
    Load sandbox provider from system settings and configure the provider manager.

    Reads 'sandbox.provider_type' to determine the active provider, reads
    'sandbox.<provider_type>' for its JSON configuration, instantiates and
    initializes the provider class, and registers it with the manager.

    No-op if the global provider manager has not been created yet.
    """
    global _provider_manager
    if _provider_manager is None:
        return
    try:
        # Get active provider type
        provider_type_settings = SystemSettingsService.get_by_name("sandbox.provider_type")
        if not provider_type_settings:
            raise RuntimeError(
                "Sandbox provider type not configured. Please set 'sandbox.provider_type' in system settings."
            )
        provider_type = provider_type_settings[0].value
        # Get provider configuration (stored as a JSON string)
        provider_config_settings = SystemSettingsService.get_by_name(f"sandbox.{provider_type}")
        if not provider_config_settings:
            logger.warning(f"No configuration found for provider: {provider_type}")
            config = {}
        else:
            try:
                config = json.loads(provider_config_settings[0].value)
            except json.JSONDecodeError as e:
                logger.error(f"Failed to parse sandbox config for {provider_type}: {e}")
                config = {}
        # Imported here (not at module level) — presumably to avoid a circular
        # import at load time; keep it local.
        from agent.sandbox.providers import (
            SelfManagedProvider,
            AliyunCodeInterpreterProvider,
            E2BProvider,
        )
        provider_classes = {
            "self_managed": SelfManagedProvider,
            "aliyun_codeinterpreter": AliyunCodeInterpreterProvider,
            "e2b": E2BProvider,
        }
        if provider_type not in provider_classes:
            logger.error(f"Unknown provider type: {provider_type}")
            return
        provider_class = provider_classes[provider_type]
        provider = provider_class()
        # Initialize the provider; leave the manager unconfigured on failure.
        if not provider.initialize(config):
            logger.error(f"Failed to initialize sandbox provider: {provider_type}. Config keys: {list(config.keys())}")
            return
        # Set the active provider
        _provider_manager.set_provider(provider_type, provider)
        logger.info(f"Sandbox provider '{provider_type}' initialized successfully")
    except Exception:
        # logger.exception records the full traceback through the logging
        # system instead of dumping it to stderr via traceback.print_exc().
        logger.exception("Failed to load sandbox provider from settings")
def reload_provider() -> None:
    """
    Reload the sandbox provider from system settings.

    Use this function when sandbox settings have been updated.
    """
    global _provider_manager
    # Bug fix: the previous implementation reset the manager to None and then
    # called _load_provider_from_settings(), which early-returns when the
    # manager is None — so the provider was never actually reloaded here.
    # Recreate the manager first so the reload really happens.
    _provider_manager = ProviderManager()
    _load_provider_from_settings()
def execute_code(
    code: str,
    language: str = "python",
    timeout: int = 30,
    arguments: Optional[Dict[str, Any]] = None
) -> ExecutionResult:
    """
    Execute code in the configured sandbox.

    Main entry point for agent components. A fresh sandbox instance is
    created for the call and destroyed afterwards, even when execution fails.

    Args:
        code: Source code to execute
        language: Programming language (python, nodejs, javascript)
        timeout: Maximum execution time in seconds
        arguments: Optional arguments dict to pass to main() function

    Returns:
        ExecutionResult containing stdout, stderr, exit_code, and metadata

    Raises:
        RuntimeError: If no provider is configured or execution fails
    """
    manager = get_provider_manager()
    if not manager.is_configured():
        raise RuntimeError(
            "No sandbox provider configured. Please configure sandbox settings in the admin panel."
        )
    provider = manager.get_provider()
    # One short-lived sandbox instance per execution.
    instance = provider.create_instance(template=language)
    try:
        return provider.execute_code(
            instance_id=instance.instance_id,
            code=code,
            language=language,
            timeout=timeout,
            arguments=arguments,
        )
    finally:
        # Best-effort teardown: a failed destroy is logged, never raised.
        try:
            provider.destroy_instance(instance.instance_id)
        except Exception as e:
            logger.warning(f"Failed to destroy sandbox instance {instance.instance_id}: {e}")
def health_check() -> bool:
    """
    Check if the sandbox provider is healthy.

    Returns:
        True if provider is configured and healthy, False otherwise
    """
    try:
        manager = get_provider_manager()
        if not manager.is_configured():
            return False
        return manager.get_provider().health_check()
    except Exception as e:
        # Any failure while reaching the provider counts as unhealthy.
        logger.error(f"Sandbox health check failed: {e}")
        return False
def get_provider_info() -> Dict[str, Any]:
    """
    Get information about the current sandbox provider.

    Returns:
        Dictionary with provider information:
        - provider_type: Type of the active provider
        - configured: Whether provider is configured
        - healthy: Whether provider is healthy
    """
    try:
        manager = get_provider_manager()
        info = {
            "provider_type": manager.get_provider_name(),
            "configured": manager.is_configured(),
            "healthy": health_check(),
        }
    except Exception as e:
        # Fall back to an "unconfigured" snapshot rather than propagating.
        logger.error(f"Failed to get provider info: {e}")
        info = {
            "provider_type": None,
            "configured": False,
            "healthy": False,
        }
    return info
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/sandbox/client.py",
"license": "Apache License 2.0",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/sandbox/providers/aliyun_codeinterpreter.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Aliyun Code Interpreter provider implementation.
This provider integrates with Aliyun Function Compute Code Interpreter service
for secure code execution in serverless microVMs using the official agentrun-sdk.
Official Documentation: https://help.aliyun.com/zh/functioncompute/fc/sandbox-sandbox-code-interepreter
Official SDK: https://github.com/Serverless-Devs/agentrun-sdk-python
https://api.aliyun.com/api/AgentRun/2025-09-10/CreateTemplate?lang=PYTHON
https://api.aliyun.com/api/AgentRun/2025-09-10/CreateSandbox?lang=PYTHON
"""
import logging
import os
import time
from typing import Dict, Any, List, Optional
from datetime import datetime, timezone
from agentrun.sandbox import TemplateType, CodeLanguage, Template, TemplateInput, Sandbox
from agentrun.utils.config import Config
from agentrun.utils.exception import ServerError
from .base import SandboxProvider, SandboxInstance, ExecutionResult
logger = logging.getLogger(__name__)
class AliyunCodeInterpreterProvider(SandboxProvider):
    """
    Aliyun Code Interpreter provider implementation.

    This provider uses the official agentrun-sdk to interact with
    Aliyun Function Compute's Code Interpreter service.
    """
    def __init__(self):
        # All connection state is injected later through initialize().
        self.access_key_id: Optional[str] = None
        self.access_key_secret: Optional[str] = None
        self.account_id: Optional[str] = None
        self.region: str = "cn-hangzhou"
        self.template_name: str = ""
        self.timeout: int = 30
        self._initialized: bool = False
        self._config: Optional[Config] = None
    def initialize(self, config: Dict[str, Any]) -> bool:
        """
        Initialize the provider with Aliyun credentials.

        Args:
            config: Configuration dictionary with keys:
                - access_key_id: Aliyun AccessKey ID
                - access_key_secret: Aliyun AccessKey Secret
                - account_id: Aliyun primary account ID
                - region: Region (default: "cn-hangzhou")
                - template_name: Optional sandbox template name
                - timeout: Request timeout in seconds (default: 30, max 30)

        Returns:
            True if initialization successful, False otherwise
        """
        # Get values from config or environment variables
        access_key_id = config.get("access_key_id") or os.getenv("AGENTRUN_ACCESS_KEY_ID")
        access_key_secret = config.get("access_key_secret") or os.getenv("AGENTRUN_ACCESS_KEY_SECRET")
        account_id = config.get("account_id") or os.getenv("AGENTRUN_ACCOUNT_ID")
        region = config.get("region") or os.getenv("AGENTRUN_REGION", "cn-hangzhou")
        self.access_key_id = access_key_id
        self.access_key_secret = access_key_secret
        self.account_id = account_id
        self.region = region
        self.template_name = config.get("template_name", "")
        self.timeout = min(config.get("timeout", 30), 30)  # Max 30 seconds
        logger.info(f"Aliyun Code Interpreter: Initializing with account_id={self.account_id}, region={self.region}")
        # Validate required fields
        if not self.access_key_id or not self.access_key_secret:
            logger.error("Aliyun Code Interpreter: Missing access_key_id or access_key_secret")
            return False
        if not self.account_id:
            logger.error("Aliyun Code Interpreter: Missing account_id (主账号ID)")
            return False
        # Create SDK configuration
        try:
            logger.info(f"Aliyun Code Interpreter: Creating Config object with account_id={self.account_id}")
            self._config = Config(
                access_key_id=self.access_key_id,
                access_key_secret=self.access_key_secret,
                account_id=self.account_id,
                region_id=self.region,
                timeout=self.timeout,
            )
            logger.info("Aliyun Code Interpreter: Config object created successfully")
            # Verify connection with health check
            if not self.health_check():
                logger.error(f"Aliyun Code Interpreter: Health check failed for region {self.region}")
                return False
            self._initialized = True
            logger.info(f"Aliyun Code Interpreter: Initialized successfully for region {self.region}")
            return True
        except Exception as e:
            logger.error(f"Aliyun Code Interpreter: Initialization failed - {str(e)}")
            return False
    def create_instance(self, template: str = "python") -> SandboxInstance:
        """
        Create a new sandbox instance in Aliyun Code Interpreter.

        Args:
            template: Programming language (python, javascript)

        Returns:
            SandboxInstance object

        Raises:
            RuntimeError: If instance creation fails
        """
        if not self._initialized or not self._config:
            raise RuntimeError("Provider not initialized. Call initialize() first.")
        # Normalize language
        language = self._normalize_language(template)
        try:
            # Resolve template: prefer the configured one, otherwise get or
            # lazily create a per-language default template. (Sandbox/Template
            # are module-level imports; the redundant local import is removed.)
            if self.template_name:
                # Use existing template
                template_name = self.template_name
            else:
                default_template_name = f"ragflow-{language}-default"
                try:
                    # Check if template exists
                    Template.get_by_name(default_template_name, config=self._config)
                    template_name = default_template_name
                except Exception:
                    # Create default template if it doesn't exist
                    template_input = TemplateInput(
                        template_name=default_template_name,
                        template_type=TemplateType.CODE_INTERPRETER,
                    )
                    Template.create(template_input, config=self._config)
                    template_name = default_template_name
            # Create sandbox directly
            sandbox = Sandbox.create(
                template_type=TemplateType.CODE_INTERPRETER,
                template_name=template_name,
                sandbox_idle_timeout_seconds=self.timeout,
                config=self._config,
            )
            instance_id = sandbox.sandbox_id
            return SandboxInstance(
                instance_id=instance_id,
                provider="aliyun_codeinterpreter",
                status="READY",
                metadata={
                    "language": language,
                    "region": self.region,
                    "account_id": self.account_id,
                    "template_name": template_name,
                    "created_at": datetime.now(timezone.utc).isoformat(),
                },
            )
        except ServerError as e:
            raise RuntimeError(f"Failed to create sandbox instance: {str(e)}")
        except Exception as e:
            raise RuntimeError(f"Unexpected error creating instance: {str(e)}")
    def execute_code(self, instance_id: str, code: str, language: str, timeout: int = 10, arguments: Optional[Dict[str, Any]] = None) -> ExecutionResult:
        """
        Execute code in the Aliyun Code Interpreter instance.

        Args:
            instance_id: ID of the sandbox instance
            code: Source code to execute
            language: Programming language (python, javascript)
            timeout: Maximum execution time in seconds (max 30)
            arguments: Optional arguments dict to pass to main() function

        Returns:
            ExecutionResult containing stdout, stderr, exit_code, and metadata

        Raises:
            RuntimeError: If execution fails
            TimeoutError: If execution exceeds timeout
        """
        if not self._initialized or not self._config:
            raise RuntimeError("Provider not initialized. Call initialize() first.")
        import json
        # Normalize language
        normalized_lang = self._normalize_language(language)
        # Enforce 30-second hard limit
        timeout = min(timeout or self.timeout, 30)
        try:
            # Connect to existing sandbox instance
            sandbox = Sandbox.connect(sandbox_id=instance_id, config=self._config)
            # Convert language string to CodeLanguage enum
            code_language = CodeLanguage.PYTHON if normalized_lang == "python" else CodeLanguage.JAVASCRIPT
            # Wrap code to call main() function.
            # Matches self_managed provider behavior: call main(**arguments).
            # Arguments are serialized once here instead of in each branch.
            args_json = json.dumps(arguments) if arguments else None
            if normalized_lang == "python":
                if args_json is not None:
                    wrapped_code = f'''{code}
if __name__ == "__main__":
    import json
    result = main(**{args_json})
    print(json.dumps(result) if isinstance(result, dict) else result)
'''
                else:
                    wrapped_code = f'''{code}
if __name__ == "__main__":
    import json
    result = main()
    print(json.dumps(result) if isinstance(result, dict) else result)
'''
            else:  # javascript
                if args_json is not None:
                    wrapped_code = f'''{code}
// Call main and output result
const result = main({args_json});
console.log(typeof result === 'object' ? JSON.stringify(result) : String(result));
'''
                else:
                    wrapped_code = f'''{code}
// Call main and output result
const result = main();
console.log(typeof result === 'object' ? JSON.stringify(result) : String(result));
'''
            logger.debug(f"Aliyun Code Interpreter: Wrapped code (first 200 chars): {wrapped_code[:200]}")
            start_time = time.time()
            # Execute code using SDK's simplified execute endpoint
            logger.info(f"Aliyun Code Interpreter: Executing code (language={normalized_lang}, timeout={timeout})")
            logger.debug(f"Aliyun Code Interpreter: Original code (first 200 chars): {code[:200]}")
            result = sandbox.context.execute(
                code=wrapped_code,
                language=code_language,
                timeout=timeout,
            )
            execution_time = time.time() - start_time
            logger.info(f"Aliyun Code Interpreter: Execution completed in {execution_time:.2f}s")
            logger.debug(f"Aliyun Code Interpreter: Raw SDK result: {result}")
            # Parse execution result
            results = result.get("results", []) if isinstance(result, dict) else []
            logger.info(f"Aliyun Code Interpreter: Parsed {len(results)} result items")
            # Extract stdout and stderr from results
            stdout_parts = []
            stderr_parts = []
            exit_code = 0
            execution_status = "ok"
            for item in results:
                result_type = item.get("type", "")
                text = item.get("text", "")
                if result_type == "stdout":
                    stdout_parts.append(text)
                elif result_type == "stderr":
                    stderr_parts.append(text)
                    exit_code = 1  # Error occurred
                elif result_type == "endOfExecution":
                    execution_status = item.get("status", "ok")
                    if execution_status != "ok":
                        exit_code = 1
                elif result_type == "error":
                    stderr_parts.append(text)
                    exit_code = 1
            stdout = "\n".join(stdout_parts)
            stderr = "\n".join(stderr_parts)
            logger.info(f"Aliyun Code Interpreter: stdout length={len(stdout)}, stderr length={len(stderr)}, exit_code={exit_code}")
            if stdout:
                logger.debug(f"Aliyun Code Interpreter: stdout (first 200 chars): {stdout[:200]}")
            if stderr:
                logger.debug(f"Aliyun Code Interpreter: stderr (first 200 chars): {stderr[:200]}")
            return ExecutionResult(
                stdout=stdout,
                stderr=stderr,
                exit_code=exit_code,
                execution_time=execution_time,
                metadata={
                    "instance_id": instance_id,
                    "language": normalized_lang,
                    "context_id": result.get("contextId") if isinstance(result, dict) else None,
                    "timeout": timeout,
                },
            )
        except ServerError as e:
            if "timeout" in str(e).lower():
                raise TimeoutError(f"Execution timed out after {timeout} seconds")
            raise RuntimeError(f"Failed to execute code: {str(e)}")
        except Exception as e:
            raise RuntimeError(f"Unexpected error during execution: {str(e)}")
    def destroy_instance(self, instance_id: str) -> bool:
        """
        Destroy an Aliyun Code Interpreter instance.

        Args:
            instance_id: ID of the instance to destroy

        Returns:
            True if destruction successful, False otherwise
        """
        if not self._initialized or not self._config:
            raise RuntimeError("Provider not initialized. Call initialize() first.")
        try:
            # Delete sandbox by ID directly.
            # NOTE(review): unlike Sandbox.create/connect, no config=self._config
            # is passed here — confirm the SDK resolves credentials globally for
            # delete_by_id, otherwise this may target the wrong account.
            Sandbox.delete_by_id(sandbox_id=instance_id)
            logger.info(f"Successfully destroyed sandbox instance {instance_id}")
            return True
        except ServerError as e:
            logger.error(f"Failed to destroy instance {instance_id}: {str(e)}")
            return False
        except Exception as e:
            logger.error(f"Unexpected error destroying instance {instance_id}: {str(e)}")
            return False
    def health_check(self) -> bool:
        """
        Check if the Aliyun Code Interpreter service is accessible.

        Returns:
            True if provider is healthy, False otherwise
        """
        # May be called from initialize() before _initialized is set, as long
        # as credentials are already present.
        if not self._initialized and not (self.access_key_id and self.account_id):
            return False
        try:
            # Try to list templates to verify connection
            # (Template is already imported at module level.)
            templates = Template.list(config=self._config)
            return templates is not None
        except Exception as e:
            logger.warning(f"Aliyun Code Interpreter health check failed: {str(e)}")
            # If we get any response (even an error), the service is reachable
            return "connection" not in str(e).lower()
    def get_supported_languages(self) -> List[str]:
        """
        Get list of supported programming languages.

        Returns:
            List of language identifiers
        """
        return ["python", "javascript"]
    @staticmethod
    def get_config_schema() -> Dict[str, Dict]:
        """
        Return configuration schema for Aliyun Code Interpreter provider.

        Returns:
            Dictionary mapping field names to their schema definitions
        """
        return {
            "access_key_id": {
                "type": "string",
                "required": True,
                "label": "Access Key ID",
                "placeholder": "LTAI5t...",
                "description": "Aliyun AccessKey ID for authentication",
                "secret": False,
            },
            "access_key_secret": {
                "type": "string",
                "required": True,
                "label": "Access Key Secret",
                "placeholder": "••••••••••••••••",
                "description": "Aliyun AccessKey Secret for authentication",
                "secret": True,
            },
            "account_id": {
                "type": "string",
                "required": True,
                "label": "Account ID",
                "placeholder": "1234567890...",
                "description": "Aliyun primary account ID (主账号ID), required for API calls",
            },
            "region": {
                "type": "string",
                "required": False,
                "label": "Region",
                "default": "cn-hangzhou",
                "description": "Aliyun region for Code Interpreter service",
                "options": ["cn-hangzhou", "cn-beijing", "cn-shanghai", "cn-shenzhen", "cn-guangzhou"],
            },
            "template_name": {
                "type": "string",
                "required": False,
                "label": "Template Name",
                "placeholder": "my-interpreter",
                "description": "Optional sandbox template name for pre-configured environments",
            },
            "timeout": {
                "type": "integer",
                "required": False,
                "label": "Execution Timeout (seconds)",
                "default": 30,
                "min": 1,
                "max": 30,
                "description": "Code execution timeout (max 30 seconds - hard limit)",
            },
        }
    def validate_config(self, config: Dict[str, Any]) -> tuple[bool, Optional[str]]:
        """
        Validate Aliyun-specific configuration.

        Args:
            config: Configuration dictionary to validate

        Returns:
            Tuple of (is_valid, error_message)
        """
        # Validate access key format
        access_key_id = config.get("access_key_id", "")
        if access_key_id and not access_key_id.startswith("LTAI"):
            return False, "Invalid AccessKey ID format (should start with 'LTAI')"
        # Validate account ID
        account_id = config.get("account_id", "")
        if not account_id:
            return False, "Account ID is required"
        # Validate region
        valid_regions = ["cn-hangzhou", "cn-beijing", "cn-shanghai", "cn-shenzhen", "cn-guangzhou"]
        region = config.get("region", "cn-hangzhou")
        if region and region not in valid_regions:
            return False, f"Invalid region. Must be one of: {', '.join(valid_regions)}"
        # Validate timeout range (max 30 seconds). Accept any real number:
        # the previous int-only check let out-of-range floats slip through.
        timeout = config.get("timeout", 30)
        if isinstance(timeout, (int, float)) and (timeout < 1 or timeout > 30):
            return False, "Timeout must be between 1 and 30 seconds"
        return True, None
    def _normalize_language(self, language: str) -> str:
        """
        Normalize language identifier to Aliyun format.

        Args:
            language: Language identifier (python, python3, javascript, nodejs)

        Returns:
            Normalized language identifier
        """
        if not language:
            return "python"
        lang_lower = language.lower()
        if lang_lower in ("python", "python3"):
            return "python"
        elif lang_lower in ("javascript", "nodejs"):
            return "javascript"
        else:
            return language
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/sandbox/providers/aliyun_codeinterpreter.py",
"license": "Apache License 2.0",
"lines": 428,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/sandbox/providers/base.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base interface for sandbox providers.
Each sandbox provider (self-managed, SaaS) implements this interface
to provide code execution capabilities.
"""
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Dict, Any, Optional, List
@dataclass
class SandboxInstance:
    """Represents a sandbox execution instance."""
    instance_id: str  # provider-assigned identifier
    provider: str  # provider key, e.g. "self_managed", "e2b"
    status: str  # running, stopped, error
    # Optional with a default: __post_init__ already None-checked this field,
    # so callers may now simply omit it; it is normalized to {} below.
    metadata: Optional[Dict[str, Any]] = None
    def __post_init__(self):
        # Normalize a missing metadata payload so consumers never None-check.
        if self.metadata is None:
            self.metadata = {}
@dataclass
class ExecutionResult:
    """Result of code execution in a sandbox."""
    stdout: str
    stderr: str
    exit_code: int
    execution_time: float  # in seconds
    # Optional with a default: __post_init__ already None-checked this field,
    # so callers may now simply omit it; it is normalized to {} below.
    metadata: Optional[Dict[str, Any]] = None
    def __post_init__(self):
        # Normalize a missing metadata payload so consumers never None-check.
        if self.metadata is None:
            self.metadata = {}
class SandboxProvider(ABC):
    """
    Abstract contract shared by every sandbox backend.

    Concrete providers (self-managed, Aliyun OpenSandbox, E2B, etc.) implement
    these methods to expose code-execution capabilities to the agent layer.
    """

    @abstractmethod
    def initialize(self, config: Dict[str, Any]) -> bool:
        """
        Prepare the provider for use with the given configuration.

        Args:
            config: Provider-specific configuration dictionary

        Returns:
            True when the provider is ready, False otherwise
        """
        ...

    @abstractmethod
    def create_instance(self, template: str = "python") -> SandboxInstance:
        """
        Spin up a fresh sandbox instance.

        Args:
            template: Language/template for the instance
                (e.g. "python", "nodejs", "bash")

        Returns:
            SandboxInstance describing the newly created instance

        Raises:
            RuntimeError: If the instance could not be created
        """
        ...

    @abstractmethod
    def execute_code(
        self,
        instance_id: str,
        code: str,
        language: str,
        timeout: int = 10,
        arguments: Optional[Dict[str, Any]] = None
    ) -> ExecutionResult:
        """
        Run code inside an existing sandbox instance.

        Args:
            instance_id: ID of the sandbox instance
            code: Source code to execute
            language: Programming language (python, javascript, etc.)
            timeout: Maximum execution time in seconds
            arguments: Optional arguments dict to pass to main() function

        Returns:
            ExecutionResult with stdout, stderr, exit_code, and metadata

        Raises:
            RuntimeError: If execution fails
            TimeoutError: If execution exceeds the timeout
        """
        ...

    @abstractmethod
    def destroy_instance(self, instance_id: str) -> bool:
        """
        Tear down a sandbox instance.

        Args:
            instance_id: ID of the instance to destroy

        Returns:
            True on successful destruction, False otherwise

        Raises:
            RuntimeError: If destruction fails
        """
        ...

    @abstractmethod
    def health_check(self) -> bool:
        """
        Probe whether the provider is reachable and working.

        Returns:
            True if healthy, False otherwise
        """
        ...

    @abstractmethod
    def get_supported_languages(self) -> List[str]:
        """
        List the programming languages this provider can run.

        Returns:
            Language identifiers, e.g. ["python", "javascript", "go"]
        """
        ...

    @staticmethod
    def get_config_schema() -> Dict[str, Dict]:
        """
        Describe the provider's configuration fields.

        The schema drives UI rendering and basic validation: required flags,
        types, labels, defaults and bounds per field. The base implementation
        declares no fields.

        Returns:
            Mapping of field name -> schema definition, e.g.::

                {
                    "endpoint": {
                        "type": "string",
                        "required": True,
                        "label": "API Endpoint",
                        "placeholder": "http://localhost:9385"
                    },
                    "timeout": {
                        "type": "integer",
                        "default": 30,
                        "label": "Timeout (seconds)",
                        "min": 5,
                        "max": 300
                    }
                }
        """
        return {}

    def validate_config(self, config: Dict[str, Any]) -> tuple[bool, Optional[str]]:
        """
        Run provider-specific validation beyond the basic schema checks.

        Override to add checks such as URL or API-key format validation.
        The base implementation accepts any configuration.

        Args:
            config: Configuration dictionary to validate

        Returns:
            (is_valid, error_message) — error_message is None when valid.

        Example:
            >>> def validate_config(self, config):
            >>>     endpoint = config.get("endpoint", "")
            >>>     if not endpoint.startswith(("http://", "https://")):
            >>>         return False, "Endpoint must start with http:// or https://"
            >>>     return True, None
        """
        return True, None
"repo_id": "infiniflow/ragflow",
"file_path": "agent/sandbox/providers/base.py",
"license": "Apache License 2.0",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
infiniflow/ragflow:agent/sandbox/providers/e2b.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
E2B provider implementation.
This provider integrates with E2B Cloud for cloud-based code execution
using Firecracker microVMs.
"""
import uuid
from typing import Dict, Any, List
from .base import SandboxProvider, SandboxInstance, ExecutionResult
class E2BProvider(SandboxProvider):
    """
    Sandbox provider backed by the E2B Cloud service.

    Code execution happens remotely inside Firecracker microVMs managed
    by E2B.  Several operations are still placeholders: the real API
    client has not been wired up yet (see the TODO markers below).
    """

    def __init__(self):
        # Connection settings; all of these are overwritten by initialize().
        self.api_key: str = ""
        self.region: str = "us"
        self.timeout: int = 30
        self._initialized: bool = False

    def initialize(self, config: Dict[str, Any]) -> bool:
        """
        Configure the provider from a settings dictionary.

        Args:
            config: Dictionary accepting the keys:
                - api_key: E2B API key (required)
                - region: Service region, "us" or "eu" (default: "us")
                - timeout: Request timeout in seconds (default: 30)

        Returns:
            True when an API key was supplied, False otherwise.
        """
        self.api_key = config.get("api_key", "")
        self.region = config.get("region", "us")
        self.timeout = config.get("timeout", 30)

        # The API key is the only mandatory field.
        if not self.api_key:
            return False

        # TODO: Implement actual E2B API client initialization.
        # For now we only record that configuration succeeded; real API
        # calls will still fail.
        self._initialized = True
        return True

    def _require_initialized(self) -> None:
        """Raise RuntimeError unless initialize() has completed successfully."""
        if not self._initialized:
            raise RuntimeError("Provider not initialized. Call initialize() first.")

    def create_instance(self, template: str = "python") -> SandboxInstance:
        """
        Allocate a new sandbox instance in E2B.

        Args:
            template: Programming language template (python, nodejs, go, bash)

        Returns:
            A SandboxInstance handle with a freshly generated UUID.

        Raises:
            RuntimeError: If the provider was never initialized.
        """
        self._require_initialized()

        # TODO: Implement actual E2B API call (POST /sandbox with template).
        return SandboxInstance(
            instance_id=str(uuid.uuid4()),
            provider="e2b",
            status="running",
            metadata={
                "language": self._normalize_language(template),
                "region": self.region,
            },
        )

    def execute_code(
        self,
        instance_id: str,
        code: str,
        language: str,
        timeout: int = 10
    ) -> ExecutionResult:
        """
        Execute code in the E2B instance.

        Args:
            instance_id: ID of the sandbox instance
            code: Source code to execute
            language: Programming language (python, nodejs, go, bash)
            timeout: Maximum execution time in seconds

        Returns:
            ExecutionResult containing stdout, stderr, exit_code, and metadata

        Raises:
            RuntimeError: Always, until the E2B API integration lands.
            TimeoutError: If execution exceeds timeout (future behaviour).
        """
        self._require_initialized()

        # TODO: Implement actual E2B API call (POST /sandbox/{sandboxID}/execute).
        raise RuntimeError(
            "E2B provider is not yet fully implemented. "
            "Please use the self-managed provider or implement the E2B API integration. "
            "See https://github.com/e2b-dev/e2b for API documentation."
        )

    def destroy_instance(self, instance_id: str) -> bool:
        """
        Destroy an E2B instance.

        Args:
            instance_id: ID of the instance to destroy

        Returns:
            True if destruction successful, False otherwise.
        """
        self._require_initialized()
        # TODO: Implement actual E2B API call (DELETE /sandbox/{sandboxID}).
        return True

    def health_check(self) -> bool:
        """
        Check if the E2B service is accessible.

        Returns:
            True if provider is healthy, False otherwise.
        """
        # TODO: Implement actual E2B health check API call (GET /healthz).
        # Until then, an initialized provider with an API key is "healthy".
        return self._initialized and bool(self.api_key)

    def get_supported_languages(self) -> List[str]:
        """Return the list of language identifiers this provider accepts."""
        return ["python", "nodejs", "javascript", "go", "bash"]

    @staticmethod
    def get_config_schema() -> Dict[str, Dict]:
        """
        Return configuration schema for E2B provider.

        Returns:
            Dictionary mapping field names to their schema definitions.
        """
        return {
            "api_key": {
                "type": "string",
                "required": True,
                "label": "API Key",
                "placeholder": "e2b_sk_...",
                "description": "E2B API key for authentication",
                "secret": True,
            },
            "region": {
                "type": "string",
                "required": False,
                "label": "Region",
                "default": "us",
                "description": "E2B service region (us or eu)",
            },
            "timeout": {
                "type": "integer",
                "required": False,
                "label": "Request Timeout (seconds)",
                "default": 30,
                "min": 5,
                "max": 300,
                "description": "API request timeout for code execution",
            }
        }

    def _normalize_language(self, language: str) -> str:
        """
        Map a user-supplied language name onto an E2B template name.

        Unknown names pass through unchanged; empty input defaults to
        "python".
        """
        if not language:
            return "python"
        aliases = {
            "python": "python",
            "python3": "python",
            "javascript": "nodejs",
            "nodejs": "nodejs",
        }
        return aliases.get(language.lower(), language)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/sandbox/providers/e2b.py",
"license": "Apache License 2.0",
"lines": 192,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/sandbox/providers/manager.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Provider manager for sandbox providers.
Since sandbox configuration is global (system-level), we only use one
active provider at a time. This manager is a thin wrapper that holds a reference
to the currently active provider.
"""
from typing import Optional
from .base import SandboxProvider
class ProviderManager:
    """
    Holds the single active sandbox provider.

    Sandbox configuration is system-wide, so at most one provider is in
    use at any time; this class is simply a typed slot for it.
    """

    def __init__(self):
        """Start out with no provider configured."""
        self.current_provider: Optional[SandboxProvider] = None
        self.current_provider_name: Optional[str] = None

    def set_provider(self, name: str, provider: SandboxProvider):
        """
        Make the given provider the active one.

        Args:
            name: Provider identifier (e.g., "self_managed", "e2b")
            provider: Provider instance
        """
        self.current_provider_name = name
        self.current_provider = provider

    def get_provider(self) -> Optional[SandboxProvider]:
        """Return the active SandboxProvider instance, or None when unset."""
        return self.current_provider

    def get_provider_name(self) -> Optional[str]:
        """Return the active provider's identifier, or None when unset."""
        return self.current_provider_name

    def is_configured(self) -> bool:
        """Report whether an active provider has been set."""
        return self.current_provider is not None
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/sandbox/providers/manager.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
infiniflow/ragflow:agent/sandbox/providers/self_managed.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Self-managed sandbox provider implementation.
This provider wraps the existing executor_manager HTTP API which manages
a pool of Docker containers with gVisor for secure code execution.
"""
import base64
import time
import uuid
from typing import Dict, Any, List, Optional
import requests
from .base import SandboxProvider, SandboxInstance, ExecutionResult
class SelfManagedProvider(SandboxProvider):
    """
    Self-managed sandbox provider using Daytona/Docker.

    This provider communicates with the executor_manager HTTP API
    which manages a pool of containers for code execution.
    """

    def __init__(self):
        # Connection defaults; all of these are overwritten by initialize().
        self.endpoint: str = "http://localhost:9385"
        self.timeout: int = 30
        self.max_retries: int = 3
        self.pool_size: int = 10
        self._initialized: bool = False

    def initialize(self, config: Dict[str, Any]) -> bool:
        """
        Initialize the provider with configuration.

        Args:
            config: Configuration dictionary with keys:
                - endpoint: HTTP endpoint (default: "http://localhost:9385")
                - timeout: Request timeout in seconds (default: 30)
                - max_retries: Maximum retry attempts (default: 3)
                - pool_size: Container pool size for info (default: 10)

        Returns:
            True if the executor_manager endpoint is reachable, False otherwise.
        """
        self.endpoint = config.get("endpoint", "http://localhost:9385")
        self.timeout = config.get("timeout", 30)
        self.max_retries = config.get("max_retries", 3)
        self.pool_size = config.get("pool_size", 10)

        # Validate endpoint is accessible.
        if not self.health_check():
            # When the configured endpoint is a localhost address, retry with
            # settings.SANDBOX_HOST (useful inside Docker, where "localhost"
            # resolves to the wrong container).
            if "localhost" in self.endpoint or "127.0.0.1" in self.endpoint:
                try:
                    from api import settings

                    if settings.SANDBOX_HOST and settings.SANDBOX_HOST not in self.endpoint:
                        original_endpoint = self.endpoint
                        self.endpoint = f"http://{settings.SANDBOX_HOST}:9385"
                        if self.health_check():
                            import logging

                            logging.warning(f"Sandbox self_managed: Connected using settings.SANDBOX_HOST fallback: {self.endpoint} (original: {original_endpoint})")
                            self._initialized = True
                            return True
                        else:
                            self.endpoint = original_endpoint  # Restore if fallback also fails
                except ImportError:
                    # api.settings is unavailable when running standalone.
                    pass
            return False

        self._initialized = True
        return True

    def create_instance(self, template: str = "python") -> SandboxInstance:
        """
        Create a new sandbox instance.

        Note: For self-managed provider, instances are managed internally
        by the executor_manager's container pool. This method returns
        a logical instance handle.

        Args:
            template: Programming language (python, nodejs)

        Returns:
            SandboxInstance object

        Raises:
            RuntimeError: If the provider was never initialized.
        """
        if not self._initialized:
            raise RuntimeError("Provider not initialized. Call initialize() first.")

        language = self._normalize_language(template)

        # The executor_manager manages instances internally via container pool;
        # we only create a logical instance ID for tracking.
        instance_id = str(uuid.uuid4())

        return SandboxInstance(
            instance_id=instance_id,
            provider="self_managed",
            status="running",
            metadata={
                "language": language,
                "endpoint": self.endpoint,
                "pool_size": self.pool_size,
            }
        )

    def execute_code(
        self,
        instance_id: str,
        code: str,
        language: str,
        timeout: int = 10,
        arguments: Optional[Dict[str, Any]] = None
    ) -> ExecutionResult:
        """
        Execute code in the sandbox.

        Args:
            instance_id: ID of the sandbox instance (not used for self-managed)
            code: Source code to execute
            language: Programming language (python, nodejs, javascript)
            timeout: Maximum execution time in seconds
            arguments: Optional arguments dict to pass to main() function

        Returns:
            ExecutionResult containing stdout, stderr, exit_code, and metadata

        Raises:
            RuntimeError: If execution fails (non-200 response or transport error)
            TimeoutError: If execution exceeds timeout
        """
        if not self._initialized:
            raise RuntimeError("Provider not initialized. Call initialize() first.")

        normalized_lang = self._normalize_language(language)

        # executor_manager expects the source base64-encoded.
        code_b64 = base64.b64encode(code.encode("utf-8")).decode("utf-8")
        payload = {
            "code_b64": code_b64,
            "language": normalized_lang,
            "arguments": arguments or {}
        }

        url = f"{self.endpoint}/run"
        exec_timeout = timeout or self.timeout
        start_time = time.time()

        try:
            response = requests.post(
                url,
                json=payload,
                timeout=exec_timeout,
                headers={"Content-Type": "application/json"}
            )
            execution_time = time.time() - start_time

            if response.status_code != 200:
                raise RuntimeError(
                    f"HTTP {response.status_code}: {response.text}"
                )

            result = response.json()

            return ExecutionResult(
                stdout=result.get("stdout", ""),
                stderr=result.get("stderr", ""),
                exit_code=result.get("exit_code", 0),
                execution_time=execution_time,
                metadata={
                    "status": result.get("status"),
                    "time_used_ms": result.get("time_used_ms"),
                    "memory_used_kb": result.get("memory_used_kb"),
                    "detail": result.get("detail"),
                    "instance_id": instance_id,
                }
            )
        except requests.Timeout as exc:
            # FIX: drop the dead execution_time computation that was discarded
            # here, and chain the original exception for debuggability.
            raise TimeoutError(
                f"Execution timed out after {exec_timeout} seconds"
            ) from exc
        except requests.RequestException as exc:
            # FIX: chain the cause instead of discarding the traceback context.
            raise RuntimeError(f"HTTP request failed: {str(exc)}") from exc

    def destroy_instance(self, instance_id: str) -> bool:
        """
        Destroy a sandbox instance.

        Note: For self-managed provider, instances are returned to the
        internal pool automatically by executor_manager after execution.
        This is a no-op for tracking purposes.

        Args:
            instance_id: ID of the instance to destroy

        Returns:
            True (always succeeds for self-managed)
        """
        # The executor_manager manages container lifecycle internally;
        # the container is returned to the pool after execution.
        return True

    def health_check(self) -> bool:
        """
        Check if the provider is healthy and accessible.

        Returns:
            True if provider is healthy, False otherwise
        """
        try:
            url = f"{self.endpoint}/healthz"
            response = requests.get(url, timeout=5)
            return response.status_code == 200
        except Exception:
            # Any transport failure simply means "not healthy".
            return False

    def get_supported_languages(self) -> List[str]:
        """
        Get list of supported programming languages.

        Returns:
            List of language identifiers
        """
        return ["python", "nodejs", "javascript"]

    @staticmethod
    def get_config_schema() -> Dict[str, Dict]:
        """
        Return configuration schema for self-managed provider.

        Returns:
            Dictionary mapping field names to their schema definitions
        """
        return {
            "endpoint": {
                "type": "string",
                "required": True,
                "label": "Executor Manager Endpoint",
                "placeholder": "http://localhost:9385",
                "default": "http://localhost:9385",
                "description": "HTTP endpoint of the executor_manager service"
            },
            "timeout": {
                "type": "integer",
                "required": False,
                "label": "Request Timeout (seconds)",
                "default": 30,
                "min": 5,
                "max": 300,
                "description": "HTTP request timeout for code execution"
            },
            "max_retries": {
                "type": "integer",
                "required": False,
                "label": "Max Retries",
                "default": 3,
                "min": 0,
                "max": 10,
                "description": "Maximum number of retry attempts for failed requests"
            },
            "pool_size": {
                "type": "integer",
                "required": False,
                "label": "Container Pool Size",
                "default": 10,
                "min": 1,
                "max": 100,
                "description": "Size of the container pool (configured in executor_manager)"
            }
        }

    def _normalize_language(self, language: str) -> str:
        """
        Normalize language identifier to executor_manager format.

        Args:
            language: Language identifier (python, python3, nodejs, javascript)

        Returns:
            Normalized language identifier (unknown names pass through)
        """
        if not language:
            return "python"

        lang_lower = language.lower()
        if lang_lower in ("python", "python3"):
            return "python"
        elif lang_lower in ("javascript", "nodejs"):
            return "nodejs"
        else:
            return language

    def validate_config(self, config: dict) -> tuple[bool, Optional[str]]:
        """
        Validate self-managed provider configuration.

        Performs custom validation beyond the basic schema validation,
        such as checking URL format.

        Args:
            config: Configuration dictionary to validate

        Returns:
            Tuple of (is_valid, error_message)
        """
        # Validate endpoint URL format.
        endpoint = config.get("endpoint", "")
        if endpoint:
            import re
            # FIX: the previous pattern carried dead alternatives (e.g.
            # r'http://[\d\.]+:[a-z]+:[/]') that were all subsumed by the
            # leading 'https?://' branch; this is the equivalent check.
            if not re.match(r"^https?://", endpoint):
                return False, f"Invalid endpoint format: {endpoint}. Must start with http:// or https://"

        # Validate pool_size is positive.
        pool_size = config.get("pool_size", 10)
        if isinstance(pool_size, int) and pool_size <= 0:
            return False, "Pool size must be greater than 0"

        # Validate timeout is reasonable.
        timeout = config.get("timeout", 30)
        if isinstance(timeout, int) and (timeout < 1 or timeout > 600):
            return False, "Timeout must be between 1 and 600 seconds"

        # Validate max_retries.
        max_retries = config.get("max_retries", 3)
        if isinstance(max_retries, int) and (max_retries < 0 or max_retries > 10):
            return False, "Max retries must be between 0 and 10"

        return True, None
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/sandbox/providers/self_managed.py",
"license": "Apache License 2.0",
"lines": 299,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/sandbox/tests/test_aliyun_codeinterpreter.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for Aliyun Code Interpreter provider.
These tests use mocks and don't require real Aliyun credentials.
Official Documentation: https://help.aliyun.com/zh/functioncompute/fc/sandbox-sandbox-code-interepreter
Official SDK: https://github.com/Serverless-Devs/agentrun-sdk-python
"""
import pytest
from unittest.mock import patch, MagicMock
from agent.sandbox.providers.base import SandboxProvider
from agent.sandbox.providers.aliyun_codeinterpreter import AliyunCodeInterpreterProvider
class TestAliyunCodeInterpreterProvider:
    """Test AliyunCodeInterpreterProvider implementation.

    The agentrun SDK entry points (Template, CodeInterpreterSandbox) are
    patched in each test, so no real Aliyun credentials or network access
    are needed.
    """

    def test_provider_initialization(self):
        """Test provider initialization."""
        provider = AliyunCodeInterpreterProvider()
        # A freshly constructed provider carries empty credentials and defaults.
        assert provider.access_key_id == ""
        assert provider.access_key_secret == ""
        assert provider.account_id == ""
        assert provider.region == "cn-hangzhou"
        assert provider.template_name == ""
        assert provider.timeout == 30
        assert not provider._initialized

    @patch("agent.sandbox.providers.aliyun_codeinterpreter.Template")
    def test_initialize_success(self, mock_template):
        """Test successful initialization."""
        # Mock health check response
        mock_template.list.return_value = []
        provider = AliyunCodeInterpreterProvider()
        result = provider.initialize(
            {
                "access_key_id": "LTAI5tXXXXXXXXXX",
                "access_key_secret": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
                "account_id": "1234567890123456",
                "region": "cn-hangzhou",
                "template_name": "python-sandbox",
                "timeout": 20,
            }
        )
        assert result is True
        # Every supplied config value must be copied onto the provider.
        assert provider.access_key_id == "LTAI5tXXXXXXXXXX"
        assert provider.access_key_secret == "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
        assert provider.account_id == "1234567890123456"
        assert provider.region == "cn-hangzhou"
        assert provider.template_name == "python-sandbox"
        assert provider.timeout == 20
        assert provider._initialized

    def test_initialize_missing_credentials(self):
        """Test initialization with missing credentials."""
        provider = AliyunCodeInterpreterProvider()
        # Missing access_key_id
        result = provider.initialize({"access_key_secret": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"})
        assert result is False
        # Missing access_key_secret
        result = provider.initialize({"access_key_id": "LTAI5tXXXXXXXXXX"})
        assert result is False
        # Missing account_id
        provider2 = AliyunCodeInterpreterProvider()
        result = provider2.initialize({"access_key_id": "LTAI5tXXXXXXXXXX", "access_key_secret": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"})
        assert result is False

    @patch("agent.sandbox.providers.aliyun_codeinterpreter.Template")
    def test_initialize_default_config(self, mock_template):
        """Test initialization with default config."""
        mock_template.list.return_value = []
        provider = AliyunCodeInterpreterProvider()
        result = provider.initialize({"access_key_id": "LTAI5tXXXXXXXXXX", "access_key_secret": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "account_id": "1234567890123456"})
        assert result is True
        # Omitted optional fields fall back to their documented defaults.
        assert provider.region == "cn-hangzhou"
        assert provider.template_name == ""

    @patch("agent.sandbox.providers.aliyun_codeinterpreter.CodeInterpreterSandbox")
    def test_create_instance_python(self, mock_sandbox_class):
        """Test creating a Python instance."""
        # Mock successful instance creation
        mock_sandbox = MagicMock()
        mock_sandbox.sandbox_id = "01JCED8Z9Y6XQVK8M2NRST5WXY"
        mock_sandbox_class.return_value = mock_sandbox
        provider = AliyunCodeInterpreterProvider()
        # Bypass initialize() by setting internal state directly.
        provider._initialized = True
        provider._config = MagicMock()
        instance = provider.create_instance("python")
        assert instance.provider == "aliyun_codeinterpreter"
        assert instance.status == "READY"
        assert instance.metadata["language"] == "python"

    @patch("agent.sandbox.providers.aliyun_codeinterpreter.CodeInterpreterSandbox")
    def test_create_instance_javascript(self, mock_sandbox_class):
        """Test creating a JavaScript instance."""
        mock_sandbox = MagicMock()
        mock_sandbox.sandbox_id = "01JCED8Z9Y6XQVK8M2NRST5WXY"
        mock_sandbox_class.return_value = mock_sandbox
        provider = AliyunCodeInterpreterProvider()
        provider._initialized = True
        provider._config = MagicMock()
        instance = provider.create_instance("javascript")
        assert instance.metadata["language"] == "javascript"

    def test_create_instance_not_initialized(self):
        """Test creating instance when provider not initialized."""
        provider = AliyunCodeInterpreterProvider()
        with pytest.raises(RuntimeError, match="Provider not initialized"):
            provider.create_instance("python")

    @patch("agent.sandbox.providers.aliyun_codeinterpreter.CodeInterpreterSandbox")
    def test_execute_code_success(self, mock_sandbox_class):
        """Test successful code execution."""
        # Mock sandbox instance
        mock_sandbox = MagicMock()
        # Simulated SDK response: stdout entry plus a normal end-of-execution.
        mock_sandbox.context.execute.return_value = {
            "results": [{"type": "stdout", "text": "Hello, World!"}, {"type": "result", "text": "None"}, {"type": "endOfExecution", "status": "ok"}],
            "contextId": "kernel-12345-67890",
        }
        mock_sandbox_class.return_value = mock_sandbox
        provider = AliyunCodeInterpreterProvider()
        provider._initialized = True
        provider._config = MagicMock()
        result = provider.execute_code(instance_id="01JCED8Z9Y6XQVK8M2NRST5WXY", code="print('Hello, World!')", language="python", timeout=10)
        assert result.stdout == "Hello, World!"
        assert result.stderr == ""
        assert result.exit_code == 0
        assert result.execution_time > 0

    @patch("agent.sandbox.providers.aliyun_codeinterpreter.CodeInterpreterSandbox")
    def test_execute_code_timeout(self, mock_sandbox_class):
        """Test code execution timeout."""
        from agentrun.utils.exception import ServerError

        # An HTTP 408 from the SDK must surface as a TimeoutError.
        mock_sandbox = MagicMock()
        mock_sandbox.context.execute.side_effect = ServerError(408, "Request timeout")
        mock_sandbox_class.return_value = mock_sandbox
        provider = AliyunCodeInterpreterProvider()
        provider._initialized = True
        provider._config = MagicMock()
        with pytest.raises(TimeoutError, match="Execution timed out"):
            provider.execute_code(instance_id="01JCED8Z9Y6XQVK8M2NRST5WXY", code="while True: pass", language="python", timeout=5)

    @patch("agent.sandbox.providers.aliyun_codeinterpreter.CodeInterpreterSandbox")
    def test_execute_code_with_error(self, mock_sandbox_class):
        """Test code execution with error."""
        mock_sandbox = MagicMock()
        # Simulated SDK response for a failing execution.
        mock_sandbox.context.execute.return_value = {
            "results": [{"type": "stderr", "text": "Traceback..."}, {"type": "error", "text": "NameError: name 'x' is not defined"}, {"type": "endOfExecution", "status": "error"}]
        }
        mock_sandbox_class.return_value = mock_sandbox
        provider = AliyunCodeInterpreterProvider()
        provider._initialized = True
        provider._config = MagicMock()
        result = provider.execute_code(instance_id="01JCED8Z9Y6XQVK8M2NRST5WXY", code="print(x)", language="python")
        assert result.exit_code != 0
        assert len(result.stderr) > 0

    def test_get_supported_languages(self):
        """Test getting supported languages."""
        provider = AliyunCodeInterpreterProvider()
        languages = provider.get_supported_languages()
        assert "python" in languages
        assert "javascript" in languages

    def test_get_config_schema(self):
        """Test getting configuration schema."""
        schema = AliyunCodeInterpreterProvider.get_config_schema()
        # The three credential fields are mandatory; the rest are optional.
        assert "access_key_id" in schema
        assert schema["access_key_id"]["required"] is True
        assert "access_key_secret" in schema
        assert schema["access_key_secret"]["required"] is True
        assert "account_id" in schema
        assert schema["account_id"]["required"] is True
        assert "region" in schema
        assert "template_name" in schema
        assert "timeout" in schema

    def test_validate_config_success(self):
        """Test successful configuration validation."""
        provider = AliyunCodeInterpreterProvider()
        is_valid, error_msg = provider.validate_config({"access_key_id": "LTAI5tXXXXXXXXXX", "account_id": "1234567890123456", "region": "cn-hangzhou"})
        assert is_valid is True
        assert error_msg is None

    def test_validate_config_invalid_access_key(self):
        """Test validation with invalid access key format."""
        provider = AliyunCodeInterpreterProvider()
        is_valid, error_msg = provider.validate_config({"access_key_id": "INVALID_KEY"})
        assert is_valid is False
        assert "AccessKey ID format" in error_msg

    def test_validate_config_missing_account_id(self):
        """Test validation with missing account ID."""
        provider = AliyunCodeInterpreterProvider()
        is_valid, error_msg = provider.validate_config({})
        assert is_valid is False
        assert "Account ID" in error_msg

    def test_validate_config_invalid_region(self):
        """Test validation with invalid region."""
        provider = AliyunCodeInterpreterProvider()
        is_valid, error_msg = provider.validate_config(
            {
                "access_key_id": "LTAI5tXXXXXXXXXX",
                "account_id": "1234567890123456",  # Provide required field
                "region": "us-west-1",
            }
        )
        assert is_valid is False
        assert "Invalid region" in error_msg

    def test_validate_config_invalid_timeout(self):
        """Test validation with invalid timeout (> 30 seconds)."""
        provider = AliyunCodeInterpreterProvider()
        is_valid, error_msg = provider.validate_config(
            {
                "access_key_id": "LTAI5tXXXXXXXXXX",
                "account_id": "1234567890123456",  # Provide required field
                "timeout": 60,
            }
        )
        assert is_valid is False
        assert "Timeout must be between 1 and 30 seconds" in error_msg

    def test_normalize_language_python(self):
        """Test normalizing Python language identifier."""
        provider = AliyunCodeInterpreterProvider()
        assert provider._normalize_language("python") == "python"
        assert provider._normalize_language("python3") == "python"
        assert provider._normalize_language("PYTHON") == "python"

    def test_normalize_language_javascript(self):
        """Test normalizing JavaScript language identifier."""
        provider = AliyunCodeInterpreterProvider()
        assert provider._normalize_language("javascript") == "javascript"
        assert provider._normalize_language("nodejs") == "javascript"
        assert provider._normalize_language("JavaScript") == "javascript"
class TestAliyunCodeInterpreterInterface:
    """Verify the Aliyun provider satisfies the SandboxProvider contract."""

    def test_aliyun_provider_is_abstract(self):
        """Test that AliyunCodeInterpreterProvider is a SandboxProvider."""
        assert isinstance(AliyunCodeInterpreterProvider(), SandboxProvider)

    def test_aliyun_provider_has_abstract_methods(self):
        """Test that AliyunCodeInterpreterProvider implements all abstract methods."""
        provider = AliyunCodeInterpreterProvider()
        # Each required interface method must exist and be callable.
        for method_name in (
            "initialize",
            "create_instance",
            "execute_code",
            "destroy_instance",
            "health_check",
            "get_supported_languages",
        ):
            assert hasattr(provider, method_name)
            assert callable(getattr(provider, method_name))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/sandbox/tests/test_aliyun_codeinterpreter.py",
"license": "Apache License 2.0",
"lines": 250,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:agent/sandbox/tests/test_aliyun_codeinterpreter_integration.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Integration tests for Aliyun Code Interpreter provider.
These tests require real Aliyun credentials and will make actual API calls.
To run these tests, set the following environment variables:
export AGENTRUN_ACCESS_KEY_ID="LTAI5t..."
export AGENTRUN_ACCESS_KEY_SECRET="..."
export AGENTRUN_ACCOUNT_ID="1234567890..." # Aliyun primary account ID (主账号ID)
export AGENTRUN_REGION="cn-hangzhou" # Note: AGENTRUN_REGION (SDK will read this)
Then run:
pytest agent/sandbox/tests/test_aliyun_codeinterpreter_integration.py -v
Official Documentation: https://help.aliyun.com/zh/functioncompute/fc/sandbox-sandbox-code-interepreter
"""
import os
import pytest
from agent.sandbox.providers.aliyun_codeinterpreter import AliyunCodeInterpreterProvider
# Skip all tests if credentials are not provided
# Skip every test in this module unless real Aliyun credentials are present
# in the environment.
pytestmark = pytest.mark.skipif(
    not all(
        os.getenv(name)
        for name in ("AGENTRUN_ACCESS_KEY_ID", "AGENTRUN_ACCESS_KEY_SECRET", "AGENTRUN_ACCOUNT_ID")
    ),
    reason="Aliyun credentials not set. Set AGENTRUN_ACCESS_KEY_ID, AGENTRUN_ACCESS_KEY_SECRET, and AGENTRUN_ACCOUNT_ID.",
)
@pytest.fixture
def aliyun_config():
    """Build the provider configuration from AGENTRUN_* environment variables."""
    env = os.getenv
    return {
        "access_key_id": env("AGENTRUN_ACCESS_KEY_ID"),
        "access_key_secret": env("AGENTRUN_ACCESS_KEY_SECRET"),
        "account_id": env("AGENTRUN_ACCOUNT_ID"),
        "region": env("AGENTRUN_REGION", "cn-hangzhou"),
        "template_name": env("AGENTRUN_TEMPLATE_NAME", ""),
        "timeout": 30,
    }
@pytest.fixture
def provider(aliyun_config):
    """Return an initialized Aliyun provider, skipping the test if setup fails."""
    instance = AliyunCodeInterpreterProvider()
    if not instance.initialize(aliyun_config):
        pytest.skip("Failed to initialize Aliyun provider. Check credentials, account ID, and network.")
    return instance
@pytest.mark.integration
class TestAliyunCodeInterpreterIntegration:
    """Integration tests for Aliyun Code Interpreter provider.

    These tests talk to the real AgentRun API; every API-dependent test
    skips itself (rather than failing) when the service is unreachable
    or not yet provisioned.
    """
    def test_initialize_provider(self, aliyun_config):
        """Test provider initialization with real credentials."""
        provider = AliyunCodeInterpreterProvider()
        result = provider.initialize(aliyun_config)
        assert result is True
        assert provider._initialized is True
    def test_health_check(self, provider):
        """Test health check with real API."""
        result = provider.health_check()
        assert result is True
    def test_get_supported_languages(self, provider):
        """Test getting supported languages."""
        languages = provider.get_supported_languages()
        assert "python" in languages
        assert "javascript" in languages
        assert isinstance(languages, list)
    def test_create_python_instance(self, provider):
        """Test creating a Python sandbox instance."""
        try:
            instance = provider.create_instance("python")
            assert instance.provider == "aliyun_codeinterpreter"
            # Instance may still be provisioning right after creation.
            assert instance.status in ["READY", "CREATING"]
            assert instance.metadata["language"] == "python"
            assert len(instance.instance_id) > 0
            # Clean up
            provider.destroy_instance(instance.instance_id)
        except Exception as e:
            pytest.skip(f"Instance creation failed: {str(e)}. API might not be available yet.")
    def test_execute_python_code(self, provider):
        """Test executing Python code in the sandbox."""
        try:
            # Create instance
            instance = provider.create_instance("python")
            # Execute simple code
            result = provider.execute_code(
                instance_id=instance.instance_id,
                code="print('Hello from Aliyun Code Interpreter!')\nprint(42)",
                language="python",
                timeout=30,  # Max 30 seconds
            )
            assert result.exit_code == 0
            assert "Hello from Aliyun Code Interpreter!" in result.stdout
            assert "42" in result.stdout
            assert result.execution_time > 0
            # Clean up
            provider.destroy_instance(instance.instance_id)
        except Exception as e:
            pytest.skip(f"Code execution test failed: {str(e)}. API might not be available yet.")
    def test_execute_python_code_with_arguments(self, provider):
        """Test executing Python code with arguments parameter."""
        try:
            # Create instance
            instance = provider.create_instance("python")
            # Execute code with arguments
            result = provider.execute_code(
                instance_id=instance.instance_id,
                code="""def main(name: str, count: int) -> dict:
    return {"message": f"Hello {name}!" * count}
""",
                language="python",
                timeout=30,
                arguments={"name": "World", "count": 2}
            )
            assert result.exit_code == 0
            assert "Hello World!Hello World!" in result.stdout
            # Clean up
            provider.destroy_instance(instance.instance_id)
        except Exception as e:
            pytest.skip(f"Arguments test failed: {str(e)}. API might not be available yet.")
    def test_execute_python_code_with_error(self, provider):
        """Test executing Python code that produces an error."""
        try:
            # Create instance
            instance = provider.create_instance("python")
            # Execute code with error
            result = provider.execute_code(instance_id=instance.instance_id, code="raise ValueError('Test error')", language="python", timeout=30)
            assert result.exit_code != 0
            # The traceback may land on either stream depending on the runtime.
            assert len(result.stderr) > 0 or "ValueError" in result.stdout
            # Clean up
            provider.destroy_instance(instance.instance_id)
        except Exception as e:
            pytest.skip(f"Error handling test failed: {str(e)}. API might not be available yet.")
    def test_execute_javascript_code(self, provider):
        """Test executing JavaScript code in the sandbox."""
        try:
            # Create instance
            instance = provider.create_instance("javascript")
            # Execute simple code
            result = provider.execute_code(instance_id=instance.instance_id, code="console.log('Hello from JavaScript!');", language="javascript", timeout=30)
            assert result.exit_code == 0
            assert "Hello from JavaScript!" in result.stdout
            # Clean up
            provider.destroy_instance(instance.instance_id)
        except Exception as e:
            pytest.skip(f"JavaScript execution test failed: {str(e)}. API might not be available yet.")
    def test_execute_javascript_code_with_arguments(self, provider):
        """Test executing JavaScript code with arguments parameter."""
        try:
            # Create instance
            instance = provider.create_instance("javascript")
            # Execute code with arguments
            result = provider.execute_code(
                instance_id=instance.instance_id,
                code="""function main(args) {
    const { name, count } = args;
    return `Hello ${name}!`.repeat(count);
}""",
                language="javascript",
                timeout=30,
                arguments={"name": "World", "count": 2}
            )
            assert result.exit_code == 0
            assert "Hello World!Hello World!" in result.stdout
            # Clean up
            provider.destroy_instance(instance.instance_id)
        except Exception as e:
            pytest.skip(f"JavaScript arguments test failed: {str(e)}. API might not be available yet.")
    def test_destroy_instance(self, provider):
        """Test destroying a sandbox instance."""
        try:
            # Create instance
            instance = provider.create_instance("python")
            # Destroy instance
            result = provider.destroy_instance(instance.instance_id)
            # Note: The API might return True immediately or async
            assert result is True or result is False
        except Exception as e:
            pytest.skip(f"Destroy instance test failed: {str(e)}. API might not be available yet.")
    def test_config_validation(self, provider):
        """Test configuration validation."""
        # Valid config
        is_valid, error = provider.validate_config({"access_key_id": "LTAI5tXXXXXXXXXX", "account_id": "1234567890123456", "region": "cn-hangzhou", "timeout": 30})
        assert is_valid is True
        assert error is None
        # Invalid access key
        is_valid, error = provider.validate_config({"access_key_id": "INVALID_KEY"})
        assert is_valid is False
        # Missing account ID
        is_valid, error = provider.validate_config({})
        assert is_valid is False
        assert "Account ID" in error
    def test_timeout_limit(self, provider):
        """Test that timeout is limited to 30 seconds."""
        # Timeout > 30 should be clamped to 30
        provider2 = AliyunCodeInterpreterProvider()
        provider2.initialize(
            {
                "access_key_id": os.getenv("AGENTRUN_ACCESS_KEY_ID"),
                "access_key_secret": os.getenv("AGENTRUN_ACCESS_KEY_SECRET"),
                "account_id": os.getenv("AGENTRUN_ACCOUNT_ID"),
                "timeout": 60,  # Request 60 seconds
            }
        )
        # Should be clamped to 30
        assert provider2.timeout == 30
@pytest.mark.integration
class TestAliyunCodeInterpreterScenarios:
    """Test real-world usage scenarios."""

    def test_data_processing_workflow(self, provider):
        """Run a small JSON-serialization workflow inside the sandbox."""
        try:
            sandbox = provider.create_instance("python")
            script = """
import json
data = [{"name": "Alice", "age": 30}, {"name": "Bob", "age": 25}]
result = json.dumps(data, indent=2)
print(result)
"""
            outcome = provider.execute_code(instance_id=sandbox.instance_id, code=script, language="python", timeout=30)
            assert outcome.exit_code == 0
            assert "Alice" in outcome.stdout
            assert "Bob" in outcome.stdout
            provider.destroy_instance(sandbox.instance_id)
        except Exception as e:
            pytest.skip(f"Data processing test failed: {str(e)}")

    def test_string_manipulation(self, provider):
        """Exercise basic str methods remotely and check their output."""
        try:
            sandbox = provider.create_instance("python")
            script = """
text = "Hello, World!"
print(text.upper())
print(text.lower())
print(text.replace("World", "Aliyun"))
"""
            outcome = provider.execute_code(instance_id=sandbox.instance_id, code=script, language="python", timeout=30)
            assert outcome.exit_code == 0
            assert "HELLO, WORLD!" in outcome.stdout
            assert "hello, world!" in outcome.stdout
            assert "Hello, Aliyun!" in outcome.stdout
            provider.destroy_instance(sandbox.instance_id)
        except Exception as e:
            pytest.skip(f"String manipulation test failed: {str(e)}")

    def test_context_persistence(self, provider):
        """Run two executions in one instance; both must succeed."""
        try:
            sandbox = provider.create_instance("python")
            # First execution defines a variable.
            first = provider.execute_code(instance_id=sandbox.instance_id, code="x = 42\nprint(x)", language="python", timeout=30)
            assert first.exit_code == 0
            # Second execution reads it back.  Whether the context actually
            # persists depends on contextId reuse in the API implementation.
            second = provider.execute_code(instance_id=sandbox.instance_id, code="print(f'x is {x}')", language="python", timeout=30)
            assert second.exit_code == 0
            provider.destroy_instance(sandbox.instance_id)
        except Exception as e:
            pytest.skip(f"Context persistence test failed: {str(e)}")
def test_without_credentials():
    """Smoke test that always runs, with or without Aliyun credentials.

    The previous implementation asserted True on both branches of a
    credentials check, leaving the conditional as dead code.  Keep the
    original guarantee (this test never skips and never fails under a
    sane environment) while actually exercising the environment lookups,
    so a broken os.environ shim would surface here.
    """
    credentials = [
        os.getenv("AGENTRUN_ACCESS_KEY_ID"),
        os.getenv("AGENTRUN_ACCESS_KEY_SECRET"),
        os.getenv("AGENTRUN_ACCOUNT_ID"),
    ]
    # Each lookup yields a string when the variable is set, None otherwise.
    assert all(value is None or isinstance(value, str) for value in credentials)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/sandbox/tests/test_aliyun_codeinterpreter_integration.py",
"license": "Apache License 2.0",
"lines": 285,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:agent/sandbox/tests/test_providers.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for sandbox provider abstraction layer.
"""
import pytest
from unittest.mock import Mock, patch
import requests
from agent.sandbox.providers.base import SandboxProvider, SandboxInstance, ExecutionResult
from agent.sandbox.providers.manager import ProviderManager
from agent.sandbox.providers.self_managed import SelfManagedProvider
class TestSandboxDataclasses:
    """Tests for the SandboxInstance and ExecutionResult dataclasses."""

    def test_sandbox_instance_creation(self):
        """A fully-specified SandboxInstance keeps every field as given."""
        sample = SandboxInstance(
            instance_id="test-123",
            provider="self_managed",
            status="running",
            metadata={"language": "python"},
        )
        assert sample.instance_id == "test-123"
        assert sample.provider == "self_managed"
        assert sample.status == "running"
        assert sample.metadata == {"language": "python"}

    def test_sandbox_instance_default_metadata(self):
        """Passing metadata=None normalizes it to an empty dict."""
        sample = SandboxInstance(
            instance_id="test-123",
            provider="self_managed",
            status="running",
            metadata=None,
        )
        assert sample.metadata == {}

    def test_execution_result_creation(self):
        """A fully-specified ExecutionResult keeps every field as given."""
        outcome = ExecutionResult(
            stdout="Hello, World!",
            stderr="",
            exit_code=0,
            execution_time=1.5,
            metadata={"status": "success"},
        )
        assert outcome.stdout == "Hello, World!"
        assert outcome.stderr == ""
        assert outcome.exit_code == 0
        assert outcome.execution_time == 1.5
        assert outcome.metadata == {"status": "success"}

    def test_execution_result_default_metadata(self):
        """Passing metadata=None normalizes it to an empty dict."""
        outcome = ExecutionResult(
            stdout="output",
            stderr="error",
            exit_code=1,
            execution_time=0.5,
            metadata=None,
        )
        assert outcome.metadata == {}
class TestProviderManager:
    """Tests for ProviderManager configuration state."""

    def test_manager_initialization(self):
        """A fresh manager has no provider configured."""
        mgr = ProviderManager()
        assert mgr.current_provider is None
        assert mgr.current_provider_name is None
        assert not mgr.is_configured()

    def test_set_provider(self):
        """Setting a provider records both the object and its name."""
        mgr = ProviderManager()
        fake = Mock(spec=SandboxProvider)
        mgr.set_provider("self_managed", fake)
        assert mgr.current_provider == fake
        assert mgr.current_provider_name == "self_managed"
        assert mgr.is_configured()

    def test_get_provider(self):
        """get_provider returns the configured provider object."""
        mgr = ProviderManager()
        fake = Mock(spec=SandboxProvider)
        mgr.set_provider("self_managed", fake)
        assert mgr.get_provider() == fake

    def test_get_provider_name(self):
        """get_provider_name returns the configured provider name."""
        mgr = ProviderManager()
        fake = Mock(spec=SandboxProvider)
        mgr.set_provider("self_managed", fake)
        assert mgr.get_provider_name() == "self_managed"

    def test_get_provider_when_not_set(self):
        """Both accessors return None before any provider is set."""
        mgr = ProviderManager()
        assert mgr.get_provider() is None
        assert mgr.get_provider_name() is None
class TestSelfManagedProvider:
    """Test SelfManagedProvider implementation.

    All HTTP traffic is stubbed with unittest.mock.patch, so these tests
    never contact a real sandbox executor.
    """
    def test_provider_initialization(self):
        """Test provider initialization."""
        provider = SelfManagedProvider()
        assert provider.endpoint == "http://localhost:9385"
        assert provider.timeout == 30
        assert provider.max_retries == 3
        assert provider.pool_size == 10
        assert not provider._initialized
    @patch('requests.get')
    def test_initialize_success(self, mock_get):
        """Test successful initialization."""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_get.return_value = mock_response
        provider = SelfManagedProvider()
        result = provider.initialize({
            "endpoint": "http://test-endpoint:9385",
            "timeout": 60,
            "max_retries": 5,
            "pool_size": 20
        })
        assert result is True
        assert provider.endpoint == "http://test-endpoint:9385"
        assert provider.timeout == 60
        assert provider.max_retries == 5
        assert provider.pool_size == 20
        assert provider._initialized
        # Initialization must probe the health endpoint exactly once.
        mock_get.assert_called_once_with("http://test-endpoint:9385/healthz", timeout=5)
    @patch('requests.get')
    def test_initialize_failure(self, mock_get):
        """Test initialization failure."""
        mock_get.side_effect = Exception("Connection error")
        provider = SelfManagedProvider()
        result = provider.initialize({"endpoint": "http://invalid:9385"})
        assert result is False
        assert not provider._initialized
    def test_initialize_default_config(self):
        """Test initialization with default config."""
        with patch('requests.get') as mock_get:
            mock_response = Mock()
            mock_response.status_code = 200
            mock_get.return_value = mock_response
            provider = SelfManagedProvider()
            result = provider.initialize({})
            assert result is True
            assert provider.endpoint == "http://localhost:9385"
            assert provider.timeout == 30
    def test_create_instance_python(self):
        """Test creating a Python instance."""
        provider = SelfManagedProvider()
        provider._initialized = True
        instance = provider.create_instance("python")
        assert instance.provider == "self_managed"
        assert instance.status == "running"
        assert instance.metadata["language"] == "python"
        assert instance.metadata["endpoint"] == "http://localhost:9385"
        assert len(instance.instance_id) > 0  # Verify instance_id exists
    def test_create_instance_nodejs(self):
        """Test creating a Node.js instance."""
        provider = SelfManagedProvider()
        provider._initialized = True
        instance = provider.create_instance("nodejs")
        assert instance.metadata["language"] == "nodejs"
    def test_create_instance_not_initialized(self):
        """Test creating instance when provider not initialized."""
        provider = SelfManagedProvider()
        with pytest.raises(RuntimeError, match="Provider not initialized"):
            provider.create_instance("python")
    @patch('requests.post')
    def test_execute_code_success(self, mock_post):
        """Test successful code execution."""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {
            "status": "success",
            "stdout": '{"result": 42}',
            "stderr": "",
            "exit_code": 0,
            "time_used_ms": 100.0,
            "memory_used_kb": 1024.0
        }
        mock_post.return_value = mock_response
        provider = SelfManagedProvider()
        provider._initialized = True
        result = provider.execute_code(
            instance_id="test-123",
            code="def main(): return {'result': 42}",
            language="python",
            timeout=10
        )
        assert result.stdout == '{"result": 42}'
        assert result.stderr == ""
        assert result.exit_code == 0
        assert result.execution_time > 0
        assert result.metadata["status"] == "success"
        assert result.metadata["instance_id"] == "test-123"
    @patch('requests.post')
    def test_execute_code_timeout(self, mock_post):
        """Test code execution timeout."""
        # requests.Timeout must be translated into Python's TimeoutError.
        mock_post.side_effect = requests.Timeout()
        provider = SelfManagedProvider()
        provider._initialized = True
        with pytest.raises(TimeoutError, match="Execution timed out"):
            provider.execute_code(
                instance_id="test-123",
                code="while True: pass",
                language="python",
                timeout=5
            )
    @patch('requests.post')
    def test_execute_code_http_error(self, mock_post):
        """Test code execution with HTTP error."""
        mock_response = Mock()
        mock_response.status_code = 500
        mock_response.text = "Internal Server Error"
        mock_post.return_value = mock_response
        provider = SelfManagedProvider()
        provider._initialized = True
        with pytest.raises(RuntimeError, match="HTTP 500"):
            provider.execute_code(
                instance_id="test-123",
                code="invalid code",
                language="python"
            )
    def test_execute_code_not_initialized(self):
        """Test executing code when provider not initialized."""
        provider = SelfManagedProvider()
        with pytest.raises(RuntimeError, match="Provider not initialized"):
            provider.execute_code(
                instance_id="test-123",
                code="print('hello')",
                language="python"
            )
    def test_destroy_instance(self):
        """Test destroying an instance (no-op for self-managed)."""
        provider = SelfManagedProvider()
        provider._initialized = True
        # For self-managed, destroy_instance is a no-op
        result = provider.destroy_instance("test-123")
        assert result is True
    @patch('requests.get')
    def test_health_check_success(self, mock_get):
        """Test successful health check."""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_get.return_value = mock_response
        provider = SelfManagedProvider()
        result = provider.health_check()
        assert result is True
        mock_get.assert_called_once_with("http://localhost:9385/healthz", timeout=5)
    @patch('requests.get')
    def test_health_check_failure(self, mock_get):
        """Test health check failure."""
        mock_get.side_effect = Exception("Connection error")
        provider = SelfManagedProvider()
        result = provider.health_check()
        assert result is False
    def test_get_supported_languages(self):
        """Test getting supported languages."""
        provider = SelfManagedProvider()
        languages = provider.get_supported_languages()
        assert "python" in languages
        assert "nodejs" in languages
        assert "javascript" in languages
    def test_get_config_schema(self):
        """Test getting configuration schema."""
        schema = SelfManagedProvider.get_config_schema()
        assert "endpoint" in schema
        assert schema["endpoint"]["type"] == "string"
        assert schema["endpoint"]["required"] is True
        assert schema["endpoint"]["default"] == "http://localhost:9385"
        assert "timeout" in schema
        assert schema["timeout"]["type"] == "integer"
        assert schema["timeout"]["default"] == 30
        assert "max_retries" in schema
        assert schema["max_retries"]["type"] == "integer"
        assert "pool_size" in schema
        assert schema["pool_size"]["type"] == "integer"
    def test_normalize_language_python(self):
        """Test normalizing Python language identifier."""
        provider = SelfManagedProvider()
        assert provider._normalize_language("python") == "python"
        assert provider._normalize_language("python3") == "python"
        assert provider._normalize_language("PYTHON") == "python"
        assert provider._normalize_language("Python3") == "python"
    def test_normalize_language_javascript(self):
        """Test normalizing JavaScript language identifier."""
        provider = SelfManagedProvider()
        assert provider._normalize_language("javascript") == "nodejs"
        assert provider._normalize_language("nodejs") == "nodejs"
        assert provider._normalize_language("JavaScript") == "nodejs"
        assert provider._normalize_language("NodeJS") == "nodejs"
    def test_normalize_language_default(self):
        """Test language normalization with empty/unknown input."""
        provider = SelfManagedProvider()
        # Empty / None fall back to python; unknown names pass through unchanged.
        assert provider._normalize_language("") == "python"
        assert provider._normalize_language(None) == "python"
        assert provider._normalize_language("unknown") == "unknown"
class TestProviderInterface:
    """Test that providers correctly implement the interface."""

    def test_self_managed_provider_is_abstract(self):
        """SelfManagedProvider must be a SandboxProvider subtype."""
        assert isinstance(SelfManagedProvider(), SandboxProvider)

    def test_self_managed_provider_has_abstract_methods(self):
        """Every abstract method of the interface must be present and callable."""
        candidate = SelfManagedProvider()
        for method_name in (
            "initialize",
            "create_instance",
            "execute_code",
            "destroy_instance",
            "health_check",
            "get_supported_languages",
        ):
            assert hasattr(candidate, method_name)
            assert callable(getattr(candidate, method_name))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/sandbox/tests/test_providers.py",
"license": "Apache License 2.0",
"lines": 326,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:agent/sandbox/tests/verify_sdk.py | #!/usr/bin/env python3
"""
Quick verification script for Aliyun Code Interpreter provider using official SDK.
"""
import importlib.util
import sys
sys.path.insert(0, ".")
print("=" * 60)
print("Aliyun Code Interpreter Provider - SDK Verification")
print("=" * 60)
# Test 1: Import provider
print("\n[1/5] Testing provider import...")
try:
from agent.sandbox.providers.aliyun_codeinterpreter import AliyunCodeInterpreterProvider
print("✓ Provider imported successfully")
except ImportError as e:
print(f"✗ Import failed: {e}")
sys.exit(1)
# Test 2: Check provider class
print("\n[2/5] Testing provider class...")
provider = AliyunCodeInterpreterProvider()
assert hasattr(provider, "initialize")
assert hasattr(provider, "create_instance")
assert hasattr(provider, "execute_code")
assert hasattr(provider, "destroy_instance")
assert hasattr(provider, "health_check")
print("✓ Provider has all required methods")
# Test 3: Check SDK imports
print("\n[3/5] Testing SDK imports...")
try:
# Check if agentrun SDK is available using importlib
if (
importlib.util.find_spec("agentrun.sandbox") is None
or importlib.util.find_spec("agentrun.utils.config") is None
or importlib.util.find_spec("agentrun.utils.exception") is None
):
raise ImportError("agentrun SDK not found")
# Verify imports work (assign to _ to indicate they're intentionally unused)
from agentrun.sandbox import CodeInterpreterSandbox, TemplateType, CodeLanguage
from agentrun.utils.config import Config
from agentrun.utils.exception import ServerError
_ = (CodeInterpreterSandbox, TemplateType, CodeLanguage, Config, ServerError)
print("✓ SDK modules imported successfully")
except ImportError as e:
print(f"✗ SDK import failed: {e}")
sys.exit(1)
# Test 4: Check config schema
print("\n[4/5] Testing configuration schema...")
schema = AliyunCodeInterpreterProvider.get_config_schema()
required_fields = ["access_key_id", "access_key_secret", "account_id"]
for field in required_fields:
assert field in schema
assert schema[field]["required"] is True
print(f"✓ All required fields present: {', '.join(required_fields)}")
# Test 5: Check supported languages
print("\n[5/5] Testing supported languages...")
languages = provider.get_supported_languages()
assert "python" in languages
assert "javascript" in languages
print(f"✓ Supported languages: {', '.join(languages)}")
print("\n" + "=" * 60)
print("All verification tests passed! ✓")
print("=" * 60)
print("\nNote: This provider now uses the official agentrun-sdk.")
print("SDK Documentation: https://github.com/Serverless-Devs/agentrun-sdk-python")
print("API Documentation: https://help.aliyun.com/zh/functioncompute/fc/sandbox-sandbox-code-interepreter")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/sandbox/tests/verify_sdk.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_session_management/test_chat_completions_openai.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import (
bulk_upload_documents,
chat_completions_openai,
create_chat_assistant,
delete_chat_assistants,
list_documents,
parse_documents,
)
from utils import wait_for
@wait_for(200, 1, "Document parsing timeout")
def _parse_done(auth, dataset_id, document_ids=None):
    """Poll predicate: True once the targeted documents report run == DONE."""
    docs = list_documents(auth, dataset_id)["data"]["docs"]
    if document_ids is None:
        # No filter given: every document in the dataset must be finished.
        return all(doc.get("run") == "DONE" for doc in docs)
    wanted = set(document_ids)
    # Only the requested documents must be finished.
    return all(doc.get("run") == "DONE" for doc in docs if doc.get("id") in wanted)
class TestChatCompletionsOpenAI:
    """Test cases for the OpenAI-compatible chat completions endpoint"""
    @pytest.mark.p2
    def test_openai_chat_completion_non_stream(self, HttpApiAuth, add_dataset_func, tmp_path, request):
        """Test OpenAI-compatible endpoint returns proper response with token usage"""
        dataset_id = add_dataset_func
        document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, 1, tmp_path)
        res = parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
        assert res["code"] == 0, res
        # Block until parsing completes so the assistant has indexed context.
        _parse_done(HttpApiAuth, dataset_id, document_ids)
        res = create_chat_assistant(HttpApiAuth, {"name": "openai_endpoint_test", "dataset_ids": [dataset_id]})
        assert res["code"] == 0, res
        chat_id = res["data"]["id"]
        request.addfinalizer(lambda: delete_chat_assistants(HttpApiAuth))
        res = chat_completions_openai(
            HttpApiAuth,
            chat_id,
            {
                "model": "model",  # Required by OpenAI-compatible API, value is ignored by RAGFlow
                "messages": [{"role": "user", "content": "hello"}],
                "stream": False,
            },
        )
        # Verify OpenAI-compatible response structure
        assert "choices" in res, f"Response should contain 'choices': {res}"
        assert len(res["choices"]) > 0, f"'choices' should not be empty: {res}"
        assert "message" in res["choices"][0], f"Choice should contain 'message': {res}"
        assert "content" in res["choices"][0]["message"], f"Message should contain 'content': {res}"
        # Verify token usage is present and uses actual token counts (not character counts)
        assert "usage" in res, f"Response should contain 'usage': {res}"
        usage = res["usage"]
        assert "prompt_tokens" in usage, f"'usage' should contain 'prompt_tokens': {usage}"
        assert "completion_tokens" in usage, f"'usage' should contain 'completion_tokens': {usage}"
        assert "total_tokens" in usage, f"'usage' should contain 'total_tokens': {usage}"
        assert usage["total_tokens"] == usage["prompt_tokens"] + usage["completion_tokens"], \
            f"total_tokens should equal prompt_tokens + completion_tokens: {usage}"
    @pytest.mark.p2
    def test_openai_chat_completion_token_count_reasonable(self, HttpApiAuth, add_dataset_func, tmp_path, request):
        """Test that token counts are reasonable (using tiktoken, not character counts)"""
        dataset_id = add_dataset_func
        document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, 1, tmp_path)
        res = parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
        assert res["code"] == 0, res
        _parse_done(HttpApiAuth, dataset_id, document_ids)
        res = create_chat_assistant(HttpApiAuth, {"name": "openai_token_count_test", "dataset_ids": [dataset_id]})
        assert res["code"] == 0, res
        chat_id = res["data"]["id"]
        request.addfinalizer(lambda: delete_chat_assistants(HttpApiAuth))
        # Use a message with known token count
        # "hello" is 1 token in cl100k_base encoding
        res = chat_completions_openai(
            HttpApiAuth,
            chat_id,
            {
                "model": "model",  # Required by OpenAI-compatible API, value is ignored by RAGFlow
                "messages": [{"role": "user", "content": "hello"}],
                "stream": False,
            },
        )
        assert "usage" in res, f"Response should contain 'usage': {res}"
        usage = res["usage"]
        # The prompt tokens should be reasonable for the message "hello" plus any system context
        # If using len() instead of tiktoken, a short response could have equal or fewer tokens
        # than characters, which would be incorrect
        # With tiktoken, "hello" = 1 token, so prompt_tokens should include that plus context
        assert usage["prompt_tokens"] > 0, f"prompt_tokens should be greater than 0: {usage}"
        assert usage["completion_tokens"] > 0, f"completion_tokens should be greater than 0: {usage}"
    @pytest.mark.p2
    def test_openai_chat_completion_invalid_chat(self, HttpApiAuth):
        """Test OpenAI endpoint returns error for invalid chat ID"""
        res = chat_completions_openai(
            HttpApiAuth,
            "invalid_chat_id",
            {
                "model": "model",  # Required by OpenAI-compatible API, value is ignored by RAGFlow
                "messages": [{"role": "user", "content": "hello"}],
                "stream": False,
            },
        )
        # Should return an error (format may vary based on implementation)
        assert "error" in res or res.get("code") != 0, f"Should return error for invalid chat: {res}"
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, requires_valid_chat, expected_message",
        [
            (
                {
                    "model": "model",
                    "messages": [{"role": "user", "content": "hello"}],
                    "extra_body": "invalid_extra_body",
                },
                False,
                "extra_body must be an object.",
            ),
            (
                {
                    "model": "model",
                    "messages": [{"role": "user", "content": "hello"}],
                    "extra_body": {"reference_metadata": "invalid_reference_metadata"},
                },
                False,
                "reference_metadata must be an object.",
            ),
            (
                {
                    "model": "model",
                    "messages": [{"role": "user", "content": "hello"}],
                    "extra_body": {"reference_metadata": {"fields": "author"}},
                },
                False,
                "reference_metadata.fields must be an array.",
            ),
            (
                {
                    "model": "model",
                    "messages": [],
                },
                False,
                "You have to provide messages.",
            ),
            (
                {
                    "model": "model",
                    "messages": [{"role": "assistant", "content": "hello"}],
                },
                False,
                "The last content of this conversation is not from user.",
            ),
            (
                {
                    "model": "model",
                    "messages": [{"role": "user", "content": "hello"}],
                    "extra_body": {"metadata_condition": "invalid"},
                },
                True,
                "metadata_condition must be an object.",
            ),
        ],
    )
    def test_openai_chat_completion_request_validation(
        self,
        HttpApiAuth,
        request,
        payload,
        requires_valid_chat,
        expected_message,
    ):
        """Each invalid payload must be rejected with its specific message."""
        # NOTE(review): cases flagged requires_valid_chat presumably hit
        # validation that only runs after the assistant is resolved — hence
        # the real assistant created below.  Confirm against the endpoint.
        chat_id = "invalid_chat_id"
        if requires_valid_chat:
            res = create_chat_assistant(HttpApiAuth, {"name": "openai_validation_case", "dataset_ids": []})
            assert res["code"] == 0, res
            chat_id = res["data"]["id"]
            request.addfinalizer(lambda: delete_chat_assistants(HttpApiAuth))
        res = chat_completions_openai(HttpApiAuth, chat_id, payload)
        assert res.get("code") != 0, res
        assert expected_message in res.get("message", ""), res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_session_management/test_chat_completions_openai.py",
"license": "Apache License 2.0",
"lines": 192,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:admin/client/http_client.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import json
import typing
from typing import Any, Dict, Optional
import requests
# from requests.sessions import HTTPAdapter
class HttpClient:
    def __init__(
        self,
        host: str = "127.0.0.1",
        port: int = 9381,
        api_version: str = "v1",
        api_key: Optional[str] = None,
        connect_timeout: float = 5.0,
        read_timeout: float = 60.0,
        verify_ssl: bool = False,
    ) -> None:
        """Store connection settings for the RAGFlow admin HTTP client.

        api_key authenticates "api" requests (sent as a Bearer token);
        login_token starts empty and is expected to be set by a login flow
        for "web"/"admin" requests.  connect_timeout/read_timeout are kept
        but appear unused by request() (its timeout tuple is commented out)
        — see request() before relying on them.
        """
        self.host = host
        self.port = port
        self.api_version = api_version
        self.api_key = api_key
        # Session token obtained after login; sent verbatim (no Bearer prefix).
        self.login_token: str | None = None
        self.connect_timeout = connect_timeout
        self.read_timeout = read_timeout
        # NOTE(review): verify_ssl also selects the URL scheme in build_url —
        # confirm that conflating TLS verification with https is intended.
        self.verify_ssl = verify_ssl
def api_base(self) -> str:
return f"{self.host}:{self.port}/api/{self.api_version}"
def non_api_base(self) -> str:
return f"{self.host}:{self.port}/{self.api_version}"
def build_url(self, path: str, use_api_base: bool = True) -> str:
base = self.api_base() if use_api_base else self.non_api_base()
if self.verify_ssl:
return f"https://{base}/{path.lstrip('/')}"
else:
return f"http://{base}/{path.lstrip('/')}"
def _headers(self, auth_kind: Optional[str], extra: Optional[Dict[str, str]]) -> Dict[str, str]:
headers = {}
if auth_kind == "api" and self.api_key:
headers["Authorization"] = f"Bearer {self.api_key}"
elif auth_kind == "web" and self.login_token:
headers["Authorization"] = self.login_token
elif auth_kind == "admin" and self.login_token:
headers["Authorization"] = self.login_token
else:
pass
if extra:
headers.update(extra)
return headers
def request(
self,
method: str,
path: str,
*,
use_api_base: bool = True,
auth_kind: Optional[str] = "api",
headers: Optional[Dict[str, str]] = None,
json_body: Optional[Dict[str, Any]] = None,
data: Any = None,
files: Any = None,
params: Optional[Dict[str, Any]] = None,
stream: bool = False,
iterations: int = 1,
) -> requests.Response | dict:
url = self.build_url(path, use_api_base=use_api_base)
merged_headers = self._headers(auth_kind, headers)
# timeout: Tuple[float, float] = (self.connect_timeout, self.read_timeout)
session = requests.Session()
# adapter = HTTPAdapter(pool_connections=100, pool_maxsize=100)
# session.mount("http://", adapter)
http_function = typing.Any
match method:
case "GET":
http_function = session.get
case "POST":
http_function = session.post
case "PUT":
http_function = session.put
case "DELETE":
http_function = session.delete
case "PATCH":
http_function = session.patch
case _:
raise ValueError(f"Invalid HTTP method: {method}")
if iterations > 1:
response_list = []
total_duration = 0.0
for _ in range(iterations):
start_time = time.perf_counter()
response = http_function(url, headers=merged_headers, json=json_body, data=data, stream=stream)
# response = session.get(url, headers=merged_headers, json=json_body, data=data, stream=stream)
# response = requests.request(
# method=method,
# url=url,
# headers=merged_headers,
# json=json_body,
# data=data,
# files=files,
# params=params,
# stream=stream,
# verify=self.verify_ssl,
# )
end_time = time.perf_counter()
total_duration += end_time - start_time
response_list.append(response)
return {"duration": total_duration, "response_list": response_list}
else:
return http_function(url, headers=merged_headers, json=json_body, data=data, stream=stream)
# return session.get(url, headers=merged_headers, json=json_body, data=data, stream=stream)
# return requests.request(
# method=method,
# url=url,
# headers=merged_headers,
# json=json_body,
# data=data,
# files=files,
# params=params,
# stream=stream,
# verify=self.verify_ssl,
# )
def request_json(
self,
method: str,
path: str,
*,
use_api_base: bool = True,
auth_kind: Optional[str] = "api",
headers: Optional[Dict[str, str]] = None,
json_body: Optional[Dict[str, Any]] = None,
data: Any = None,
files: Any = None,
params: Optional[Dict[str, Any]] = None,
stream: bool = False,
) -> Dict[str, Any]:
response = self.request(
method,
path,
use_api_base=use_api_base,
auth_kind=auth_kind,
headers=headers,
json_body=json_body,
data=data,
files=files,
params=params,
stream=stream,
)
try:
return response.json()
except Exception as exc:
raise ValueError(f"Non-JSON response from {path}: {exc}") from exc
@staticmethod
def parse_json_bytes(raw: bytes) -> Dict[str, Any]:
try:
return json.loads(raw.decode("utf-8"))
except Exception as exc:
raise ValueError(f"Invalid JSON payload: {exc}") from exc
| {
"repo_id": "infiniflow/ragflow",
"file_path": "admin/client/http_client.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:admin/client/parser.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from lark import Transformer
GRAMMAR = r"""
start: command
command: sql_command | meta_command
sql_command: login_user
| ping_server
| list_services
| show_service
| startup_service
| shutdown_service
| restart_service
| register_user
| list_users
| show_user
| drop_user
| alter_user
| create_user
| activate_user
| list_datasets
| list_agents
| create_role
| drop_role
| alter_role
| list_roles
| show_role
| grant_permission
| revoke_permission
| alter_user_role
| show_user_permission
| show_version
| grant_admin
| revoke_admin
| set_variable
| show_variable
| list_variables
| list_configs
| list_environments
| generate_key
| list_keys
| drop_key
| show_current_user
| set_default_llm
| set_default_vlm
| set_default_embedding
| set_default_reranker
| set_default_asr
| set_default_tts
| reset_default_llm
| reset_default_vlm
| reset_default_embedding
| reset_default_reranker
| reset_default_asr
| reset_default_tts
| create_model_provider
| drop_model_provider
| create_user_dataset_with_parser
| create_user_dataset_with_pipeline
| drop_user_dataset
| list_user_datasets
| list_user_dataset_files
| list_user_agents
| list_user_chats
| create_user_chat
| drop_user_chat
| list_user_model_providers
| list_user_default_models
| parse_dataset_docs
| parse_dataset_sync
| parse_dataset_async
| import_docs_into_dataset
| search_on_datasets
| create_chat_session
| drop_chat_session
| list_chat_sessions
| chat_on_session
| benchmark
// meta command definition
meta_command: "\\" meta_command_name [meta_args]
meta_command_name: /[a-zA-Z?]+/
meta_args: (meta_arg)+
meta_arg: /[^\\s"']+/ | quoted_string
// command definition
LOGIN: "LOGIN"i
REGISTER: "REGISTER"i
LIST: "LIST"i
SERVICES: "SERVICES"i
SHOW: "SHOW"i
CREATE: "CREATE"i
SERVICE: "SERVICE"i
SHUTDOWN: "SHUTDOWN"i
STARTUP: "STARTUP"i
RESTART: "RESTART"i
USERS: "USERS"i
DROP: "DROP"i
USER: "USER"i
ALTER: "ALTER"i
ACTIVE: "ACTIVE"i
ADMIN: "ADMIN"i
PASSWORD: "PASSWORD"i
DATASET: "DATASET"i
DATASETS: "DATASETS"i
OF: "OF"i
AGENTS: "AGENTS"i
ROLE: "ROLE"i
ROLES: "ROLES"i
DESCRIPTION: "DESCRIPTION"i
GRANT: "GRANT"i
REVOKE: "REVOKE"i
ALL: "ALL"i
PERMISSION: "PERMISSION"i
TO: "TO"i
FROM: "FROM"i
FOR: "FOR"i
RESOURCES: "RESOURCES"i
ON: "ON"i
SET: "SET"i
RESET: "RESET"i
VERSION: "VERSION"i
VAR: "VAR"i
VARS: "VARS"i
CONFIGS: "CONFIGS"i
ENVS: "ENVS"i
KEY: "KEY"i
KEYS: "KEYS"i
GENERATE: "GENERATE"i
MODEL: "MODEL"i
MODELS: "MODELS"i
PROVIDER: "PROVIDER"i
PROVIDERS: "PROVIDERS"i
DEFAULT: "DEFAULT"i
CHATS: "CHATS"i
CHAT: "CHAT"i
FILES: "FILES"i
AS: "AS"i
PARSE: "PARSE"i
IMPORT: "IMPORT"i
INTO: "INTO"i
WITH: "WITH"i
PARSER: "PARSER"i
PIPELINE: "PIPELINE"i
SEARCH: "SEARCH"i
CURRENT: "CURRENT"i
LLM: "LLM"i
VLM: "VLM"i
EMBEDDING: "EMBEDDING"i
RERANKER: "RERANKER"i
ASR: "ASR"i
TTS: "TTS"i
ASYNC: "ASYNC"i
SYNC: "SYNC"i
BENCHMARK: "BENCHMARK"i
PING: "PING"i
SESSION: "SESSION"i
SESSIONS: "SESSIONS"i
login_user: LOGIN USER quoted_string ";"
list_services: LIST SERVICES ";"
show_service: SHOW SERVICE NUMBER ";"
startup_service: STARTUP SERVICE NUMBER ";"
shutdown_service: SHUTDOWN SERVICE NUMBER ";"
restart_service: RESTART SERVICE NUMBER ";"
register_user: REGISTER USER quoted_string AS quoted_string PASSWORD quoted_string ";"
list_users: LIST USERS ";"
drop_user: DROP USER quoted_string ";"
alter_user: ALTER USER PASSWORD quoted_string quoted_string ";"
show_user: SHOW USER quoted_string ";"
create_user: CREATE USER quoted_string quoted_string ";"
activate_user: ALTER USER ACTIVE quoted_string status ";"
list_datasets: LIST DATASETS OF quoted_string ";"
list_agents: LIST AGENTS OF quoted_string ";"
create_role: CREATE ROLE identifier [DESCRIPTION quoted_string] ";"
drop_role: DROP ROLE identifier ";"
alter_role: ALTER ROLE identifier SET DESCRIPTION quoted_string ";"
list_roles: LIST ROLES ";"
show_role: SHOW ROLE identifier ";"
grant_permission: GRANT identifier_list ON identifier TO ROLE identifier ";"
revoke_permission: REVOKE identifier_list ON identifier FROM ROLE identifier ";"
alter_user_role: ALTER USER quoted_string SET ROLE identifier ";"
show_user_permission: SHOW USER PERMISSION quoted_string ";"
show_version: SHOW VERSION ";"
grant_admin: GRANT ADMIN quoted_string ";"
revoke_admin: REVOKE ADMIN quoted_string ";"
generate_key: GENERATE KEY FOR USER quoted_string ";"
list_keys: LIST KEYS OF quoted_string ";"
drop_key: DROP KEY quoted_string OF quoted_string ";"
set_variable: SET VAR identifier identifier ";"
show_variable: SHOW VAR identifier ";"
list_variables: LIST VARS ";"
list_configs: LIST CONFIGS ";"
list_environments: LIST ENVS ";"
benchmark: BENCHMARK NUMBER NUMBER user_statement
user_statement: ping_server
| show_current_user
| create_model_provider
| drop_model_provider
| set_default_llm
| set_default_vlm
| set_default_embedding
| set_default_reranker
| set_default_asr
| set_default_tts
| reset_default_llm
| reset_default_vlm
| reset_default_embedding
| reset_default_reranker
| reset_default_asr
| reset_default_tts
| create_user_dataset_with_parser
| create_user_dataset_with_pipeline
| drop_user_dataset
| list_user_datasets
| list_user_dataset_files
| list_user_agents
| list_user_chats
| create_user_chat
| drop_user_chat
| list_user_model_providers
| list_user_default_models
| import_docs_into_dataset
| search_on_datasets
| create_chat_session
| drop_chat_session
| list_chat_sessions
| chat_on_session
ping_server: PING ";"
show_current_user: SHOW CURRENT USER ";"
create_model_provider: CREATE MODEL PROVIDER quoted_string quoted_string ";"
drop_model_provider: DROP MODEL PROVIDER quoted_string ";"
set_default_llm: SET DEFAULT LLM quoted_string ";"
set_default_vlm: SET DEFAULT VLM quoted_string ";"
set_default_embedding: SET DEFAULT EMBEDDING quoted_string ";"
set_default_reranker: SET DEFAULT RERANKER quoted_string ";"
set_default_asr: SET DEFAULT ASR quoted_string ";"
set_default_tts: SET DEFAULT TTS quoted_string ";"
reset_default_llm: RESET DEFAULT LLM ";"
reset_default_vlm: RESET DEFAULT VLM ";"
reset_default_embedding: RESET DEFAULT EMBEDDING ";"
reset_default_reranker: RESET DEFAULT RERANKER ";"
reset_default_asr: RESET DEFAULT ASR ";"
reset_default_tts: RESET DEFAULT TTS ";"
list_user_datasets: LIST DATASETS ";"
create_user_dataset_with_parser: CREATE DATASET quoted_string WITH EMBEDDING quoted_string PARSER quoted_string ";"
create_user_dataset_with_pipeline: CREATE DATASET quoted_string WITH EMBEDDING quoted_string PIPELINE quoted_string ";"
drop_user_dataset: DROP DATASET quoted_string ";"
list_user_dataset_files: LIST FILES OF DATASET quoted_string ";"
list_user_agents: LIST AGENTS ";"
list_user_chats: LIST CHATS ";"
create_user_chat: CREATE CHAT quoted_string ";"
drop_user_chat: DROP CHAT quoted_string ";"
create_chat_session: CREATE CHAT quoted_string SESSION ";"
drop_chat_session: DROP CHAT quoted_string SESSION quoted_string ";"
list_chat_sessions: LIST CHAT quoted_string SESSIONS ";"
chat_on_session: CHAT quoted_string ON quoted_string SESSION quoted_string ";"
list_user_model_providers: LIST MODEL PROVIDERS ";"
list_user_default_models: LIST DEFAULT MODELS ";"
import_docs_into_dataset: IMPORT quoted_string INTO DATASET quoted_string ";"
search_on_datasets: SEARCH quoted_string ON DATASETS quoted_string ";"
parse_dataset_docs: PARSE quoted_string OF DATASET quoted_string ";"
parse_dataset_sync: PARSE DATASET quoted_string SYNC ";"
parse_dataset_async: PARSE DATASET quoted_string ASYNC ";"
identifier_list: identifier ("," identifier)*
identifier: WORD
quoted_string: QUOTED_STRING
status: WORD
QUOTED_STRING: /'[^']+'/ | /"[^"]+"/
WORD: /[a-zA-Z0-9_\-\.]+/
NUMBER: /[0-9]+/
%import common.WS
%ignore WS
"""
class RAGFlowCLITransformer(Transformer):
    """Transform a parsed CLI command tree into a plain command dict.

    Each method corresponds to one rule in ``GRAMMAR`` and returns a
    ``{"type": ..., ...}`` dict consumed by the client dispatcher.
    ``items`` is the rule's ordered child list, so values are extracted
    by position; ``quoted_string`` children arrive as subtrees whose
    first child is the raw quoted token, hence the recurring
    ``.children[0].strip("'\"")`` pattern.

    NOTE(review): several handlers (show_user, drop_user, alter_user,
    create_user, activate_user, list_datasets, ...) return the raw
    subtree instead of a stripped string; the client strips quotes on its
    side.  Confirm this asymmetry is intentional.
    """

    def start(self, items):
        return items[0]

    def command(self, items):
        return items[0]

    def login_user(self, items):
        # items: [LOGIN, USER, quoted_string-subtree]
        email = items[2].children[0].strip("'\"")
        return {"type": "login_user", "email": email}

    def ping_server(self, items):
        return {"type": "ping_server"}

    def list_services(self, items):
        result = {"type": "list_services"}
        return result

    def show_service(self, items):
        # NUMBER token converts directly to int.
        service_id = int(items[2])
        return {"type": "show_service", "number": service_id}

    def startup_service(self, items):
        service_id = int(items[2])
        return {"type": "startup_service", "number": service_id}

    def shutdown_service(self, items):
        service_id = int(items[2])
        return {"type": "shutdown_service", "number": service_id}

    def restart_service(self, items):
        service_id = int(items[2])
        return {"type": "restart_service", "number": service_id}

    def register_user(self, items):
        # Grammar: REGISTER USER <email> AS <nickname> PASSWORD <password>
        user_name: str = items[2].children[0].strip("'\"")
        nickname: str = items[4].children[0].strip("'\"")
        password: str = items[6].children[0].strip("'\"")
        return {"type": "register_user", "user_name": user_name, "nickname": nickname, "password": password}

    def list_users(self, items):
        return {"type": "list_users"}

    def show_user(self, items):
        # Returns the raw subtree; caller strips quotes.
        user_name = items[2]
        return {"type": "show_user", "user_name": user_name}

    def drop_user(self, items):
        user_name = items[2]
        return {"type": "drop_user", "user_name": user_name}

    def alter_user(self, items):
        # Grammar: ALTER USER PASSWORD <user> <new_password>
        user_name = items[3]
        new_password = items[4]
        return {"type": "alter_user", "user_name": user_name, "password": new_password}

    def create_user(self, items):
        # Role is fixed to "user"; admin promotion goes through GRANT ADMIN.
        user_name = items[2]
        password = items[3]
        return {"type": "create_user", "user_name": user_name, "password": password, "role": "user"}

    def activate_user(self, items):
        # Grammar: ALTER USER ACTIVE <user> <status>
        user_name = items[3]
        activate_status = items[4]
        return {"type": "activate_user", "activate_status": activate_status, "user_name": user_name}

    def list_datasets(self, items):
        user_name = items[3]
        return {"type": "list_datasets", "user_name": user_name}

    def list_agents(self, items):
        user_name = items[3]
        return {"type": "list_agents", "user_name": user_name}

    def create_role(self, items):
        # DESCRIPTION clause is optional in the grammar.
        role_name = items[2]
        if len(items) > 4:
            description = items[4]
            return {"type": "create_role", "role_name": role_name, "description": description}
        else:
            return {"type": "create_role", "role_name": role_name}

    def drop_role(self, items):
        role_name = items[2]
        return {"type": "drop_role", "role_name": role_name}

    def alter_role(self, items):
        role_name = items[2]
        description = items[5]
        return {"type": "alter_role", "role_name": role_name, "description": description}

    def list_roles(self, items):
        return {"type": "list_roles"}

    def show_role(self, items):
        role_name = items[2]
        return {"type": "show_role", "role_name": role_name}

    def grant_permission(self, items):
        # Grammar: GRANT <actions> ON <resource> TO ROLE <role>
        # NOTE(review): identifier_list has no transformer method (only
        # action_list below), so items[1] is likely an untransformed Tree.
        action_list = items[1]
        resource = items[3]
        role_name = items[6]
        return {"type": "grant_permission", "role_name": role_name, "resource": resource, "actions": action_list}

    def revoke_permission(self, items):
        action_list = items[1]
        resource = items[3]
        role_name = items[6]
        return {"type": "revoke_permission", "role_name": role_name, "resource": resource, "actions": action_list}

    def alter_user_role(self, items):
        user_name = items[2]
        role_name = items[5]
        return {"type": "alter_user_role", "user_name": user_name, "role_name": role_name}

    def show_user_permission(self, items):
        user_name = items[3]
        return {"type": "show_user_permission", "user_name": user_name}

    def show_version(self, items):
        return {"type": "show_version"}

    def grant_admin(self, items):
        user_name = items[2]
        return {"type": "grant_admin", "user_name": user_name}

    def revoke_admin(self, items):
        user_name = items[2]
        return {"type": "revoke_admin", "user_name": user_name}

    def generate_key(self, items):
        user_name = items[4]
        return {"type": "generate_key", "user_name": user_name}

    def list_keys(self, items):
        user_name = items[3]
        return {"type": "list_keys", "user_name": user_name}

    def drop_key(self, items):
        key = items[2]
        user_name = items[4]
        return {"type": "drop_key", "key": key, "user_name": user_name}

    def set_variable(self, items):
        var_name = items[2]
        var_value = items[3]
        return {"type": "set_variable", "var_name": var_name, "var_value": var_value}

    def show_variable(self, items):
        var_name = items[2]
        return {"type": "show_variable", "var_name": var_name}

    def list_variables(self, items):
        return {"type": "list_variables"}

    def list_configs(self, items):
        return {"type": "list_configs"}

    def list_environments(self, items):
        return {"type": "list_environments"}

    def create_model_provider(self, items):
        provider_name = items[3].children[0].strip("'\"")
        provider_key = items[4].children[0].strip("'\"")
        return {"type": "create_model_provider", "provider_name": provider_name, "provider_key": provider_key}

    def drop_model_provider(self, items):
        provider_name = items[3].children[0].strip("'\"")
        return {"type": "drop_model_provider", "provider_name": provider_name}

    def show_current_user(self, items):
        return {"type": "show_current_user"}

    # --- default-model setters: all share type "set_default_model" and
    # differ only in the tenant field updated (model_type). ---

    def set_default_llm(self, items):
        llm_id = items[3].children[0].strip("'\"")
        return {"type": "set_default_model", "model_type": "llm_id", "model_id": llm_id}

    def set_default_vlm(self, items):
        vlm_id = items[3].children[0].strip("'\"")
        return {"type": "set_default_model", "model_type": "img2txt_id", "model_id": vlm_id}

    def set_default_embedding(self, items):
        embedding_id = items[3].children[0].strip("'\"")
        return {"type": "set_default_model", "model_type": "embd_id", "model_id": embedding_id}

    def set_default_reranker(self, items):
        reranker_id = items[3].children[0].strip("'\"")
        return {"type": "set_default_model", "model_type": "reranker_id", "model_id": reranker_id}

    def set_default_asr(self, items):
        asr_id = items[3].children[0].strip("'\"")
        return {"type": "set_default_model", "model_type": "asr_id", "model_id": asr_id}

    def set_default_tts(self, items):
        tts_id = items[3].children[0].strip("'\"")
        return {"type": "set_default_model", "model_type": "tts_id", "model_id": tts_id}

    def reset_default_llm(self, items):
        return {"type": "reset_default_model", "model_type": "llm_id"}

    def reset_default_vlm(self, items):
        return {"type": "reset_default_model", "model_type": "img2txt_id"}

    def reset_default_embedding(self, items):
        return {"type": "reset_default_model", "model_type": "embd_id"}

    def reset_default_reranker(self, items):
        return {"type": "reset_default_model", "model_type": "reranker_id"}

    def reset_default_asr(self, items):
        return {"type": "reset_default_model", "model_type": "asr_id"}

    def reset_default_tts(self, items):
        return {"type": "reset_default_model", "model_type": "tts_id"}

    def list_user_datasets(self, items):
        return {"type": "list_user_datasets"}

    def create_user_dataset_with_parser(self, items):
        dataset_name = items[2].children[0].strip("'\"")
        embedding = items[5].children[0].strip("'\"")
        parser_type = items[7].children[0].strip("'\"")
        return {"type": "create_user_dataset", "dataset_name": dataset_name, "embedding": embedding,
                "parser_type": parser_type}

    def create_user_dataset_with_pipeline(self, items):
        dataset_name = items[2].children[0].strip("'\"")
        embedding = items[5].children[0].strip("'\"")
        pipeline = items[7].children[0].strip("'\"")
        return {"type": "create_user_dataset", "dataset_name": dataset_name, "embedding": embedding,
                "pipeline": pipeline}

    def drop_user_dataset(self, items):
        dataset_name = items[2].children[0].strip("'\"")
        return {"type": "drop_user_dataset", "dataset_name": dataset_name}

    def list_user_dataset_files(self, items):
        dataset_name = items[4].children[0].strip("'\"")
        return {"type": "list_user_dataset_files", "dataset_name": dataset_name}

    def list_user_agents(self, items):
        return {"type": "list_user_agents"}

    def list_user_chats(self, items):
        return {"type": "list_user_chats"}

    def create_user_chat(self, items):
        chat_name = items[2].children[0].strip("'\"")
        return {"type": "create_user_chat", "chat_name": chat_name}

    def drop_user_chat(self, items):
        chat_name = items[2].children[0].strip("'\"")
        return {"type": "drop_user_chat", "chat_name": chat_name}

    def list_user_model_providers(self, items):
        return {"type": "list_user_model_providers"}

    def list_user_default_models(self, items):
        return {"type": "list_user_default_models"}

    def parse_dataset_docs(self, items):
        # Document list may be comma-separated; a single entry is further
        # split on spaces so 'a b c' also yields multiple names.
        document_list_str = items[1].children[0].strip("'\"")
        document_names = document_list_str.split(",")
        if len(document_names) == 1:
            document_names = document_names[0]
            document_names = document_names.split(" ")
        dataset_name = items[4].children[0].strip("'\"")
        return {"type": "parse_dataset_docs", "dataset_name": dataset_name, "document_names": document_names}

    def parse_dataset_sync(self, items):
        dataset_name = items[2].children[0].strip("'\"")
        return {"type": "parse_dataset", "dataset_name": dataset_name, "method": "sync"}

    def parse_dataset_async(self, items):
        dataset_name = items[2].children[0].strip("'\"")
        return {"type": "parse_dataset", "dataset_name": dataset_name, "method": "async"}

    def create_chat_session(self, items):
        chat_name = items[2].children[0].strip("'\"")
        return {"type": "create_chat_session", "chat_name": chat_name}

    def drop_chat_session(self, items):
        chat_name = items[2].children[0].strip("'\"")
        session_id = items[4].children[0].strip("'\"")
        return {"type": "drop_chat_session", "chat_name": chat_name, "session_id": session_id}

    def list_chat_sessions(self, items):
        chat_name = items[2].children[0].strip("'\"")
        return {"type": "list_chat_sessions", "chat_name": chat_name}

    def chat_on_session(self, items):
        message = items[1].children[0].strip("'\"")
        chat_name = items[3].children[0].strip("'\"")
        session_id = items[5].children[0].strip("'\"")
        return {"type": "chat_on_session", "message": message, "chat_name": chat_name, "session_id": session_id}

    def import_docs_into_dataset(self, items):
        # Same comma-then-space splitting convention as parse_dataset_docs.
        document_list_str = items[1].children[0].strip("'\"")
        document_paths = document_list_str.split(",")
        if len(document_paths) == 1:
            document_paths = document_paths[0]
            document_paths = document_paths.split(" ")
        dataset_name = items[4].children[0].strip("'\"")
        return {"type": "import_docs_into_dataset", "dataset_name": dataset_name, "document_paths": document_paths}

    def search_on_datasets(self, items):
        question = items[1].children[0].strip("'\"")
        datasets_str = items[4].children[0].strip("'\"")
        datasets = datasets_str.split(",")
        if len(datasets) == 1:
            datasets = datasets[0]
            datasets = datasets.split(" ")
        return {"type": "search_on_datasets", "datasets": datasets, "question": question}

    def benchmark(self, items):
        # Grammar: BENCHMARK <concurrency> <iterations> <user_statement>
        concurrency: int = int(items[1])
        iterations: int = int(items[2])
        command = items[3].children[0]
        return {"type": "benchmark", "concurrency": concurrency, "iterations": iterations, "command": command}

    def action_list(self, items):
        # NOTE(review): no grammar rule is named action_list (the grammar
        # uses identifier_list), so this handler appears to never fire.
        return items

    def meta_command(self, items):
        command_name = str(items[0]).lower()
        args = items[1:] if len(items) > 1 else []
        # handle quoted parameter
        parsed_args = []
        for arg in args:
            if hasattr(arg, "value"):
                parsed_args.append(arg.value)
            else:
                parsed_args.append(str(arg))
        return {"type": "meta", "command": command_name, "args": parsed_args}

    def meta_command_name(self, items):
        return items[0]

    def meta_args(self, items):
        return items
| {
"repo_id": "infiniflow/ragflow",
"file_path": "admin/client/parser.py",
"license": "Apache License 2.0",
"lines": 547,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
infiniflow/ragflow:admin/client/ragflow_client.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import time
import uuid
from typing import Any, List, Optional
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor, as_completed
import urllib.parse
from pathlib import Path
from http_client import HttpClient
from lark import Tree
from user import encrypt_password, login_user
import getpass
import base64
from Cryptodome.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
from Cryptodome.PublicKey import RSA
try:
from requests_toolbelt import MultipartEncoder
except Exception as e: # pragma: no cover - fallback without toolbelt
print(f"Fallback without belt: {e}")
MultipartEncoder = None
def encrypt(input_string):
    """RSA-encrypt *input_string* with the server's built-in public key.

    The plaintext is first base64-encoded, then encrypted with PKCS#1
    v1.5, and the ciphertext is returned as a base64 string (matching the
    server's expected login-password encoding).
    """
    public_key_pem = "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArq9XTUSeYr2+N1h3Afl/z8Dse/2yD0ZGrKwx+EEEcdsBLca9Ynmx3nIB5obmLlSfmskLpBo0UACBmB5rEjBp2Q2f3AG3Hjd4B+gNCG6BDaawuDlgANIhGnaTLrIqWrrcm4EMzJOnAOI1fgzJRsOOUEfaS318Eq9OVO3apEyCCt0lOQK6PuksduOjVxtltDav+guVAA068NrPYmRNabVKRNLJpL8w4D44sfth5RvZ3q9t+6RTArpEtc5sh5ChzvqPOzKGMXW83C95TxmXqpbK6olN4RevSfVjEAgCydH6HN6OhtOQEcnrU97r9H0iZOWwbw3pVrZiUkuRD1R56Wzs2wIDAQAB\n-----END PUBLIC KEY-----"
    rsa_key = RSA.importKey(public_key_pem)
    encryptor = Cipher_pkcs1_v1_5.new(rsa_key)
    payload = base64.b64encode(input_string.encode("utf-8"))
    return base64.b64encode(encryptor.encrypt(payload)).decode("utf-8")
class RAGFlowClient:
def __init__(self, http_client: HttpClient, server_type: str):
self.http_client = http_client
self.server_type = server_type
def login_user(self, command):
try:
response = self.http_client.request("GET", "/system/ping", use_api_base=False, auth_kind="web")
if response.status_code == 200 and response.content == b"pong":
pass
else:
print("Server is down")
return
except Exception as e:
print(str(e))
print("Can't access server for login (connection failed)")
return
email : str = command["email"]
user_password = getpass.getpass(f"password for {email}: ").strip()
try:
token = login_user(self.http_client, self.server_type, email, user_password)
self.http_client.login_token = token
print(f"Login user {email} successfully")
except Exception as e:
print(str(e))
print("Can't access server for login (connection failed)")
def ping_server(self, command):
iterations = command.get("iterations", 1)
if iterations > 1:
response = self.http_client.request("GET", "/system/ping", use_api_base=False, auth_kind="web",
iterations=iterations)
return response
else:
response = self.http_client.request("GET", "/system/ping", use_api_base=False, auth_kind="web")
if response.status_code == 200 and response.content == b"pong":
print("Server is alive")
else:
print("Server is down")
return None
def register_user(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
username: str = command["user_name"]
nickname: str = command["nickname"]
password: str = command["password"]
enc_password = encrypt_password(password)
print(f"Register user: {nickname}, email: {username}, password: ******")
payload = {"email": username, "nickname": nickname, "password": enc_password}
response = self.http_client.request(method="POST", path="/user/register",
json_body=payload, use_api_base=False, auth_kind="web")
res_json = response.json()
if response.status_code == 200:
if res_json["code"] == 0:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to register user {username}, code: {res_json['code']}, message: {res_json['message']}")
else:
print(f"Fail to register user {username}, code: {res_json['code']}, message: {res_json['message']}")
def list_services(self):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
response = self.http_client.request("GET", "/admin/services", use_api_base=True, auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to get all services, code: {res_json['code']}, message: {res_json['message']}")
pass
def show_service(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
service_id: int = command["number"]
response = self.http_client.request("GET", f"/admin/services/{service_id}", use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
res_data = res_json["data"]
if "status" in res_data and res_data["status"] == "alive":
print(f"Service {res_data['service_name']} is alive, ")
res_message = res_data["message"]
if res_message is None:
return
elif isinstance(res_message, str):
print(res_message)
else:
data = self._format_service_detail_table(res_message)
self._print_table_simple(data)
else:
print(f"Service {res_data['service_name']} is down, {res_data['message']}")
else:
print(f"Fail to show service, code: {res_json['code']}, message: {res_json['message']}")
def restart_service(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
# service_id: int = command["number"]
print("Restart service isn't implemented")
def shutdown_service(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
# service_id: int = command["number"]
print("Shutdown service isn't implemented")
def startup_service(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
# service_id: int = command["number"]
print("Startup service isn't implemented")
def list_users(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
response = self.http_client.request("GET", "/admin/users", use_api_base=True, auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to get all users, code: {res_json['code']}, message: {res_json['message']}")
def show_user(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
username_tree: Tree = command["user_name"]
user_name: str = username_tree.children[0].strip("'\"")
print(f"Showing user: {user_name}")
response = self.http_client.request("GET", f"/admin/users/{user_name}", use_api_base=True, auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
table_data = res_json["data"][0]
table_data.pop("avatar")
self._print_table_simple(table_data)
else:
print(f"Fail to get user {user_name}, code: {res_json['code']}, message: {res_json['message']}")
def drop_user(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
username_tree: Tree = command["user_name"]
user_name: str = username_tree.children[0].strip("'\"")
print(f"Drop user: {user_name}")
response = self.http_client.request("DELETE", f"/admin/users/{user_name}", use_api_base=True, auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
print(res_json["message"])
else:
print(f"Fail to drop user, code: {res_json['code']}, message: {res_json['message']}")
def alter_user(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
user_name_tree: Tree = command["user_name"]
user_name: str = user_name_tree.children[0].strip("'\"")
password_tree: Tree = command["password"]
password: str = password_tree.children[0].strip("'\"")
print(f"Alter user: {user_name}, password: ******")
response = self.http_client.request("PUT", f"/admin/users/{user_name}/password",
json_body={"new_password": encrypt_password(password)}, use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
print(res_json["message"])
else:
print(f"Fail to alter password, code: {res_json['code']}, message: {res_json['message']}")
def create_user(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
user_name_tree: Tree = command["user_name"]
user_name: str = user_name_tree.children[0].strip("'\"")
password_tree: Tree = command["password"]
password: str = password_tree.children[0].strip("'\"")
role: str = command["role"]
print(f"Create user: {user_name}, password: ******, role: {role}")
# enpass1 = encrypt(password)
enc_password = encrypt_password(password)
response = self.http_client.request(method="POST", path="/admin/users",
json_body={"username": user_name, "password": enc_password, "role": role},
use_api_base=True, auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to create user {user_name}, code: {res_json['code']}, message: {res_json['message']}")
def activate_user(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
user_name_tree: Tree = command["user_name"]
user_name: str = user_name_tree.children[0].strip("'\"")
activate_tree: Tree = command["activate_status"]
activate_status: str = activate_tree.children[0].strip("'\"")
if activate_status.lower() in ["on", "off"]:
print(f"Alter user {user_name} activate status, turn {activate_status.lower()}.")
response = self.http_client.request("PUT", f"/admin/users/{user_name}/activate",
json_body={"activate_status": activate_status}, use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
print(res_json["message"])
else:
print(f"Fail to alter activate status, code: {res_json['code']}, message: {res_json['message']}")
else:
print(f"Unknown activate status: {activate_status}.")
def grant_admin(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
user_name_tree: Tree = command["user_name"]
user_name: str = user_name_tree.children[0].strip("'\"")
response = self.http_client.request("PUT", f"/admin/users/{user_name}/admin", use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
print(res_json["message"])
else:
print(
f"Fail to grant {user_name} admin authorization, code: {res_json['code']}, message: {res_json['message']}")
def revoke_admin(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
user_name_tree: Tree = command["user_name"]
user_name: str = user_name_tree.children[0].strip("'\"")
response = self.http_client.request("DELETE", f"/admin/users/{user_name}/admin", use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
print(res_json["message"])
else:
print(
f"Fail to revoke {user_name} admin authorization, code: {res_json['code']}, message: {res_json['message']}")
def create_role(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
role_name_tree: Tree = command["role_name"]
role_name: str = role_name_tree.children[0].strip("'\"")
desc_str: str = ""
if "description" in command and command["description"] is not None:
desc_tree: Tree = command["description"]
desc_str = desc_tree.children[0].strip("'\"")
print(f"create role name: {role_name}, description: {desc_str}")
response = self.http_client.request("POST", "/admin/roles",
json_body={"role_name": role_name, "description": desc_str},
use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to create role {role_name}, code: {res_json['code']}, message: {res_json['message']}")
def drop_role(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
role_name_tree: Tree = command["role_name"]
role_name: str = role_name_tree.children[0].strip("'\"")
print(f"drop role name: {role_name}")
response = self.http_client.request("DELETE", f"/admin/roles/{role_name}",
use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to drop role {role_name}, code: {res_json['code']}, message: {res_json['message']}")
def alter_role(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
role_name_tree: Tree = command["role_name"]
role_name: str = role_name_tree.children[0].strip("'\"")
desc_tree: Tree = command["description"]
desc_str: str = desc_tree.children[0].strip("'\"")
print(f"alter role name: {role_name}, description: {desc_str}")
response = self.http_client.request("PUT", f"/admin/roles/{role_name}",
json_body={"description": desc_str},
use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(
f"Fail to update role {role_name} with description: {desc_str}, code: {res_json['code']}, message: {res_json['message']}")
def list_roles(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
response = self.http_client.request("GET", "/admin/roles",
use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to list roles, code: {res_json['code']}, message: {res_json['message']}")
def show_role(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
role_name_tree: Tree = command["role_name"]
role_name: str = role_name_tree.children[0].strip("'\"")
print(f"show role: {role_name}")
response = self.http_client.request("GET", f"/admin/roles/{role_name}/permission",
use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to list roles, code: {res_json['code']}, message: {res_json['message']}")
def grant_permission(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
role_name_tree: Tree = command["role_name"]
role_name_str: str = role_name_tree.children[0].strip("'\"")
resource_tree: Tree = command["resource"]
resource_str: str = resource_tree.children[0].strip("'\"")
action_tree_list: list = command["actions"]
actions: list = []
for action_tree in action_tree_list:
action_str: str = action_tree.children[0].strip("'\"")
actions.append(action_str)
print(f"grant role_name: {role_name_str}, resource: {resource_str}, actions: {actions}")
response = self.http_client.request("POST", f"/admin/roles/{role_name_str}/permission",
json_body={"actions": actions, "resource": resource_str}, use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(
f"Fail to grant role {role_name_str} with {actions} on {resource_str}, code: {res_json['code']}, message: {res_json['message']}")
def revoke_permission(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
role_name_tree: Tree = command["role_name"]
role_name_str: str = role_name_tree.children[0].strip("'\"")
resource_tree: Tree = command["resource"]
resource_str: str = resource_tree.children[0].strip("'\"")
action_tree_list: list = command["actions"]
actions: list = []
for action_tree in action_tree_list:
action_str: str = action_tree.children[0].strip("'\"")
actions.append(action_str)
print(f"revoke role_name: {role_name_str}, resource: {resource_str}, actions: {actions}")
response = self.http_client.request("DELETE", f"/admin/roles/{role_name_str}/permission",
json_body={"actions": actions, "resource": resource_str}, use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(
f"Fail to revoke role {role_name_str} with {actions} on {resource_str}, code: {res_json['code']}, message: {res_json['message']}")
def alter_user_role(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
role_name_tree: Tree = command["role_name"]
role_name_str: str = role_name_tree.children[0].strip("'\"")
user_name_tree: Tree = command["user_name"]
user_name_str: str = user_name_tree.children[0].strip("'\"")
print(f"alter_user_role user_name: {user_name_str}, role_name: {role_name_str}")
response = self.http_client.request("PUT", f"/admin/users/{user_name_str}/role",
json_body={"role_name": role_name_str}, use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(
f"Fail to alter user: {user_name_str} to role {role_name_str}, code: {res_json['code']}, message: {res_json['message']}")
def show_user_permission(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
user_name_tree: Tree = command["user_name"]
user_name_str: str = user_name_tree.children[0].strip("'\"")
print(f"show_user_permission user_name: {user_name_str}")
response = self.http_client.request("GET", f"/admin/users/{user_name_str}/permission", use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(
f"Fail to show user: {user_name_str} permission, code: {res_json['code']}, message: {res_json['message']}")
def generate_key(self, command: dict[str, Any]) -> None:
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
username_tree: Tree = command["user_name"]
user_name: str = username_tree.children[0].strip("'\"")
print(f"Generating API key for user: {user_name}")
response = self.http_client.request("POST", f"/admin/users/{user_name}/keys", use_api_base=True,
auth_kind="admin")
res_json: dict[str, Any] = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(
f"Failed to generate key for user {user_name}, code: {res_json['code']}, message: {res_json['message']}")
def list_keys(self, command: dict[str, Any]) -> None:
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
username_tree: Tree = command["user_name"]
user_name: str = username_tree.children[0].strip("'\"")
print(f"Listing API keys for user: {user_name}")
response = self.http_client.request("GET", f"/admin/users/{user_name}/keys", use_api_base=True,
auth_kind="admin")
res_json: dict[str, Any] = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Failed to list keys for user {user_name}, code: {res_json['code']}, message: {res_json['message']}")
def drop_key(self, command: dict[str, Any]) -> None:
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
key_tree: Tree = command["key"]
key: str = key_tree.children[0].strip("'\"")
username_tree: Tree = command["user_name"]
user_name: str = username_tree.children[0].strip("'\"")
print(f"Dropping API key for user: {user_name}")
# URL encode the key to handle special characters
encoded_key: str = urllib.parse.quote(key, safe="")
response = self.http_client.request("DELETE", f"/admin/users/{user_name}/keys/{encoded_key}", use_api_base=True,
auth_kind="admin")
res_json: dict[str, Any] = response.json()
if response.status_code == 200:
print(res_json["message"])
else:
print(f"Failed to drop key for user {user_name}, code: {res_json['code']}, message: {res_json['message']}")
def set_variable(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
var_name_tree: Tree = command["var_name"]
var_name = var_name_tree.children[0].strip("'\"")
var_value_tree: Tree = command["var_value"]
var_value = var_value_tree.children[0].strip("'\"")
response = self.http_client.request("PUT", "/admin/variables",
json_body={"var_name": var_name, "var_value": var_value}, use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
print(res_json["message"])
else:
print(
f"Fail to set variable {var_name} to {var_value}, code: {res_json['code']}, message: {res_json['message']}")
def show_variable(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
var_name_tree: Tree = command["var_name"]
var_name = var_name_tree.children[0].strip("'\"")
response = self.http_client.request(method="GET", path="/admin/variables", json_body={"var_name": var_name},
use_api_base=True, auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to get variable {var_name}, code: {res_json['code']}, message: {res_json['message']}")
def list_variables(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
response = self.http_client.request("GET", "/admin/variables", use_api_base=True, auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to list variables, code: {res_json['code']}, message: {res_json['message']}")
def list_configs(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
response = self.http_client.request("GET", "/admin/configs", use_api_base=True, auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to list variables, code: {res_json['code']}, message: {res_json['message']}")
def list_environments(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
response = self.http_client.request("GET", "/admin/environments", use_api_base=True, auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to list variables, code: {res_json['code']}, message: {res_json['message']}")
def handle_list_datasets(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
username_tree: Tree = command["user_name"]
user_name: str = username_tree.children[0].strip("'\"")
print(f"Listing all datasets of user: {user_name}")
response = self.http_client.request("GET", f"/admin/users/{user_name}/datasets", use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
table_data = res_json["data"]
for t in table_data:
t.pop("avatar")
self._print_table_simple(table_data)
else:
print(f"Fail to get all datasets of {user_name}, code: {res_json['code']}, message: {res_json['message']}")
def handle_list_agents(self, command):
if self.server_type != "admin":
print("This command is only allowed in ADMIN mode")
username_tree: Tree = command["user_name"]
user_name: str = username_tree.children[0].strip("'\"")
print(f"Listing all agents of user: {user_name}")
response = self.http_client.request("GET", f"/admin/users/{user_name}/agents", use_api_base=True,
auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
table_data = res_json["data"]
for t in table_data:
t.pop("avatar")
self._print_table_simple(table_data)
else:
print(f"Fail to get all agents of {user_name}, code: {res_json['code']}, message: {res_json['message']}")
def show_current_user(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
print("show current user")
def create_model_provider(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
llm_factory: str = command["provider_name"]
api_key: str = command["provider_key"]
payload = {"api_key": api_key, "llm_factory": llm_factory}
response = self.http_client.request("POST", "/llm/set_api_key", json_body=payload, use_api_base=False,
auth_kind="web")
res_json = response.json()
if response.status_code == 200 and res_json["code"] == 0:
print(f"Success to add model provider {llm_factory}")
else:
print(f"Fail to add model provider {llm_factory}, code: {res_json['code']}, message: {res_json['message']}")
def drop_model_provider(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
llm_factory: str = command["provider_name"]
payload = {"llm_factory": llm_factory}
response = self.http_client.request("POST", "/llm/delete_factory", json_body=payload, use_api_base=False,
auth_kind="web")
res_json = response.json()
if response.status_code == 200 and res_json["code"] == 0:
print(f"Success to drop model provider {llm_factory}")
else:
print(
f"Fail to drop model provider {llm_factory}, code: {res_json['code']}, message: {res_json['message']}")
def set_default_model(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
model_type: str = command["model_type"]
model_id: str = command["model_id"]
self._set_default_models(model_type, model_id)
def reset_default_model(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
model_type: str = command["model_type"]
self._set_default_models(model_type, "")
def list_user_datasets(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
iterations = command.get("iterations", 1)
if iterations > 1:
response = self.http_client.request("POST", "/kb/list", use_api_base=False, auth_kind="web",
iterations=iterations)
return response
else:
response = self.http_client.request("POST", "/kb/list", use_api_base=False, auth_kind="web")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"]["kbs"])
else:
print(f"Fail to list datasets, code: {res_json['code']}, message: {res_json['message']}")
return None
def create_user_dataset(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
payload = {
"name": command["dataset_name"],
"embd_id": command["embedding"]
}
if "parser_id" in command:
payload["parser_id"] = command["parser"]
if "pipeline" in command:
payload["pipeline_id"] = command["pipeline"]
response = self.http_client.request("POST", "/kb/create", json_body=payload, use_api_base=False,
auth_kind="web")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to create datasets, code: {res_json['code']}, message: {res_json['message']}")
def drop_user_dataset(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
dataset_name = command["dataset_name"]
dataset_id = self._get_dataset_id(dataset_name)
if dataset_id is None:
return
payload = {"kb_id": dataset_id}
response = self.http_client.request("POST", "/kb/rm", json_body=payload, use_api_base=False, auth_kind="web")
res_json = response.json()
if response.status_code == 200:
print(f"Drop dataset {dataset_name} successfully")
else:
print(f"Fail to drop datasets, code: {res_json['code']}, message: {res_json['message']}")
def list_user_dataset_files(self, command_dict):
if self.server_type != "user":
print("This command is only allowed in USER mode")
dataset_name = command_dict["dataset_name"]
dataset_id = self._get_dataset_id(dataset_name)
if dataset_id is None:
return
res_json = self._list_documents(dataset_name, dataset_id)
if res_json is None:
return
self._print_table_simple(res_json)
def list_user_agents(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
response = self.http_client.request("GET", "/canvas/list", use_api_base=False, auth_kind="web")
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to list datasets, code: {res_json['code']}, message: {res_json['message']}")
def list_user_chats(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
res_json = self._list_chats(command)
if res_json is None:
return None
if "iterations" in command:
# for benchmark
return res_json
self._print_table_simple(res_json)
def create_user_chat(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
'''
description
:
""
icon
:
""
language
:
"English"
llm_id
:
"glm-4-flash@ZHIPU-AI"
llm_setting
:
{}
name
:
"xx"
prompt_config
:
{empty_response: "", prologue: "Hi! I'm your assistant. What can I do for you?", quote: true,…}
empty_response
:
""
keyword
:
false
parameters
:
[{key: "knowledge", optional: false}]
prologue
:
"Hi! I'm your assistant. What can I do for you?"
quote
:
true
reasoning
:
false
refine_multiturn
:
false
system
:
"You are an intelligent assistant. Your primary function is to answer questions based strictly on the provided knowledge base.\n\n **Essential Rules:**\n - Your answer must be derived **solely** from this knowledge base: `{knowledge}`.\n - **When information is available**: Summarize the content to give a detailed answer.\n - **When information is unavailable**: Your response must contain this exact sentence: \"The answer you are looking for is not found in the knowledge base!\"\n - **Always consider** the entire conversation history."
toc_enhance
:
false
tts
:
false
use_kg
:
false
similarity_threshold
:
0.2
top_n
:
8
vector_similarity_weight
:
0.3
'''
chat_name = command["chat_name"]
payload = {
"description": "",
"icon": "",
"language": "English",
"llm_setting": {},
"prompt_config": {
"empty_response": "",
"prologue": "Hi! I'm your assistant. What can I do for you?",
"quote": True,
"keyword": False,
"tts": False,
"system": "You are an intelligent assistant. Your primary function is to answer questions based strictly on the provided knowledge base.\n\n **Essential Rules:**\n - Your answer must be derived **solely** from this knowledge base: `{knowledge}`.\n - **When information is available**: Summarize the content to give a detailed answer.\n - **When information is unavailable**: Your response must contain this exact sentence: \"The answer you are looking for is not found in the knowledge base!\"\n - **Always consider** the entire conversation history.",
"refine_multiturn": False,
"use_kg": False,
"reasoning": False,
"parameters": [
{
"key": "knowledge",
"optional": False
}
],
"toc_enhance": False
},
"similarity_threshold": 0.2,
"top_n": 8,
"vector_similarity_weight": 0.3
}
payload.update({"name": chat_name})
response = self.http_client.request("POST", "/dialog/set", json_body=payload, use_api_base=False,
auth_kind="web")
res_json = response.json()
if response.status_code == 200 and res_json["code"] == 0:
print(f"Success to create chat: {chat_name}")
else:
print(f"Fail to create chat {chat_name}, code: {res_json['code']}, message: {res_json['message']}")
def drop_user_chat(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
chat_name = command["chat_name"]
res_json = self._list_chats(command)
to_drop_chat_ids = []
for elem in res_json:
if elem["name"] == chat_name:
to_drop_chat_ids.append(elem["id"])
payload = {"dialog_ids": to_drop_chat_ids}
response = self.http_client.request("POST", "/dialog/rm", json_body=payload, use_api_base=False,
auth_kind="web")
res_json = response.json()
if response.status_code == 200 and res_json["code"] == 0:
print(f"Success to drop chat: {chat_name}")
else:
print(f"Fail to drop chat {chat_name}, code: {res_json['code']}, message: {res_json['message']}")
def _get_chat_id_by_name(self, chat_name):
"""Get chat (dialog) ID by name."""
res_json = self._list_chats({})
if res_json is None:
return None
for elem in res_json:
if elem["name"] == chat_name:
return elem["id"]
print(f"Chat '{chat_name}' not found")
return None
def _list_chat_sessions(self, dialog_id):
"""List all sessions (conversations) for a given dialog."""
response = self.http_client.request("GET", f"/conversation/list?dialog_id={dialog_id}", use_api_base=False,
auth_kind="web")
res_json = response.json()
if response.status_code == 200 and res_json["code"] == 0:
return res_json["data"]
else:
print(f"Fail to list chat sessions, code: {res_json['code']}, message: {res_json['message']}")
return None
def create_chat_session(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
chat_name = command["chat_name"]
dialog_id = self._get_chat_id_by_name(chat_name)
if dialog_id is None:
return
conversation_id = str(uuid.uuid4()).replace("-", "")
payload = {
"conversation_id": conversation_id,
"is_new": True,
"dialog_id": dialog_id
}
response = self.http_client.request("POST", "/conversation/set", json_body=payload, use_api_base=False,
auth_kind="web")
res_json = response.json()
if response.status_code == 200 and res_json["code"] == 0:
print(f"Success to create chat session for chat: {chat_name}")
else:
print(f"Fail to create chat session for chat {chat_name}, code: {res_json['code']}, message: {res_json['message']}")
def drop_chat_session(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
chat_name = command["chat_name"]
session_id = command["session_id"]
dialog_id = self._get_chat_id_by_name(chat_name)
if dialog_id is None:
return
sessions = self._list_chat_sessions(dialog_id)
if sessions is None:
return
to_drop_session_ids = []
for session in sessions:
if session["id"] == session_id:
to_drop_session_ids.append(session["id"])
if not to_drop_session_ids:
print(f"Chat session '{session_id}' not found in chat '{chat_name}'")
return
payload = {"conversation_ids": to_drop_session_ids}
response = self.http_client.request("POST", "/conversation/rm", json_body=payload, use_api_base=False,
auth_kind="web")
res_json = response.json()
if response.status_code == 200 and res_json["code"] == 0:
print(f"Success to drop chat session '{session_id}' from chat: {chat_name}")
else:
print(f"Fail to drop chat session '{session_id}' from chat {chat_name}, code: {res_json['code']}, message: {res_json['message']}")
def list_chat_sessions(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
chat_name = command["chat_name"]
dialog_id = self._get_chat_id_by_name(chat_name)
if dialog_id is None:
return
sessions = self._list_chat_sessions(dialog_id)
if sessions is None:
return
# Add chat_name to each session for display
for session in sessions:
session["chat_name"] = chat_name
if "iterations" in command:
# for benchmark
return sessions
self._print_table_simple(sessions)
def chat_on_session(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
message = command["message"]
session_id = command["session_id"]
# Prepare payload for completion API
# Note: stream parameter is not sent, server defaults to stream=True
payload = {
"conversation_id": session_id,
"messages": [{"role": "user", "content": message}]
}
response = self.http_client.request("POST", "/conversation/completion", json_body=payload,
use_api_base=False, auth_kind="web", stream=True)
if response.status_code != 200:
print(f"Fail to chat on session, status code: {response.status_code}")
return
print("Assistant: ", end="", flush=True)
full_answer = ""
for line in response.iter_lines():
if not line:
continue
line_str = line.decode('utf-8')
if not line_str.startswith('data:'):
continue
data_str = line_str[5:].strip()
if data_str == '[DONE]':
break
try:
data_json = json.loads(data_str)
if data_json.get("code") != 0:
print(f"\nFail to chat on session, code: {data_json.get('code')}, message: {data_json.get('message', '')}")
return
# Check if it's the final message
if data_json.get("data") is True:
break
answer = data_json.get("data", {}).get("answer", "")
if answer:
print(answer, end="", flush=True)
full_answer += answer
except json.JSONDecodeError:
continue
print() # Final newline
def list_user_model_providers(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
response = self.http_client.request("GET", "/llm/my_llms", use_api_base=False, auth_kind="web")
res_json = response.json()
if response.status_code == 200:
new_input = []
for key, value in res_json["data"].items():
new_input.append({"model provider": key, "models": value})
self._print_table_simple(new_input)
else:
print(f"Fail to list model provider, code: {res_json['code']}, message: {res_json['message']}")
def list_user_default_models(self, command):
if self.server_type != "user":
print("This command is only allowed in USER mode")
res_json = self._get_default_models()
if res_json is None:
return
else:
new_input = []
for key, value in res_json.items():
if key == "asr_id" and value != "":
new_input.append({"model_category": "ASR", "model_name": value})
elif key == "embd_id" and value != "":
new_input.append({"model_category": "Embedding", "model_name": value})
elif key == "llm_id" and value != "":
new_input.append({"model_category": "LLM", "model_name": value})
elif key == "rerank_id" and value != "":
new_input.append({"model_category": "Reranker", "model_name": value})
elif key == "tts_id" and value != "":
new_input.append({"model_category": "TTS", "model_name": value})
elif key == "img2txt_id" and value != "":
new_input.append({"model_category": "VLM", "model_name": value})
else:
continue
self._print_table_simple(new_input)
def parse_dataset_docs(self, command_dict):
if self.server_type != "user":
print("This command is only allowed in USER mode")
dataset_name = command_dict["dataset_name"]
dataset_id = self._get_dataset_id(dataset_name)
if dataset_id is None:
return
res_json = self._list_documents(dataset_name, dataset_id)
if res_json is None:
return
document_names = command_dict["document_names"]
document_ids = []
to_parse_doc_names = []
for doc in res_json:
doc_name = doc["name"]
if doc_name in document_names:
document_ids.append(doc["id"])
document_names.remove(doc_name)
to_parse_doc_names.append(doc_name)
if len(document_ids) == 0:
print(f"No documents found in {dataset_name}")
return
if len(document_names) != 0:
print(f"Documents {document_names} not found in {dataset_name}")
payload = {"doc_ids": document_ids, "run": 1}
response = self.http_client.request("POST", "/document/run", json_body=payload, use_api_base=False,
auth_kind="web")
res_json = response.json()
if response.status_code == 200 and res_json["code"] == 0:
print(f"Success to parse {to_parse_doc_names} of {dataset_name}")
else:
print(
f"Fail to parse documents {res_json["data"]["docs"]}, code: {res_json['code']}, message: {res_json['message']}")
def parse_dataset(self, command_dict):
if self.server_type != "user":
print("This command is only allowed in USER mode")
dataset_name = command_dict["dataset_name"]
dataset_id = self._get_dataset_id(dataset_name)
if dataset_id is None:
return
res_json = self._list_documents(dataset_name, dataset_id)
if res_json is None:
return
document_ids = []
for doc in res_json:
document_ids.append(doc["id"])
payload = {"doc_ids": document_ids, "run": 1}
response = self.http_client.request("POST", "/document/run", json_body=payload, use_api_base=False,
auth_kind="web")
res_json = response.json()
if response.status_code == 200 and res_json["code"] == 0:
pass
else:
print(f"Fail to parse dataset {dataset_name}, code: {res_json['code']}, message: {res_json['message']}")
if command_dict["method"] == "async":
print(f"Success to start parse dataset {dataset_name}")
return
else:
print(f"Start to parse dataset {dataset_name}, please wait...")
if self._wait_parse_done(dataset_name, dataset_id):
print(f"Success to parse dataset {dataset_name}")
else:
print(f"Parse dataset {dataset_name} timeout")
def import_docs_into_dataset(self, command_dict):
if self.server_type != "user":
print("This command is only allowed in USER mode")
dataset_name = command_dict["dataset_name"]
dataset_id = self._get_dataset_id(dataset_name)
if dataset_id is None:
return
document_paths = command_dict["document_paths"]
paths = [Path(p) for p in document_paths]
fields = []
file_handles = []
try:
for path in paths:
fh = path.open("rb")
fields.append(("file", (path.name, fh)))
file_handles.append(fh)
fields.append(("kb_id", dataset_id))
encoder = MultipartEncoder(fields=fields)
headers = {"Content-Type": encoder.content_type}
response = self.http_client.request(
"POST",
"/document/upload",
headers=headers,
data=encoder,
json_body=None,
params=None,
stream=False,
auth_kind="web",
use_api_base=False
)
res = response.json()
if res.get("code") == 0:
print(f"Success to import documents into dataset {dataset_name}")
else:
print(f"Fail to import documents: code: {res['code']}, message: {res['message']}")
except Exception as exc:
print(f"Fail to import document into dataset: {dataset_name}, error: {exc}")
finally:
for fh in file_handles:
fh.close()
def search_on_datasets(self, command_dict):
if self.server_type != "user":
print("This command is only allowed in USER mode")
dataset_names = command_dict["datasets"]
dataset_ids = []
for dataset_name in dataset_names:
dataset_id = self._get_dataset_id(dataset_name)
if dataset_id is None:
return
dataset_ids.append(dataset_id)
payload = {
"question": command_dict["question"],
"kb_id": dataset_ids,
"similarity_threshold": 0.2,
"vector_similarity_weight": 0.3,
# "top_k": 1024,
# "kb_id": command_dict["datasets"][0],
}
iterations = command_dict.get("iterations", 1)
if iterations > 1:
response = self.http_client.request("POST", "/chunk/retrieval_test", json_body=payload, use_api_base=False,
auth_kind="web", iterations=iterations)
return response
else:
response = self.http_client.request("POST", "/chunk/retrieval_test", json_body=payload, use_api_base=False,
auth_kind="web")
res_json = response.json()
if response.status_code == 200:
if res_json["code"] == 0:
self._print_table_simple(res_json["data"]["chunks"])
else:
print(
f"Fail to search datasets: {dataset_names}, code: {res_json['code']}, message: {res_json['message']}")
else:
print(
f"Fail to search datasets: {dataset_names}, code: {res_json['code']}, message: {res_json['message']}")
def show_version(self, command):
if self.server_type == "admin":
response = self.http_client.request("GET", "/admin/version", use_api_base=True, auth_kind="admin")
else:
response = self.http_client.request("GET", "/system/version", use_api_base=False, auth_kind="admin")
res_json = response.json()
if response.status_code == 200:
if self.server_type == "admin":
self._print_table_simple(res_json["data"])
else:
self._print_table_simple({"version": res_json["data"]})
else:
print(f"Fail to show version, code: {res_json['code']}, message: {res_json['message']}")
def _wait_parse_done(self, dataset_name: str, dataset_id: str):
start = time.monotonic()
while True:
docs = self._list_documents(dataset_name, dataset_id)
if docs is None:
return False
all_done = True
for doc in docs:
if doc.get("run") != "3":
print(f"Document {doc["name"]} is not done, status: {doc.get("run")}")
all_done = False
break
if all_done:
return True
if time.monotonic() - start > 60:
return False
time.sleep(0.5)
def _list_documents(self, dataset_name: str, dataset_id: str):
response = self.http_client.request("POST", f"/document/list?kb_id={dataset_id}", use_api_base=False,
auth_kind="web")
res_json = response.json()
if response.status_code != 200:
print(
f"Fail to list files from dataset {dataset_name}, code: {res_json['code']}, message: {res_json['message']}")
return None
return res_json["data"]["docs"]
def _get_dataset_id(self, dataset_name: str):
response = self.http_client.request("POST", "/kb/list", use_api_base=False, auth_kind="web")
res_json = response.json()
if response.status_code != 200:
print(f"Fail to list datasets, code: {res_json['code']}, message: {res_json['message']}")
return None
dataset_list = res_json["data"]["kbs"]
dataset_id: str = ""
for dataset in dataset_list:
if dataset["name"] == dataset_name:
dataset_id = dataset["id"]
if dataset_id == "":
print(f"Dataset {dataset_name} not found")
return None
return dataset_id
def _list_chats(self, command):
iterations = command.get("iterations", 1)
if iterations > 1:
response = self.http_client.request("POST", "/dialog/next", use_api_base=False, auth_kind="web",
iterations=iterations)
return response
else:
response = self.http_client.request("POST", "/dialog/next", use_api_base=False, auth_kind="web",
iterations=iterations)
res_json = response.json()
if response.status_code == 200 and res_json["code"] == 0:
return res_json["data"]["dialogs"]
else:
print(f"Fail to list datasets, code: {res_json['code']}, message: {res_json['message']}")
return None
def _get_default_models(self):
response = self.http_client.request("GET", "/user/tenant_info", use_api_base=False, auth_kind="web")
res_json = response.json()
if response.status_code == 200:
if res_json["code"] == 0:
return res_json["data"]
else:
print(f"Fail to list user default models, code: {res_json['code']}, message: {res_json['message']}")
return None
else:
print(f"Fail to list user default models, HTTP code: {response.status_code}, message: {res_json}")
return None
def _set_default_models(self, model_type, model_id):
current_payload = self._get_default_models()
if current_payload is None:
return
else:
current_payload.update({model_type: model_id})
payload = {
"tenant_id": current_payload["tenant_id"],
"llm_id": current_payload["llm_id"],
"embd_id": current_payload["embd_id"],
"img2txt_id": current_payload["img2txt_id"],
"asr_id": current_payload["asr_id"],
"tts_id": current_payload["tts_id"],
}
response = self.http_client.request("POST", "/user/set_tenant_info", json_body=payload, use_api_base=False,
auth_kind="web")
res_json = response.json()
if response.status_code == 200 and res_json["code"] == 0:
print(f"Success to set default llm to {model_type}")
else:
print(f"Fail to set default llm to {model_type}, code: {res_json['code']}, message: {res_json['message']}")
def _format_service_detail_table(self, data):
if isinstance(data, list):
return data
if not all([isinstance(v, list) for v in data.values()]):
# normal table
return data
# handle task_executor heartbeats map, for example {'name': [{'done': 2, 'now': timestamp1}, {'done': 3, 'now': timestamp2}]
task_executor_list = []
for k, v in data.items():
# display latest status
heartbeats = sorted(v, key=lambda x: x["now"], reverse=True)
task_executor_list.append(
{
"task_executor_name": k,
**heartbeats[0],
}
if heartbeats
else {"task_executor_name": k}
)
return task_executor_list
    def _print_table_simple(self, data):
        """Render *data* (one dict row or a list of dict rows) as an ASCII table on stdout.

        Column widths account for characters assumed full-width (two terminal
        cells); over-wide values are truncated with "...".
        """
        if not data:
            print("No data to print")
            return
        if isinstance(data, dict):
            # handle single row data
            data = [data]
        # Columns = union of keys across all rows, sorted for deterministic order.
        columns = list(set().union(*(d.keys() for d in data)))
        columns.sort()
        col_widths = {}
        def get_string_width(text):
            """Display width in terminal cells: 1 for ASCII printables, 2 for everything else."""
            half_width_chars = " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\t\n\r"
            width = 0
            for char in text:
                if char in half_width_chars:
                    width += 1
                else:
                    # NOTE(review): any non-ASCII char is counted as 2 cells —
                    # correct for CJK, an over-estimate for e.g. accented Latin.
                    width += 2
            return width
        # Width of each column = widest of header and all cell values, min 2.
        for col in columns:
            max_width = get_string_width(str(col))
            for item in data:
                value_len = get_string_width(str(item.get(col, "")))
                if value_len > max_width:
                    max_width = value_len
            col_widths[col] = max(2, max_width)
        # Generate delimiter
        separator = "+" + "+".join(["-" * (col_widths[col] + 2) for col in columns]) + "+"
        # Print header
        print(separator)
        header = "|" + "|".join([f" {col:<{col_widths[col]}} " for col in columns]) + "|"
        print(header)
        print(separator)
        # Print data
        for item in data:
            row = "|"
            for col in columns:
                value = str(item.get(col, ""))
                if get_string_width(value) > col_widths[col]:
                    value = value[: col_widths[col] - 3] + "..."
                # Shrink the format width by the extra cells wide characters
                # occupy so the printed cell lines up with the separator row.
                row += f" {value:<{col_widths[col] - (get_string_width(value) - len(value))}} |"
            print(row)
        print(separator)
def run_command(client: RAGFlowClient, command_dict: dict):
    """Dispatch one parsed CLI command to the matching RAGFlowClient method.

    Only some commands propagate a return value (ping_server, the user-side
    listing/search commands used by benchmarking); the rest print their
    results and implicitly return None.
    """
    command_type = command_dict["type"]
    match command_type:
        case "benchmark":
            run_benchmark(client, command_dict)
        case "login_user":
            client.login_user(command_dict)
        case "ping_server":
            return client.ping_server(command_dict)
        case "register_user":
            client.register_user(command_dict)
        case "list_services":
            client.list_services()
        case "show_service":
            client.show_service(command_dict)
        case "restart_service":
            client.restart_service(command_dict)
        case "shutdown_service":
            client.shutdown_service(command_dict)
        case "startup_service":
            client.startup_service(command_dict)
        case "list_users":
            client.list_users(command_dict)
        case "show_user":
            client.show_user(command_dict)
        case "drop_user":
            client.drop_user(command_dict)
        case "alter_user":
            client.alter_user(command_dict)
        case "create_user":
            client.create_user(command_dict)
        case "activate_user":
            client.activate_user(command_dict)
        case "list_datasets":
            client.handle_list_datasets(command_dict)
        case "list_agents":
            client.handle_list_agents(command_dict)
        case "create_role":
            client.create_role(command_dict)
        case "drop_role":
            client.drop_role(command_dict)
        case "alter_role":
            client.alter_role(command_dict)
        case "list_roles":
            client.list_roles(command_dict)
        case "show_role":
            client.show_role(command_dict)
        case "grant_permission":
            client.grant_permission(command_dict)
        case "revoke_permission":
            client.revoke_permission(command_dict)
        case "alter_user_role":
            client.alter_user_role(command_dict)
        case "show_user_permission":
            client.show_user_permission(command_dict)
        case "show_version":
            client.show_version(command_dict)
        case "grant_admin":
            client.grant_admin(command_dict)
        case "revoke_admin":
            client.revoke_admin(command_dict)
        case "generate_key":
            client.generate_key(command_dict)
        case "list_keys":
            client.list_keys(command_dict)
        case "drop_key":
            client.drop_key(command_dict)
        case "set_variable":
            client.set_variable(command_dict)
        case "show_variable":
            client.show_variable(command_dict)
        case "list_variables":
            client.list_variables(command_dict)
        case "list_configs":
            client.list_configs(command_dict)
        case "list_environments":
            client.list_environments(command_dict)
        case "create_model_provider":
            client.create_model_provider(command_dict)
        case "drop_model_provider":
            client.drop_model_provider(command_dict)
        case "show_current_user":
            client.show_current_user(command_dict)
        case "set_default_model":
            client.set_default_model(command_dict)
        case "reset_default_model":
            client.reset_default_model(command_dict)
        case "list_user_datasets":
            return client.list_user_datasets(command_dict)
        case "create_user_dataset":
            client.create_user_dataset(command_dict)
        case "drop_user_dataset":
            client.drop_user_dataset(command_dict)
        case "list_user_dataset_files":
            return client.list_user_dataset_files(command_dict)
        case "list_user_agents":
            return client.list_user_agents(command_dict)
        case "list_user_chats":
            return client.list_user_chats(command_dict)
        case "create_user_chat":
            client.create_user_chat(command_dict)
        case "drop_user_chat":
            client.drop_user_chat(command_dict)
        case "create_chat_session":
            client.create_chat_session(command_dict)
        case "drop_chat_session":
            client.drop_chat_session(command_dict)
        case "list_chat_sessions":
            return client.list_chat_sessions(command_dict)
        case "chat_on_session":
            client.chat_on_session(command_dict)
        case "list_user_model_providers":
            client.list_user_model_providers(command_dict)
        case "list_user_default_models":
            client.list_user_default_models(command_dict)
        case "parse_dataset_docs":
            client.parse_dataset_docs(command_dict)
        case "parse_dataset":
            client.parse_dataset(command_dict)
        case "import_docs_into_dataset":
            client.import_docs_into_dataset(command_dict)
        case "search_on_datasets":
            return client.search_on_datasets(command_dict)
        case "meta":
            # Backslash meta commands (\h, \q, ...) are handled locally.
            _handle_meta_command(command_dict)
        case _:
            print(f"Command '{command_type}' would be executed with API")
def _handle_meta_command(command: dict):
meta_command = command["command"]
args = command.get("args", [])
if meta_command in ["?", "h", "help"]:
show_help()
elif meta_command in ["q", "quit", "exit"]:
print("Goodbye!")
else:
print(f"Meta command '{meta_command}' with args {args}")
def show_help():
    """Print the interactive CLI's command and meta-command reference."""
    help_text = """
    Commands:
      LIST SERVICES
      SHOW SERVICE <service>
      STARTUP SERVICE <service>
      SHUTDOWN SERVICE <service>
      RESTART SERVICE <service>
      LIST USERS
      SHOW USER <user>
      DROP USER <user>
      CREATE USER <user> <password>
      ALTER USER PASSWORD <user> <new_password>
      ALTER USER ACTIVE <user> <on/off>
      LIST DATASETS OF <user>
      LIST AGENTS OF <user>
      CREATE ROLE <role>
      DROP ROLE <role>
      ALTER ROLE <role> SET DESCRIPTION <description>
      LIST ROLES
      SHOW ROLE <role>
      GRANT <action_list> ON <function> TO ROLE <role>
      REVOKE <action_list> ON <function> TO ROLE <role>
      ALTER USER <user> SET ROLE <role>
      SHOW USER PERMISSION <user>
      SHOW VERSION
      GRANT ADMIN <user>
      REVOKE ADMIN <user>
      GENERATE KEY FOR USER <user>
      LIST KEYS OF <user>
      DROP KEY <key> OF <user>
    Meta Commands:
      \\?, \\h, \\help    Show this help
      \\q, \\quit, \\exit    Quit the CLI
    """
    print(help_text)
def run_benchmark(client: RAGFlowClient, command_dict: dict):
    """Run one command repeatedly (and optionally across processes) and report QPS.

    command_dict keys: "command" (the command to benchmark), optional
    "concurrency" (worker-process count, default 1) and "iterations"
    (per-worker repeat count, default 1).  Prints a summary; returns None.
    """
    concurrency = command_dict.get("concurrency", 1)
    iterations = command_dict.get("iterations", 1)
    command: dict = command_dict["command"]
    command.update({"iterations": iterations})
    command_type = command["type"]

    def count_successes(response_list) -> int:
        # One place for the success rule (previously duplicated in both
        # branches): HTTP 200, and for JSON commands an application code of 0;
        # ping_server has no JSON body to inspect.
        successes = 0
        for response in response_list:
            if response.status_code != 200:
                continue
            if command_type == "ping_server":
                successes += 1
            elif response.json()["code"] == 0:
                successes += 1
        return successes

    if concurrency < 1:
        print("Concurrency must be greater than 0")
        return
    if concurrency == 1:
        result = run_command(client, command)
        success_count = count_successes(result["response_list"])
        total_duration = result["duration"]
        qps = iterations / total_duration if total_duration > 0 else None
        print(f"command: {command}, Concurrency: {concurrency}, iterations: {iterations}")
        print(
            f"total duration: {total_duration:.4f}s, QPS: {qps}, COMMAND_COUNT: {iterations}, SUCCESS: {success_count}, FAILURE: {iterations - success_count}")
        return
    results: List[Optional[dict]] = [None] * concurrency
    # "spawn" keeps worker start-up identical across platforms.
    mp_context = mp.get_context("spawn")
    start_time = time.perf_counter()
    with ProcessPoolExecutor(max_workers=concurrency, mp_context=mp_context) as executor:
        future_map = {executor.submit(run_command, client, command): idx for idx in range(concurrency)}
        for future in as_completed(future_map):
            results[future_map[future]] = future.result()
    end_time = time.perf_counter()
    success_count = sum(count_successes(result["response_list"]) for result in results)
    total_duration = end_time - start_time
    total_command_count = iterations * concurrency
    qps = total_command_count / total_duration if total_duration > 0 else None
    print(f"command: {command}, Concurrency: {concurrency} , iterations: {iterations}")
    print(
        f"total duration: {total_duration:.4f}s, QPS: {qps}, COMMAND_COUNT: {total_command_count}, SUCCESS: {success_count}, FAILURE: {total_command_count - success_count}")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "admin/client/ragflow_client.py",
"license": "Apache License 2.0",
"lines": 1485,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:admin/client/user.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from http_client import HttpClient
class AuthException(Exception):
    """Raised when registration or login against the RAGFlow server fails."""

    def __init__(self, message, code=401):
        super().__init__(message)
        # Keep both fields so callers can report the HTTP-style code with the text.
        self.message = message
        self.code = code
def encrypt_password(password_plain: str) -> str:
    """Encrypt a plaintext password with the server-side crypt helper.

    Raises AuthException with an installation hint when the optional crypto
    dependency cannot be imported.
    """
    try:
        from api.utils.crypt import crypt as _crypt
    except Exception as exc:
        hint = "Password encryption unavailable; install pycryptodomex (uv sync --python 3.12 --group test)."
        raise AuthException(hint) from exc
    return _crypt(password_plain)
def register_user(client: HttpClient, email: str, nickname: str, password: str) -> None:
    """Create a user account; a duplicate registration is treated as success.

    Raises AuthException on any other server-reported failure.
    """
    body = {
        "email": email,
        "nickname": nickname,
        "password": encrypt_password(password),
    }
    res = client.request_json("POST", "/user/register", use_api_base=False, auth_kind=None, json_body=body)
    if res.get("code") == 0:
        return
    message = res.get("message", "")
    if "has already registered" in message:
        # Idempotent: re-running against an existing account is not an error.
        return
    raise AuthException(f"Register failed: {message}")
def login_user(client: HttpClient, server_type: str, email: str, password: str) -> str:
    """Log in and return the Authorization token issued by the server.

    Admin mode uses /admin/login on the API base; user mode uses /user/login.
    Raises AuthException on bad JSON, a non-zero code, or a missing token.
    """
    body = {"email": email, "password": encrypt_password(password)}
    if server_type == "admin":
        endpoint, on_api_base = "/admin/login", True
    else:
        endpoint, on_api_base = "/user/login", False
    response = client.request("POST", endpoint, use_api_base=on_api_base, auth_kind=None, json_body=body)
    try:
        res = response.json()
    except Exception as exc:
        raise AuthException(f"Login failed: invalid JSON response ({exc})") from exc
    if res.get("code") != 0:
        raise AuthException(f"Login failed: {res.get('message')}")
    token = response.headers.get("Authorization")
    if not token:
        raise AuthException("Login failed: missing Authorization header")
    return token
| {
"repo_id": "infiniflow/ragflow",
"file_path": "admin/client/user.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:test/testcases/test_web_api/test_api_app/test_api_tokens.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import api_new_token, api_rm_token, api_stats, api_token_list, batch_create_dialogs
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
# (auth, expected code, expected message fragment) triples covering both the
# missing-token and the malformed-token rejection paths.
INVALID_AUTH_CASES = [
    (None, 401, "Unauthorized"),
    (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "Unauthorized"),
]
class TestAuthorization:
    """Every API-token endpoint must reject missing or invalid web-API tokens."""

    @pytest.mark.p2
    @pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
    def test_auth_invalid_new_token(self, invalid_auth, expected_code, expected_fragment):
        """Token creation requires valid auth."""
        res = api_new_token(invalid_auth, {"dialog_id": "dummy_dialog_id"})
        assert res["code"] == expected_code, res
        assert expected_fragment in res["message"], res

    @pytest.mark.p2
    @pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
    def test_auth_invalid_token_list(self, invalid_auth, expected_code, expected_fragment):
        """Token listing requires valid auth."""
        res = api_token_list(invalid_auth, {"dialog_id": "dummy_dialog_id"})
        assert res["code"] == expected_code, res
        assert expected_fragment in res["message"], res

    @pytest.mark.p2
    @pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
    def test_auth_invalid_rm(self, invalid_auth, expected_code, expected_fragment):
        """Token removal requires valid auth."""
        res = api_rm_token(invalid_auth, {"tokens": ["dummy_token"], "tenant_id": "dummy_tenant"})
        assert res["code"] == expected_code, res
        assert expected_fragment in res["message"], res

    @pytest.mark.p2
    @pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
    def test_auth_invalid_stats(self, invalid_auth, expected_code, expected_fragment):
        """Usage stats require valid auth."""
        res = api_stats(invalid_auth)
        assert res["code"] == expected_code, res
        assert expected_fragment in res["message"], res
@pytest.mark.usefixtures("clear_dialogs")
class TestApiTokens:
    """Happy-path and negative checks for token creation, listing, removal and stats."""

    @pytest.mark.p2
    def test_token_lifecycle(self, WebApiAuth):
        """Create a token for a fresh dialog, see it listed, then remove it."""
        dialog_id = batch_create_dialogs(WebApiAuth, 1)[0]
        create_res = api_new_token(WebApiAuth, {"dialog_id": dialog_id})
        assert create_res["code"] == 0, create_res
        token = create_res["data"]["token"]
        tenant_id = create_res["data"]["tenant_id"]
        list_res = api_token_list(WebApiAuth, {"dialog_id": dialog_id})
        assert list_res["code"] == 0, list_res
        assert any(item["token"] == token for item in list_res["data"]), list_res
        rm_res = api_rm_token(WebApiAuth, {"tokens": [token], "tenant_id": tenant_id})
        assert rm_res["code"] == 0, rm_res
        assert rm_res["data"] is True, rm_res

    @pytest.mark.p2
    def test_stats_basic(self, WebApiAuth):
        """The stats payload exposes all expected usage counters."""
        res = api_stats(WebApiAuth)
        assert res["code"] == 0, res
        for key in ["pv", "uv", "speed", "tokens", "round", "thumb_up"]:
            assert key in res["data"], res

    @pytest.mark.p3
    def test_rm_missing_tokens(self, WebApiAuth):
        """Removing without the 'tokens' argument is a 101 validation error."""
        res = api_rm_token(WebApiAuth, {"tenant_id": "dummy_tenant"})
        assert res["code"] == 101, res
        assert "required argument are missing" in res["message"], res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_api_app/test_api_tokens.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_connector_app/test_connector_oauth_contract.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pytest
import requests
from configs import HOST_ADDRESS, VERSION
# Endpoint roots exercised by this module.
CONNECTOR_BASE_URL = f"{HOST_ADDRESS}/{VERSION}/connector"
LLM_API_KEY_URL = f"{HOST_ADDRESS}/{VERSION}/llm/set_api_key"
LANGFUSE_API_KEY_URL = f"{HOST_ADDRESS}/{VERSION}/langfuse/api_key"
# All tests in this module are priority-3 (environment-gated end-to-end checks).
pytestmark = pytest.mark.p3
@pytest.fixture(autouse=True)
def _require_oauth_env(require_env_flag):
    """Auto-use fixture: skip every test here unless RAGFLOW_E2E_OAUTH is enabled."""
    require_env_flag("RAGFLOW_E2E_OAUTH")
def _skip_unless_provider(allowed):
provider = os.getenv("RAGFLOW_OAUTH_PROVIDER")
if provider and provider not in allowed:
pytest.skip(f"RAGFLOW_OAUTH_PROVIDER={provider} not in {sorted(allowed)}")
def _assert_unauthorized(payload):
assert payload["code"] == 401, payload
assert "Unauthorized" in payload["message"], payload
def _assert_unauthorized_response(res, *, allow_405=False):
    """Assert an HTTP response carries the JSON 401/Unauthorized envelope.

    With allow_405=True, a 405 status skips the test instead (the endpoint
    lacks that method in some deployments).
    """
    if allow_405 and res.status_code == 405:
        pytest.skip("method not supported in this deployment")
    # Both original branches ended up calling res.json(); collapse them and
    # use pytest.fail (assert False disappears under `python -O`).
    try:
        payload = res.json()
    except ValueError:
        content_type = res.headers.get("Content-Type", "")
        pytest.fail(f"Expected JSON response, status={res.status_code}, content_type={content_type}")
    _assert_unauthorized(payload)
def _assert_callback_response(res, expected_fragment):
assert res.status_code in {200, 302}, {"status": res.status_code, "headers": dict(res.headers)}
if res.status_code == 200:
assert "text/html" in res.headers.get("Content-Type", ""), res.headers
assert expected_fragment in res.text
else:
location = res.headers.get("Location", "")
assert location, res.headers
markers = ("error", "oauth", "callback", "state", "code")
assert any(marker in location for marker in markers), location
def test_google_oauth_start_requires_auth():
    """Unauthenticated Google OAuth start must be rejected with 401."""
    _skip_unless_provider({"google", "google-drive", "gmail"})
    response = requests.post(f"{CONNECTOR_BASE_URL}/google/oauth/web/start")
    _assert_unauthorized(response.json())
def test_google_oauth_start_missing_credentials(WebApiAuth):
    """An empty body must be rejected for the missing 'credentials' argument."""
    _skip_unless_provider({"google", "google-drive", "gmail"})
    response = requests.post(f"{CONNECTOR_BASE_URL}/google/oauth/web/start", auth=WebApiAuth, json={})
    body = response.json()
    assert body["code"] == 101, body
    assert "required argument are missing" in body["message"], body
    assert "credentials" in body["message"], body
@pytest.mark.parametrize("path", ["google-drive/oauth/web/callback", "gmail/oauth/web/callback"])
def test_google_oauth_callback_missing_state(path):
    """A callback without the OAuth state parameter surfaces an error page or redirect."""
    _skip_unless_provider({"google", "google-drive", "gmail"})
    response = requests.get(f"{CONNECTOR_BASE_URL}/{path}", allow_redirects=False)
    _assert_callback_response(response, "Missing OAuth state parameter.")
def test_google_oauth_result_missing_flow_id(WebApiAuth):
    """Polling the Google OAuth result without flow_id fails with code 101."""
    _skip_unless_provider({"google", "google-drive", "gmail"})
    response = requests.post(
        f"{CONNECTOR_BASE_URL}/google/oauth/web/result",
        params={"type": "google-drive"},
        auth=WebApiAuth,
        json={},
    )
    body = response.json()
    assert body["code"] == 101, body
    assert "required argument are missing" in body["message"], body
    assert "flow_id" in body["message"], body
def test_box_oauth_start_missing_params(WebApiAuth):
    """Box OAuth start must report both missing client_id and client_secret."""
    _skip_unless_provider({"box"})
    response = requests.post(f"{CONNECTOR_BASE_URL}/box/oauth/web/start", auth=WebApiAuth, json={})
    body = response.json()
    assert body["code"] == 101, body
    assert "client_id" in body["message"], body
    assert "client_secret" in body["message"], body
def test_box_oauth_callback_missing_state():
    """A Box callback without OAuth parameters surfaces an error page or redirect."""
    _skip_unless_provider({"box"})
    response = requests.get(f"{CONNECTOR_BASE_URL}/box/oauth/web/callback", allow_redirects=False)
    _assert_callback_response(response, "Missing OAuth parameters.")
def test_box_oauth_result_missing_flow_id(WebApiAuth):
    """Polling the Box OAuth result without flow_id fails with code 101."""
    _skip_unless_provider({"box"})
    response = requests.post(f"{CONNECTOR_BASE_URL}/box/oauth/web/result", auth=WebApiAuth, json={})
    body = response.json()
    assert body["code"] == 101, body
    assert "required argument are missing" in body["message"], body
    assert "flow_id" in body["message"], body
def test_langfuse_api_key_requires_auth():
    """POSTing the Langfuse key endpoint unauthenticated must 401."""
    response = requests.post(LANGFUSE_API_KEY_URL, json={})
    _assert_unauthorized_response(response)
def test_langfuse_api_key_requires_auth_get():
    """GETting the Langfuse key endpoint unauthenticated must 401 (405 allowed)."""
    response = requests.get(LANGFUSE_API_KEY_URL)
    _assert_unauthorized_response(response, allow_405=True)
def test_langfuse_api_key_requires_auth_put():
    """PUTting the Langfuse key endpoint unauthenticated must 401 (405 allowed)."""
    response = requests.put(LANGFUSE_API_KEY_URL, json={})
    _assert_unauthorized_response(response, allow_405=True)
def test_llm_set_api_key_requires_auth():
    """POSTing the LLM set_api_key endpoint unauthenticated must 401."""
    response = requests.post(LLM_API_KEY_URL, json={})
    _assert_unauthorized_response(response)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_connector_app/test_connector_oauth_contract.py",
"license": "Apache License 2.0",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_document_app/test_document_metadata.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
from types import SimpleNamespace
import pytest
from common import (
document_change_status,
document_filter,
document_infos,
document_metadata_summary,
document_rename,
document_set_meta,
document_update_metadata_setting,
)
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
# (auth, expected code, expected message fragment) triples covering both the
# missing-token and the malformed-token rejection paths.
INVALID_AUTH_CASES = [
    (None, 401, "Unauthorized"),
    (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "Unauthorized"),
]
class TestAuthorization:
    """Document-metadata endpoints must reject missing or invalid web-API tokens."""

    @pytest.mark.p2
    @pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
    def test_filter_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
        """Document filtering requires valid auth."""
        res = document_filter(invalid_auth, {"kb_id": "kb_id"})
        assert res["code"] == expected_code, res
        assert expected_fragment in res["message"], res

    @pytest.mark.p2
    @pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
    def test_infos_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
        """Document info lookup requires valid auth."""
        res = document_infos(invalid_auth, {"doc_ids": ["doc_id"]})
        assert res["code"] == expected_code, res
        assert expected_fragment in res["message"], res

    ## The inputs has been changed to add 'doc_ids'
    ## TODO:
    #@pytest.mark.p2
    #@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
    #def test_metadata_summary_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
    #    res = document_metadata_summary(invalid_auth, {"kb_id": "kb_id"})
    #    assert res["code"] == expected_code, res
    #    assert expected_fragment in res["message"], res

    ## The inputs has been changed to deprecate 'selector'
    ## TODO:
    #@pytest.mark.p2
    #@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
    #def test_metadata_update_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
    #    res = document_metadata_update(invalid_auth, {"kb_id": "kb_id", "selector": {"document_ids": ["doc_id"]}, "updates": []})
    #    assert res["code"] == expected_code, res
    #    assert expected_fragment in res["message"], res

    @pytest.mark.p2
    @pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
    def test_update_metadata_setting_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
        """Metadata-setting update requires valid auth."""
        res = document_update_metadata_setting(invalid_auth, {"doc_id": "doc_id", "metadata": {}})
        assert res["code"] == expected_code, res
        assert expected_fragment in res["message"], res

    @pytest.mark.p2
    @pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
    def test_change_status_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
        """Status change requires valid auth."""
        res = document_change_status(invalid_auth, {"doc_ids": ["doc_id"], "status": "1"})
        assert res["code"] == expected_code, res
        assert expected_fragment in res["message"], res

    @pytest.mark.p2
    @pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
    def test_rename_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
        """Rename requires valid auth."""
        res = document_rename(invalid_auth, {"doc_id": "doc_id", "name": "rename.txt"})
        assert res["code"] == expected_code, res
        assert expected_fragment in res["message"], res

    @pytest.mark.p2
    @pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
    def test_set_meta_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
        """Meta setting requires valid auth."""
        res = document_set_meta(invalid_auth, {"doc_id": "doc_id", "meta": "{}"})
        assert res["code"] == expected_code, res
        assert expected_fragment in res["message"], res
class TestDocumentMetadata:
    """Happy-path checks for document filtering, info, status, rename and meta endpoints."""

    @pytest.mark.p2
    def test_filter(self, WebApiAuth, add_dataset_func):
        """Filtering an (empty) dataset returns the filter and total fields."""
        kb_id = add_dataset_func
        res = document_filter(WebApiAuth, {"kb_id": kb_id})
        assert res["code"] == 0, res
        assert "filter" in res["data"], res
        assert "total" in res["data"], res

    @pytest.mark.p2
    def test_infos(self, WebApiAuth, add_document_func):
        """Info lookup returns exactly the requested document."""
        _, doc_id = add_document_func
        res = document_infos(WebApiAuth, {"doc_ids": [doc_id]})
        assert res["code"] == 0, res
        assert len(res["data"]) == 1, res
        assert res["data"][0]["id"] == doc_id, res

    ## The inputs has been changed to add 'doc_ids'
    ## TODO:
    #@pytest.mark.p2
    #def test_metadata_summary(self, WebApiAuth, add_document_func):
    #    kb_id, _ = add_document_func
    #    res = document_metadata_summary(WebApiAuth, {"kb_id": kb_id})
    #    assert res["code"] == 0, res
    #    assert isinstance(res["data"]["summary"], dict), res

    ## The inputs has been changed to deprecate 'selector'
    ## TODO:
    #@pytest.mark.p2
    #def test_metadata_update(self, WebApiAuth, add_document_func):
    #    kb_id, doc_id = add_document_func
    #    payload = {
    #        "kb_id": kb_id,
    #        "selector": {"document_ids": [doc_id]},
    #        "updates": [{"key": "author", "value": "alice"}],
    #        "deletes": [],
    #    }
    #    res = document_metadata_update(WebApiAuth, payload)
    #    assert res["code"] == 0, res
    #    assert res["data"]["matched_docs"] == 1, res
    #    info_res = document_infos(WebApiAuth, {"doc_ids": [doc_id]})
    #    assert info_res["code"] == 0, info_res
    #    meta_fields = info_res["data"][0].get("meta_fields", {})
    #    assert meta_fields.get("author") == "alice", info_res

    ## The inputs has been changed to deprecate 'selector'
    ## TODO:
    #@pytest.mark.p2
    #def test_update_metadata_setting(self, WebApiAuth, add_document_func):
    #    _, doc_id = add_document_func
    #    metadata = {"source": "test"}
    #    res = document_update_metadata_setting(WebApiAuth, {"doc_id": doc_id, "metadata": metadata})
    #    assert res["code"] == 0, res
    #    assert res["data"]["id"] == doc_id, res
    #    assert res["data"]["parser_config"]["metadata"] == metadata, res

    @pytest.mark.p2
    def test_change_status(self, WebApiAuth, add_document_func):
        """A status change is reported per-doc and visible on re-query."""
        _, doc_id = add_document_func
        res = document_change_status(WebApiAuth, {"doc_ids": [doc_id], "status": "1"})
        assert res["code"] == 0, res
        assert res["data"][doc_id]["status"] == "1", res
        info_res = document_infos(WebApiAuth, {"doc_ids": [doc_id]})
        assert info_res["code"] == 0, info_res
        assert info_res["data"][0]["status"] == "1", info_res

    @pytest.mark.p2
    def test_rename(self, WebApiAuth, add_document_func):
        """A rename succeeds and the new name is visible on re-query."""
        _, doc_id = add_document_func
        name = f"renamed_{doc_id}.txt"
        res = document_rename(WebApiAuth, {"doc_id": doc_id, "name": name})
        assert res["code"] == 0, res
        assert res["data"] is True, res
        info_res = document_infos(WebApiAuth, {"doc_ids": [doc_id]})
        assert info_res["code"] == 0, info_res
        assert info_res["data"][0]["name"] == name, info_res

    @pytest.mark.p2
    def test_set_meta(self, WebApiAuth, add_document_func):
        """Setting meta (JSON string) round-trips into meta_fields."""
        _, doc_id = add_document_func
        res = document_set_meta(WebApiAuth, {"doc_id": doc_id, "meta": "{\"author\": \"alice\"}"})
        assert res["code"] == 0, res
        assert res["data"] is True, res
        info_res = document_infos(WebApiAuth, {"doc_ids": [doc_id]})
        assert info_res["code"] == 0, info_res
        meta_fields = info_res["data"][0].get("meta_fields", {})
        assert meta_fields.get("author") == "alice", info_res
class TestDocumentMetadataNegative:
    """Error-path tests for the document metadata web endpoints."""

    @pytest.mark.p3
    def test_filter_missing_kb_id(self, WebApiAuth, add_document_func):
        """Filtering without a kb_id is rejected with code 101."""
        _, doc_id = add_document_func
        resp = document_filter(WebApiAuth, {"doc_ids": [doc_id]})
        assert resp["code"] == 101, resp
        assert "KB ID" in resp["message"], resp

    @pytest.mark.p3
    def test_metadata_summary_missing_kb_id(self, WebApiAuth, add_document_func):
        """Metadata summary without a kb_id is rejected with code 101."""
        _, doc_id = add_document_func
        resp = document_metadata_summary(WebApiAuth, {"doc_ids": [doc_id]})
        assert resp["code"] == 101, resp
        assert "KB ID" in resp["message"], resp

    ## The inputs have been changed to deprecate 'selector'
    ## TODO:
    # @pytest.mark.p3
    # def test_metadata_update_missing_kb_id(self, WebApiAuth, add_document_func):
    #     _, doc_id = add_document_func
    #     res = document_metadata_update(WebApiAuth, {"selector": {"document_ids": [doc_id]}, "updates": []})
    #     assert res["code"] == 101, res
    #     assert "KB ID" in res["message"], res

    @pytest.mark.p3
    def test_infos_invalid_doc_id(self, WebApiAuth):
        """Asking for an unknown document id yields an authorization error (109)."""
        resp = document_infos(WebApiAuth, {"doc_ids": ["invalid_id"]})
        assert resp["code"] == 109, resp
        assert "No authorization" in resp["message"], resp

    @pytest.mark.p3
    def test_update_metadata_setting_missing_metadata(self, WebApiAuth, add_document_func):
        """Omitting the required 'metadata' argument is rejected with code 101."""
        _, doc_id = add_document_func
        resp = document_update_metadata_setting(WebApiAuth, {"doc_id": doc_id})
        assert resp["code"] == 101, resp
        assert "required argument are missing" in resp["message"], resp
        assert "metadata" in resp["message"], resp

    @pytest.mark.p3
    def test_change_status_invalid_status(self, WebApiAuth, add_document_func):
        """Status values other than "0"/"1" are rejected with code 101."""
        _, doc_id = add_document_func
        resp = document_change_status(WebApiAuth, {"doc_ids": [doc_id], "status": "2"})
        assert resp["code"] == 101, resp
        assert "Status" in resp["message"], resp

    @pytest.mark.p3
    def test_rename_extension_mismatch(self, WebApiAuth, add_document_func):
        """Renaming must not change the file extension."""
        _, doc_id = add_document_func
        resp = document_rename(WebApiAuth, {"doc_id": doc_id, "name": "renamed.pdf"})
        assert resp["code"] == 101, resp
        assert "extension" in resp["message"], resp

    @pytest.mark.p3
    def test_set_meta_invalid_type(self, WebApiAuth, add_document_func):
        """Non-dictionary JSON meta payloads are rejected with code 101."""
        _, doc_id = add_document_func
        resp = document_set_meta(WebApiAuth, {"doc_id": doc_id, "meta": "[]"})
        assert resp["code"] == 101, resp
        assert "dictionary" in resp["message"], resp
def _run(coro):
    """Drive *coro* to completion on a fresh event loop and return its result."""
    result = asyncio.run(coro)
    return result
class _DummyArgs:
    """Minimal stand-in for a request ``args`` mapping (get/getlist only)."""

    def __init__(self, args=None):
        # Keep a reference to the supplied mapping; default to an empty dict.
        self._args = {} if not args else args

    def get(self, key, default=None):
        """Return the stored value for *key*, or *default* when absent."""
        return self._args.get(key, default)

    def getlist(self, key):
        """Return the value for *key* as a list (missing keys give [])."""
        raw = self._args.get(key, [])
        return raw if isinstance(raw, list) else [raw]
class _DummyRequest:
    """Request double exposing only an ``args`` attribute backed by _DummyArgs."""

    def __init__(self, args=None):
        self.args = _DummyArgs(args)
class _DummyResponse:
    """Response double capturing a payload (``data``) and mutable ``headers``."""

    def __init__(self, data=None):
        self.headers = {}
        self.data = data
@pytest.mark.p2
class TestDocumentMetadataUnit:
    """White-box tests that call the document app's route handlers directly.

    Each test monkeypatches the route module's collaborators (services,
    ``get_request_json``, the doc store connection, ...) and then drives the
    handler coroutine via ``_run``, asserting on the returned envelope.
    ``document_app_module`` is a fixture providing the imported route module;
    decorated routes are invoked through ``__wrapped__`` to bypass decorators.
    """

    def _allow_kb(self, module, monkeypatch, kb_id="kb1", tenant_id="tenant1"):
        """Patch tenant/KB lookups so that exactly *kb_id* passes authorization."""
        monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id=tenant_id)])
        monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: True if _kwargs.get("id") == kb_id else False)

    def test_filter_missing_kb_id(self, document_app_module, monkeypatch):
        """get_filter with an empty body returns 101 mentioning "KB ID"."""
        module = document_app_module

        async def fake_request_json():
            return {}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.get_filter())
        assert res["code"] == 101
        assert "KB ID" in res["message"]

    def test_filter_unauthorized(self, document_app_module, monkeypatch):
        """get_filter returns 103 when the KB does not belong to the caller."""
        module = document_app_module
        monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant1")])
        # KB lookup fails for every id -> authorization denied.
        monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: False)

        async def fake_request_json():
            return {"kb_id": "kb1"}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.get_filter())
        assert res["code"] == 103

    def test_filter_invalid_filters(self, document_app_module, monkeypatch):
        """Invalid run-status and type filters both return code 102."""
        module = document_app_module
        self._allow_kb(module, monkeypatch)

        async def fake_request_json():
            return {"kb_id": "kb1", "run_status": ["INVALID"]}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.get_filter())
        assert res["code"] == 102
        assert "Invalid filter run status" in res["message"]

        async def fake_request_json_types():
            return {"kb_id": "kb1", "types": ["INVALID"]}

        monkeypatch.setattr(module, "get_request_json", fake_request_json_types)
        res = _run(module.get_filter())
        assert res["code"] == 102
        assert "Invalid filter conditions" in res["message"]

    def test_filter_keywords_suffix(self, document_app_module, monkeypatch):
        """keywords + suffix filters flow through to a successful envelope."""
        module = document_app_module
        self._allow_kb(module, monkeypatch)
        monkeypatch.setattr(module.DocumentService, "get_filter_by_kb_id", lambda *_args, **_kwargs: ({"run": {}}, 1))

        async def fake_request_json():
            return {"kb_id": "kb1", "keywords": "ragflow", "suffix": ["txt"]}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.get_filter())
        assert res["code"] == 0
        assert "filter" in res["data"]

    def test_filter_exception(self, document_app_module, monkeypatch):
        """A service-layer exception surfaces as code 100."""
        module = document_app_module
        self._allow_kb(module, monkeypatch)

        def raise_error(*_args, **_kwargs):
            raise RuntimeError("boom")

        monkeypatch.setattr(module.DocumentService, "get_filter_by_kb_id", raise_error)

        async def fake_request_json():
            return {"kb_id": "kb1"}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.get_filter())
        assert res["code"] == 100

    def test_infos_meta_fields(self, document_app_module, monkeypatch):
        """doc_infos merges per-document metadata into ``meta_fields``."""
        module = document_app_module
        monkeypatch.setattr(module.DocumentService, "accessible", lambda *_args, **_kwargs: True)

        class _Docs:
            # Mimics the peewee query result interface used by the route.
            def dicts(self):
                return [{"id": "doc1"}]

        monkeypatch.setattr(module.DocumentService, "get_by_ids", lambda _ids: _Docs())
        monkeypatch.setattr(module.DocMetadataService, "get_document_metadata", lambda _doc_id: {"author": "alice"})

        async def fake_request_json():
            return {"doc_ids": ["doc1"]}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.doc_infos())
        assert res["code"] == 0
        assert res["data"][0]["meta_fields"]["author"] == "alice"

    def test_metadata_summary_missing_kb_id(self, document_app_module, monkeypatch):
        """metadata_summary without a kb_id returns code 101."""
        module = document_app_module

        async def fake_request_json():
            return {"doc_ids": ["doc1"]}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.metadata_summary())
        assert res["code"] == 101

    def test_metadata_summary_unauthorized(self, document_app_module, monkeypatch):
        """metadata_summary returns 103 when KB authorization fails."""
        module = document_app_module
        monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant1")])
        monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: False)

        async def fake_request_json():
            return {"kb_id": "kb1", "doc_ids": ["doc1"]}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.metadata_summary())
        assert res["code"] == 103

    def test_metadata_summary_success_and_exception(self, document_app_module, monkeypatch):
        """metadata_summary succeeds with a summary dict; a raising service gives 100."""
        module = document_app_module
        self._allow_kb(module, monkeypatch)
        monkeypatch.setattr(module.DocMetadataService, "get_metadata_summary", lambda *_args, **_kwargs: {"author": {"alice": 1}})

        async def fake_request_json():
            return {"kb_id": "kb1", "doc_ids": ["doc1"]}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.metadata_summary())
        assert res["code"] == 0
        assert "summary" in res["data"]

        def raise_error(*_args, **_kwargs):
            raise RuntimeError("boom")

        monkeypatch.setattr(module.DocMetadataService, "get_metadata_summary", raise_error)
        res = _run(module.metadata_summary())
        assert res["code"] == 100

    def test_metadata_update_missing_kb_id(self, document_app_module, monkeypatch):
        """metadata_update without a kb_id returns 101 mentioning "KB ID"."""
        module = document_app_module

        async def fake_request_json():
            return {"doc_ids": ["doc1"], "updates": [], "deletes": []}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.metadata_update.__wrapped__())
        assert res["code"] == 101
        assert "KB ID" in res["message"]

    def test_metadata_update_success(self, document_app_module, monkeypatch):
        """A valid batch update reports the number of matched documents."""
        module = document_app_module
        monkeypatch.setattr(module.DocMetadataService, "batch_update_metadata", lambda *_args, **_kwargs: 1)

        async def fake_request_json():
            return {"kb_id": "kb1", "doc_ids": ["doc1"], "updates": [{"key": "author", "value": "alice"}], "deletes": []}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.metadata_update.__wrapped__())
        assert res["code"] == 0
        assert res["data"]["matched_docs"] == 1

    def test_metadata_update_invalid_delete_item_unit(self, document_app_module, monkeypatch):
        """A delete entry without a key is rejected with ARGUMENT_ERROR."""
        module = document_app_module

        async def fake_request_json():
            return {"kb_id": "kb1", "doc_ids": ["doc1"], "updates": [], "deletes": [{}]}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.metadata_update.__wrapped__())
        assert res["code"] == module.RetCode.ARGUMENT_ERROR
        assert "Each delete requires key." in res["message"]

    def test_update_metadata_setting_authorization_and_refetch_not_found_unit(self, document_app_module, monkeypatch):
        """update_metadata_setting: inaccessible doc -> auth error; doc vanishing
        between update and re-fetch -> DATA_ERROR."""
        module = document_app_module

        async def fake_request_json():
            return {"doc_id": "doc1", "metadata": {"author": "alice"}}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        monkeypatch.setattr(module.DocumentService, "accessible", lambda *_args, **_kwargs: False)
        res = _run(module.update_metadata_setting.__wrapped__())
        assert res["code"] == module.RetCode.AUTHENTICATION_ERROR
        assert "No authorization." in res["message"]

        doc = SimpleNamespace(id="doc1", to_dict=lambda: {"id": "doc1", "parser_config": {}})
        state = {"count": 0}

        def fake_get_by_id(_doc_id):
            # First lookup (pre-update) succeeds; second (re-fetch) fails.
            state["count"] += 1
            if state["count"] == 1:
                return True, doc
            return False, None

        monkeypatch.setattr(module.DocumentService, "accessible", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module.DocumentService, "get_by_id", fake_get_by_id)
        monkeypatch.setattr(module.DocumentService, "update_parser_config", lambda *_args, **_kwargs: True)
        res = _run(module.update_metadata_setting.__wrapped__())
        assert res["code"] == module.RetCode.DATA_ERROR
        assert "Document not found!" in res["message"]

    def test_thumbnails_missing_ids_rewrite_and_exception_unit(self, document_app_module, monkeypatch):
        """thumbnails: missing ids -> 101; plain names get an image URL prefix
        while base64 thumbnails pass through; service errors hit server_error_response."""
        module = document_app_module
        monkeypatch.setattr(module, "request", _DummyRequest(args={}))
        res = module.thumbnails()
        assert res["code"] == module.RetCode.ARGUMENT_ERROR
        assert 'Lack of "Document ID"' in res["message"]

        monkeypatch.setattr(module, "request", _DummyRequest(args={"doc_ids": ["doc1", "doc2"]}))
        monkeypatch.setattr(
            module.DocumentService,
            "get_thumbnails",
            lambda _doc_ids: [
                {"id": "doc1", "kb_id": "kb1", "thumbnail": "thumb.jpg"},
                {"id": "doc2", "kb_id": "kb1", "thumbnail": f"{module.IMG_BASE64_PREFIX}blob"},
            ],
        )
        res = module.thumbnails()
        assert res["code"] == 0
        assert res["data"]["doc1"] == "/v1/document/image/kb1-thumb.jpg"
        assert res["data"]["doc2"] == f"{module.IMG_BASE64_PREFIX}blob"

        def raise_error(*_args, **_kwargs):
            raise RuntimeError("thumb boom")

        monkeypatch.setattr(module.DocumentService, "get_thumbnails", raise_error)
        monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})
        res = module.thumbnails()
        assert res["code"] == 500
        assert "thumb boom" in res["message"]

    def test_change_status_partial_failure_matrix_unit(self, document_app_module, monkeypatch):
        """change_status over a batch where each doc id triggers a different
        failure mode; the route returns SERVER_ERROR with per-doc errors."""
        module = document_app_module
        calls = {"docstore_update": []}
        # One id per failure branch exercised below.
        doc_ids = ["unauth", "missing_doc", "missing_kb", "update_fail", "docstore_3022", "docstore_generic", "outer_exc"]

        async def fake_request_json():
            return {"doc_ids": doc_ids, "status": "1"}

        def fake_accessible(doc_id, _uid):
            return doc_id != "unauth"

        def fake_get_by_id(doc_id):
            if doc_id == "missing_doc":
                return False, None
            if doc_id == "outer_exc":
                raise RuntimeError("explode")
            kb_id = "kb_missing" if doc_id == "missing_kb" else "kb1"
            # Only the docstore cases have chunks, so only they hit the doc store.
            chunk_num = 1 if doc_id in {"docstore_3022", "docstore_generic"} else 0
            doc = SimpleNamespace(id=doc_id, kb_id=kb_id, status="0", chunk_num=chunk_num)
            return True, doc

        def fake_get_kb(kb_id):
            if kb_id == "kb_missing":
                return False, None
            return True, SimpleNamespace(tenant_id="tenant1")

        def fake_update_by_id(doc_id, _payload):
            return doc_id != "update_fail"

        class _DocStore:
            def update(self, where, _payload, _index_name, _kb_id):
                calls["docstore_update"].append(where["doc_id"])
                if where["doc_id"] == "docstore_3022":
                    raise RuntimeError("3022 table missing")
                if where["doc_id"] == "docstore_generic":
                    raise RuntimeError("doc store down")
                return True

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        monkeypatch.setattr(module.DocumentService, "accessible", fake_accessible)
        monkeypatch.setattr(module.DocumentService, "get_by_id", fake_get_by_id)
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda kb_id: fake_get_kb(kb_id))
        monkeypatch.setattr(module.DocumentService, "update_by_id", fake_update_by_id)
        monkeypatch.setattr(module.settings, "docStoreConn", _DocStore())
        monkeypatch.setattr(module.search, "index_name", lambda tenant_id: f"idx_{tenant_id}")
        res = _run(module.change_status.__wrapped__())
        assert res["code"] == module.RetCode.SERVER_ERROR
        assert res["message"] == "Partial failure"
        assert res["data"]["unauth"]["error"] == "No authorization."
        assert res["data"]["missing_doc"]["error"] == "No authorization."
        assert res["data"]["missing_kb"]["error"] == "Can't find this dataset!"
        assert res["data"]["update_fail"]["error"] == "Database error (Document update)!"
        assert res["data"]["docstore_3022"]["error"] == "Document store table missing."
        assert "Document store update failed:" in res["data"]["docstore_generic"]["error"]
        assert "Internal server error: explode" == res["data"]["outer_exc"]["error"]
        assert calls["docstore_update"] == ["docstore_3022", "docstore_generic"]

    def test_change_status_invalid_status_unit(self, document_app_module, monkeypatch):
        """A status other than "0"/"1" is rejected with ARGUMENT_ERROR."""
        module = document_app_module

        async def fake_request_json():
            return {"doc_ids": ["doc1"], "status": "2"}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.change_status.__wrapped__())
        assert res["code"] == module.RetCode.ARGUMENT_ERROR
        assert '"Status" must be either 0 or 1!' in res["message"]

    def test_change_status_all_success_unit(self, document_app_module, monkeypatch):
        """With all collaborators succeeding, change_status reports the new status."""
        module = document_app_module

        async def fake_request_json():
            return {"doc_ids": ["doc1"], "status": "1"}

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        monkeypatch.setattr(module.DocumentService, "accessible", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, SimpleNamespace(id="doc1", kb_id="kb1", status="0", chunk_num=0)))
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, SimpleNamespace(tenant_id="tenant1")))
        monkeypatch.setattr(module.DocumentService, "update_by_id", lambda *_args, **_kwargs: True)
        res = _run(module.change_status.__wrapped__())
        assert res["code"] == 0
        assert res["data"]["doc1"]["status"] == "1"

    def test_rename_branch_matrix_and_exception_unit(self, document_app_module, monkeypatch):
        """rename: auth failure, missing doc, extension mismatch, over-long name,
        duplicate name, full success (file + ES updates), then a DB exception."""
        module = document_app_module
        file_updates = []
        es_updates = []

        async def fake_thread_pool_exec(func, *_args, **_kwargs):
            # Run the offloaded callable inline.
            return func()

        monkeypatch.setattr(module, "thread_pool_exec", fake_thread_pool_exec)
        monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: "tenant1")
        monkeypatch.setattr(module.rag_tokenizer, "tokenize", lambda _name: ["token"])
        monkeypatch.setattr(module.rag_tokenizer, "fine_grained_tokenize", lambda _tokens: ["fine"])
        monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})

        class _DocStore:
            def index_exist(self, _index_name, _kb_id):
                return True

            def update(self, where, payload, _index_name, _kb_id):
                es_updates.append((where, payload))

        monkeypatch.setattr(module.settings, "docStoreConn", _DocStore())
        monkeypatch.setattr(module.search, "index_name", lambda tenant_id: f"idx_{tenant_id}")

        def set_req(name):
            # Re-point get_request_json at a payload with the given target name.
            async def fake_request_json():
                return {"doc_id": "doc1", "name": name}

            monkeypatch.setattr(module, "get_request_json", fake_request_json)

        set_req("renamed.txt")
        monkeypatch.setattr(module.DocumentService, "accessible", lambda *_args, **_kwargs: False)
        res = _run(module.rename.__wrapped__())
        assert res["code"] == module.RetCode.AUTHENTICATION_ERROR

        monkeypatch.setattr(module.DocumentService, "accessible", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (False, None))
        res = _run(module.rename.__wrapped__())
        assert res["code"] == module.RetCode.DATA_ERROR
        assert "Document not found!" in res["message"]

        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, SimpleNamespace(id="doc1", name="origin.txt", kb_id="kb1")))
        set_req("renamed.pdf")
        res = _run(module.rename.__wrapped__())
        assert res["code"] == module.RetCode.ARGUMENT_ERROR
        assert "extension" in res["message"]

        too_long = "a" * (module.FILE_NAME_LEN_LIMIT + 1) + ".txt"
        set_req(too_long)
        res = _run(module.rename.__wrapped__())
        assert res["code"] == module.RetCode.ARGUMENT_ERROR
        assert "bytes or less" in res["message"]

        set_req("dup.txt")
        monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [SimpleNamespace(name="dup.txt")])
        res = _run(module.rename.__wrapped__())
        assert res["code"] == module.RetCode.DATA_ERROR
        assert "Duplicated document name" in res["message"]

        set_req("ok.txt")
        monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [])
        monkeypatch.setattr(module.DocumentService, "update_by_id", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module.File2DocumentService, "get_by_document_id", lambda _doc_id: [SimpleNamespace(file_id="file1")])
        monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (True, SimpleNamespace(id="file1")))
        monkeypatch.setattr(module.FileService, "update_by_id", lambda file_id, payload: file_updates.append((file_id, payload)))
        res = _run(module.rename.__wrapped__())
        assert res["code"] == 0
        assert file_updates == [("file1", {"name": "ok.txt"})]
        # Successful rename also rewrites the search index title fields.
        assert es_updates[0][0] == {"doc_id": "doc1"}
        assert es_updates[0][1]["docnm_kwd"] == "ok.txt"
        assert es_updates[0][1]["title_tks"] == ["token"]
        assert es_updates[0][1]["title_sm_tks"] == ["fine"]

        def raise_db_error(*_args, **_kwargs):
            raise RuntimeError("rename boom")

        monkeypatch.setattr(module.DocumentService, "update_by_id", raise_db_error)
        res = _run(module.rename.__wrapped__())
        assert res["code"] == 500
        assert "rename boom" in res["message"]

    def test_get_route_not_found_success_and_exception_unit(self, document_app_module, monkeypatch):
        """get: missing doc -> DATA_ERROR; visual doc streams blob with safe
        headers; a raising lookup routes through server_error_response."""
        module = document_app_module
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (False, None))
        res = _run(module.get("doc1"))
        assert res["code"] == module.RetCode.DATA_ERROR
        assert "Document not found!" in res["message"]

        async def fake_thread_pool_exec(*_args, **_kwargs):
            return b"blob-data"

        async def fake_make_response(data):
            return _DummyResponse(data)

        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, SimpleNamespace(name="image.abc", type=module.FileType.VISUAL.value)))
        monkeypatch.setattr(module.File2DocumentService, "get_storage_address", lambda **_kwargs: ("bucket", "name"))
        monkeypatch.setattr(module.settings, "STORAGE_IMPL", SimpleNamespace(get=lambda *_args, **_kwargs: b"blob-data"))
        monkeypatch.setattr(module, "thread_pool_exec", fake_thread_pool_exec)
        monkeypatch.setattr(module, "make_response", fake_make_response)
        monkeypatch.setattr(
            module,
            "apply_safe_file_response_headers",
            lambda response, content_type, extension: response.headers.update({"content_type": content_type, "extension": extension}),
        )
        res = _run(module.get("doc1"))
        assert isinstance(res, _DummyResponse)
        assert res.data == b"blob-data"
        # Visual files get an image/* content type derived from the extension.
        assert res.headers["content_type"] == "image/abc"
        assert res.headers["extension"] == "abc"

        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (_ for _ in ()).throw(RuntimeError("get boom")))
        monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})
        res = _run(module.get("doc1"))
        assert res["code"] == 500
        assert "get boom" in res["message"]

    def test_download_attachment_success_and_exception_unit(self, document_app_module, monkeypatch):
        """download_attachment streams the blob with application/<ext> headers;
        a failing storage call routes through server_error_response."""
        module = document_app_module
        monkeypatch.setattr(module, "request", _DummyRequest(args={"ext": "abc"}))

        async def fake_thread_pool_exec(*_args, **_kwargs):
            return b"attachment"

        async def fake_make_response(data):
            return _DummyResponse(data)

        monkeypatch.setattr(module, "thread_pool_exec", fake_thread_pool_exec)
        monkeypatch.setattr(module, "make_response", fake_make_response)
        monkeypatch.setattr(module.settings, "STORAGE_IMPL", SimpleNamespace(get=lambda *_args, **_kwargs: b"attachment"))
        monkeypatch.setattr(
            module,
            "apply_safe_file_response_headers",
            lambda response, content_type, extension: response.headers.update({"content_type": content_type, "extension": extension}),
        )
        res = _run(module.download_attachment("att1"))
        assert isinstance(res, _DummyResponse)
        assert res.data == b"attachment"
        assert res.headers["content_type"] == "application/abc"
        assert res.headers["extension"] == "abc"

        async def raise_error(*_args, **_kwargs):
            raise RuntimeError("download boom")

        monkeypatch.setattr(module, "thread_pool_exec", raise_error)
        monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})
        res = _run(module.download_attachment("att1"))
        assert res["code"] == 500
        assert "download boom" in res["message"]

    def test_change_parser_guards_and_reset_update_failure_unit(self, document_app_module, monkeypatch):
        """change_parser branch matrix: auth/missing-doc guards, no-op cases,
        pipeline change with progress reset, chunk cleanup, parser-config
        update, and a raising update_parser_config."""
        module = document_app_module
        monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})

        async def req_auth_fail():
            return {"doc_id": "doc1", "parser_id": "naive", "pipeline_id": "pipe2"}

        monkeypatch.setattr(module, "get_request_json", req_auth_fail)
        monkeypatch.setattr(module.DocumentService, "accessible", lambda *_args, **_kwargs: False)
        res = _run(module.change_parser.__wrapped__())
        assert res["code"] == module.RetCode.AUTHENTICATION_ERROR

        monkeypatch.setattr(module.DocumentService, "accessible", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (False, None))
        res = _run(module.change_parser.__wrapped__())
        assert res["code"] == module.RetCode.DATA_ERROR
        assert "Document not found!" in res["message"]

        # Requested pipeline equals the current one -> no-op success.
        async def req_same_pipeline():
            return {"doc_id": "doc1", "parser_id": "naive", "pipeline_id": "pipe1"}

        doc_same = SimpleNamespace(
            id="doc1",
            pipeline_id="pipe1",
            parser_id="naive",
            parser_config={"k": "v"},
            token_num=0,
            chunk_num=0,
            process_duration=0,
            kb_id="kb1",
            type="doc",
            name="doc.txt",
        )
        monkeypatch.setattr(module, "get_request_json", req_same_pipeline)
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, doc_same))
        res = _run(module.change_parser.__wrapped__())
        assert res["code"] == 0

        calls = []

        async def req_pipeline_change():
            return {"doc_id": "doc1", "parser_id": "naive", "pipeline_id": "pipe2"}

        doc = SimpleNamespace(
            id="doc1",
            pipeline_id="pipe1",
            parser_id="naive",
            parser_config={},
            token_num=0,
            chunk_num=0,
            process_duration=0,
            kb_id="kb1",
            type="doc",
            name="doc.txt",
        )

        def fake_update_by_id(doc_id, payload):
            calls.append((doc_id, payload))
            return True

        monkeypatch.setattr(module, "get_request_json", req_pipeline_change)
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, doc))
        monkeypatch.setattr(module.DocumentService, "update_by_id", fake_update_by_id)
        res = _run(module.change_parser.__wrapped__())
        assert res["code"] == 0
        # First update switches the pipeline, second resets the run state.
        assert calls[0][1] == {"pipeline_id": "pipe2"}
        assert calls[1][1]["run"] == module.TaskStatus.UNSTART.value

        # With existing counters, a failing increment_chunk_num is tolerated.
        doc.token_num = 3
        doc.chunk_num = 2
        doc.process_duration = 9
        monkeypatch.setattr(module.DocumentService, "increment_chunk_num", lambda *_args, **_kwargs: False)
        res = _run(module.change_parser.__wrapped__())
        assert res["code"] == 0

        # Missing tenant id skips the doc-store cleanup but still succeeds.
        monkeypatch.setattr(module.DocumentService, "increment_chunk_num", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: None)
        res = _run(module.change_parser.__wrapped__())
        assert res["code"] == 0

        side_effects = {"img": [], "delete": []}

        class _DocStore:
            def index_exist(self, _idx, _kb_id):
                return True

            def delete(self, where, _idx, kb_id):
                side_effects["delete"].append((where["doc_id"], kb_id))

        monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: "tenant1")
        monkeypatch.setattr(module.DocumentService, "delete_chunk_images", lambda _doc, _tenant: side_effects["img"].append((_doc.id, _tenant)))
        monkeypatch.setattr(module.search, "index_name", lambda tenant_id: f"idx_{tenant_id}")
        monkeypatch.setattr(module.settings, "docStoreConn", _DocStore())
        res = _run(module.change_parser.__wrapped__())
        assert res["code"] == 0
        assert ("doc1", "tenant1") in side_effects["img"]
        assert ("doc1", "kb1") in side_effects["delete"]

        # Same parser with identical config -> no-op success.
        async def req_same_parser_with_cfg():
            return {"doc_id": "doc1", "parser_id": "naive", "parser_config": {"a": 1}}

        doc_same_parser = SimpleNamespace(
            id="doc1",
            pipeline_id="pipe1",
            parser_id="naive",
            parser_config={"a": 1},
            token_num=0,
            chunk_num=0,
            process_duration=0,
            kb_id="kb1",
            type="doc",
            name="doc.txt",
        )
        monkeypatch.setattr(module, "get_request_json", req_same_parser_with_cfg)
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, doc_same_parser))
        res = _run(module.change_parser.__wrapped__())
        assert res["code"] == 0

        # Same parser, no config supplied -> also a no-op success.
        async def req_same_parser_no_cfg():
            return {"doc_id": "doc1", "parser_id": "naive"}

        monkeypatch.setattr(module, "get_request_json", req_same_parser_no_cfg)
        res = _run(module.change_parser.__wrapped__())
        assert res["code"] == 0

        parser_cfg_updates = []

        async def req_parser_update():
            return {"doc_id": "doc1", "parser_id": "paper", "pipeline_id": "", "parser_config": {"beta": True}}

        doc_parser_update = SimpleNamespace(
            id="doc1",
            pipeline_id="pipe1",
            parser_id="naive",
            parser_config={"alpha": 1},
            token_num=0,
            chunk_num=0,
            process_duration=0,
            kb_id="kb1",
            type="doc",
            name="doc.txt",
        )
        monkeypatch.setattr(module, "get_request_json", req_parser_update)
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, doc_parser_update))
        monkeypatch.setattr(module.DocumentService, "update_parser_config", lambda doc_id, cfg: parser_cfg_updates.append((doc_id, cfg)))
        monkeypatch.setattr(module.DocumentService, "update_by_id", lambda *_args, **_kwargs: True)
        res = _run(module.change_parser.__wrapped__())
        assert res["code"] == 0
        assert parser_cfg_updates == [("doc1", {"beta": True})]

        def raise_parser_config(*_args, **_kwargs):
            raise RuntimeError("parser boom")

        monkeypatch.setattr(module.DocumentService, "update_parser_config", raise_parser_config)
        res = _run(module.change_parser.__wrapped__())
        assert res["code"] == 500
        assert "parser boom" in res["message"]

    def test_get_image_success_and_exception_unit(self, document_app_module, monkeypatch):
        """get_image streams bytes with an image/JPEG content type; a storage
        failure routes through server_error_response."""
        module = document_app_module

        class _Headers(dict):
            # Flask-style header object: exposes .set(key, value).
            def set(self, key, value):
                self[key] = value

        class _ImageResponse:
            def __init__(self, data):
                self.data = data
                self.headers = _Headers()

        async def fake_thread_pool_exec(*_args, **_kwargs):
            return b"image-bytes"

        async def fake_make_response(data):
            return _ImageResponse(data)

        monkeypatch.setattr(module, "thread_pool_exec", fake_thread_pool_exec)
        monkeypatch.setattr(module, "make_response", fake_make_response)
        monkeypatch.setattr(module.settings, "STORAGE_IMPL", SimpleNamespace(get=lambda *_args, **_kwargs: b"image-bytes"))
        res = _run(module.get_image("bucket-name"))
        assert isinstance(res, _ImageResponse)
        assert res.data == b"image-bytes"
        assert res.headers["Content-Type"] == "image/JPEG"

        async def raise_error(*_args, **_kwargs):
            raise RuntimeError("image boom")

        monkeypatch.setattr(module, "thread_pool_exec", raise_error)
        monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})
        res = _run(module.get_image("bucket-name"))
        assert res["code"] == 500
        assert "image boom" in res["message"]

    def test_set_meta_validation_and_persistence_matrix_unit(self, document_app_module, monkeypatch):
        """set_meta validation matrix: auth failure, non-dict JSON, unsupported
        nested types, malformed JSON, missing document, and a failing persist."""
        module = document_app_module

        def set_req(payload):
            async def fake_request_json():
                return payload

            monkeypatch.setattr(module, "get_request_json", fake_request_json)

        set_req({"doc_id": "doc1", "meta": "{}"})
        monkeypatch.setattr(module.DocumentService, "accessible", lambda *_args, **_kwargs: False)
        res = _run(module.set_meta.__wrapped__())
        assert res["code"] == module.RetCode.AUTHENTICATION_ERROR

        monkeypatch.setattr(module.DocumentService, "accessible", lambda *_args, **_kwargs: True)
        set_req({"doc_id": "doc1", "meta": "[]"})
        res = _run(module.set_meta.__wrapped__())
        assert res["code"] == module.RetCode.ARGUMENT_ERROR
        assert "Only dictionary type supported." in res["message"]

        set_req({"doc_id": "doc1", "meta": '{"tags":[{"x":1}]}'})
        res = _run(module.set_meta.__wrapped__())
        assert res["code"] == module.RetCode.ARGUMENT_ERROR
        assert "The type is not supported in list" in res["message"]

        set_req({"doc_id": "doc1", "meta": '{"obj":{"x":1}}'})
        res = _run(module.set_meta.__wrapped__())
        assert res["code"] == module.RetCode.ARGUMENT_ERROR
        assert "The type is not supported" in res["message"]

        set_req({"doc_id": "doc1", "meta": "{"})
        res = _run(module.set_meta.__wrapped__())
        assert res["code"] == module.RetCode.ARGUMENT_ERROR
        assert "Json syntax error:" in res["message"]

        set_req({"doc_id": "doc1", "meta": '{"author":"alice"}'})
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (False, None))
        res = _run(module.set_meta.__wrapped__())
        assert res["code"] == module.RetCode.DATA_ERROR
        assert "Document not found!" in res["message"]

        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, SimpleNamespace(id="doc1")))
        monkeypatch.setattr(module.DocMetadataService, "update_document_metadata", lambda *_args, **_kwargs: False)
        res = _run(module.set_meta.__wrapped__())
        assert res["code"] == module.RetCode.DATA_ERROR
        assert "Database error (meta updates)!" in res["message"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_document_app/test_document_metadata.py",
"license": "Apache License 2.0",
"lines": 799,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_kb_app/test_kb_pipeline_tasks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import (
kb_delete_pipeline_logs,
kb_list_pipeline_dataset_logs,
kb_list_pipeline_logs,
kb_pipeline_log_detail,
kb_run_graphrag,
kb_run_mindmap,
kb_run_raptor,
kb_trace_graphrag,
kb_trace_mindmap,
kb_trace_raptor,
list_documents,
parse_documents,
)
from utils import wait_for
TASK_STATUS_DONE = "3"
def _find_task(data, task_id):
if isinstance(data, dict):
if data.get("id") == task_id:
return data
tasks = data.get("tasks")
if isinstance(tasks, list):
for item in tasks:
if isinstance(item, dict) and item.get("id") == task_id:
return item
elif isinstance(data, list):
for item in data:
if isinstance(item, dict) and item.get("id") == task_id:
return item
return None
def _assert_progress_in_scale(progress, payload):
assert isinstance(progress, (int, float)), payload
if progress < 0:
assert False, f"Negative progress is not expected: {payload}"
scale = 100 if progress > 1 else 1
# Infer scale from observed payload (0..1 or 0..100).
assert 0 <= progress <= scale, payload
return scale
def _wait_for_task(trace_func, auth, kb_id, task_id, timeout=60):
@wait_for(timeout, 1, "Pipeline task trace timeout")
def _condition():
res = trace_func(auth, {"kb_id": kb_id})
if res["code"] != 0:
return False
return _find_task(res["data"], task_id) is not None
_condition()
def _wait_for_docs_parsed(auth, kb_id, timeout=60):
@wait_for(timeout, 2, "Document parsing timeout")
def _condition():
res = list_documents(auth, {"kb_id": kb_id})
if res["code"] != 0:
return False
for doc in res["data"]["docs"]:
progress = doc.get("progress", 0)
_assert_progress_in_scale(progress, doc)
scale = 100 if progress > 1 else 1
if doc.get("run") != TASK_STATUS_DONE or progress < scale:
return False
return True
_condition()
def _wait_for_pipeline_logs(auth, kb_id, timeout=30):
@wait_for(timeout, 1, "Pipeline log timeout")
def _condition():
res = kb_list_pipeline_logs(auth, params={"kb_id": kb_id}, payload={})
if res["code"] != 0:
return False
return bool(res["data"]["logs"])
_condition()
class TestKbPipelineTasks:
@pytest.mark.p3
def test_graphrag_run_and_trace(self, WebApiAuth, add_chunks):
kb_id, _, _ = add_chunks
run_res = kb_run_graphrag(WebApiAuth, {"kb_id": kb_id})
assert run_res["code"] == 0, run_res
task_id = run_res["data"]["graphrag_task_id"]
assert task_id, run_res
_wait_for_task(kb_trace_graphrag, WebApiAuth, kb_id, task_id)
trace_res = kb_trace_graphrag(WebApiAuth, {"kb_id": kb_id})
assert trace_res["code"] == 0, trace_res
task = _find_task(trace_res["data"], task_id)
assert task, trace_res
assert task["id"] == task_id, trace_res
progress = task.get("progress")
_assert_progress_in_scale(progress, task)
@pytest.mark.p3
def test_raptor_run_and_trace(self, WebApiAuth, add_chunks):
kb_id, _, _ = add_chunks
run_res = kb_run_raptor(WebApiAuth, {"kb_id": kb_id})
assert run_res["code"] == 0, run_res
task_id = run_res["data"]["raptor_task_id"]
assert task_id, run_res
_wait_for_task(kb_trace_raptor, WebApiAuth, kb_id, task_id)
trace_res = kb_trace_raptor(WebApiAuth, {"kb_id": kb_id})
assert trace_res["code"] == 0, trace_res
task = _find_task(trace_res["data"], task_id)
assert task, trace_res
assert task["id"] == task_id, trace_res
progress = task.get("progress")
_assert_progress_in_scale(progress, task)
@pytest.mark.p3
def test_mindmap_run_and_trace(self, WebApiAuth, add_chunks):
kb_id, _, _ = add_chunks
run_res = kb_run_mindmap(WebApiAuth, {"kb_id": kb_id})
assert run_res["code"] == 0, run_res
task_id = run_res["data"]["mindmap_task_id"]
assert task_id, run_res
_wait_for_task(kb_trace_mindmap, WebApiAuth, kb_id, task_id)
trace_res = kb_trace_mindmap(WebApiAuth, {"kb_id": kb_id})
assert trace_res["code"] == 0, trace_res
task = _find_task(trace_res["data"], task_id)
assert task, trace_res
assert task["id"] == task_id, trace_res
progress = task.get("progress")
_assert_progress_in_scale(progress, task)
class TestKbPipelineLogs:
@pytest.mark.p3
def test_pipeline_log_lifecycle(self, WebApiAuth, add_document):
kb_id, document_id = add_document
parse_documents(WebApiAuth, {"doc_ids": [document_id], "run": "1"})
_wait_for_docs_parsed(WebApiAuth, kb_id)
_wait_for_pipeline_logs(WebApiAuth, kb_id)
list_res = kb_list_pipeline_logs(WebApiAuth, params={"kb_id": kb_id}, payload={})
assert list_res["code"] == 0, list_res
assert "total" in list_res["data"], list_res
assert isinstance(list_res["data"]["logs"], list), list_res
assert list_res["data"]["logs"], list_res
log_id = list_res["data"]["logs"][0]["id"]
detail_res = kb_pipeline_log_detail(WebApiAuth, {"log_id": log_id})
assert detail_res["code"] == 0, detail_res
detail = detail_res["data"]
assert detail["id"] == log_id, detail_res
assert detail["kb_id"] == kb_id, detail_res
for key in ["document_id", "task_type", "operation_status", "progress"]:
assert key in detail, detail_res
delete_res = kb_delete_pipeline_logs(WebApiAuth, params={"kb_id": kb_id}, payload={"log_ids": [log_id]})
assert delete_res["code"] == 0, delete_res
assert delete_res["data"] is True, delete_res
@wait_for(30, 1, "Pipeline log delete timeout")
def _condition():
res = kb_list_pipeline_logs(WebApiAuth, params={"kb_id": kb_id}, payload={})
if res["code"] != 0:
return False
return all(log.get("id") != log_id for log in res["data"]["logs"])
_condition()
@pytest.mark.p3
def test_list_pipeline_dataset_logs(self, WebApiAuth, add_document):
kb_id, _ = add_document
res = kb_list_pipeline_dataset_logs(WebApiAuth, params={"kb_id": kb_id}, payload={})
assert res["code"] == 0, res
assert "total" in res["data"], res
assert isinstance(res["data"]["logs"], list), res
@pytest.mark.p3
def test_pipeline_log_detail_missing_id(self, WebApiAuth):
res = kb_pipeline_log_detail(WebApiAuth, {})
assert res["code"] == 101, res
assert "Pipeline log ID" in res["message"], res
@pytest.mark.p3
def test_delete_pipeline_logs_empty(self, WebApiAuth, add_document):
kb_id, _ = add_document
res = kb_delete_pipeline_logs(WebApiAuth, params={"kb_id": kb_id}, payload={"log_ids": []})
assert res["code"] == 0, res
assert res["data"] is True, res
@pytest.mark.p3
def test_list_pipeline_logs_missing_kb_id(self, WebApiAuth):
res = kb_list_pipeline_logs(WebApiAuth, params={}, payload={})
assert res["code"] == 101, res
assert "KB ID" in res["message"], res
@pytest.mark.p3
def test_list_pipeline_logs_abnormal_date_filter(self, WebApiAuth, add_document):
kb_id, _ = add_document
res = kb_list_pipeline_logs(
WebApiAuth,
params={
"kb_id": kb_id,
"desc": "false",
"create_date_from": "2025-01-01",
"create_date_to": "2025-02-01",
},
payload={},
)
assert res["code"] == 102, res
assert "Create data filter is abnormal." in res["message"], res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_kb_app/test_kb_pipeline_tasks.py",
"license": "Apache License 2.0",
"lines": 198,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_kb_app/test_kb_tags_meta.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
import pytest
from common import (
delete_knowledge_graph,
kb_basic_info,
kb_get_meta,
kb_update_metadata_setting,
knowledge_graph,
list_tags,
list_tags_from_kbs,
rename_tags,
rm_tags,
update_chunk,
)
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
from utils import wait_for
INVALID_AUTH_CASES = [
(None, 401, "Unauthorized"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "Unauthorized"),
]
TAG_SEED_TIMEOUT = 20
def _wait_for_tag(auth, kb_id, tag, timeout=TAG_SEED_TIMEOUT):
@wait_for(timeout, 1, "Tag seed timeout")
def _condition():
res = list_tags(auth, kb_id)
if res["code"] != 0:
return False
return tag in res["data"]
try:
_condition()
except AssertionError:
return False
return True
def _seed_tag(auth, kb_id, document_id, chunk_id):
# KB tags are derived from chunk tag_kwd, not document metadata.
tag = f"tag_{uuid.uuid4().hex[:8]}"
res = update_chunk(
auth,
{
"doc_id": document_id,
"chunk_id": chunk_id,
"content_with_weight": f"tag seed {tag}",
"tag_kwd": [tag],
},
)
assert res["code"] == 0, res
if not _wait_for_tag(auth, kb_id, tag):
return None
return tag
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_list_tags_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
res = list_tags(invalid_auth, "kb_id")
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_list_tags_from_kbs_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
res = list_tags_from_kbs(invalid_auth, {"kb_ids": "kb_id"})
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_rm_tags_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
res = rm_tags(invalid_auth, "kb_id", {"tags": ["tag"]})
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_rename_tag_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
res = rename_tags(invalid_auth, "kb_id", {"from_tag": "old", "to_tag": "new"})
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_get_meta_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
res = kb_get_meta(invalid_auth, {"kb_ids": "kb_id"})
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_basic_info_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
res = kb_basic_info(invalid_auth, {"kb_id": "kb_id"})
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_update_metadata_setting_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
res = kb_update_metadata_setting(invalid_auth, {"kb_id": "kb_id", "metadata": {}})
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_knowledge_graph_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
res = knowledge_graph(invalid_auth, "kb_id")
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_delete_knowledge_graph_auth_invalid(self, invalid_auth, expected_code, expected_fragment):
res = delete_knowledge_graph(invalid_auth, "kb_id")
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
class TestKbTagsMeta:
@pytest.mark.p2
def test_list_tags(self, WebApiAuth, add_dataset):
kb_id = add_dataset
res = list_tags(WebApiAuth, kb_id)
assert res["code"] == 0, res
assert isinstance(res["data"], list), res
@pytest.mark.p2
def test_list_tags_from_kbs(self, WebApiAuth, add_dataset):
kb_id = add_dataset
res = list_tags_from_kbs(WebApiAuth, {"kb_ids": kb_id})
assert res["code"] == 0, res
assert isinstance(res["data"], list), res
@pytest.mark.p3
def test_rm_tags(self, WebApiAuth, add_chunks):
kb_id, document_id, chunk_ids = add_chunks
tag_to_remove = _seed_tag(WebApiAuth, kb_id, document_id, chunk_ids[0])
if not tag_to_remove:
# Tag aggregation is index-backed; skip if it never surfaces.
pytest.skip("Seeded tag did not appear in list_tags.")
res = rm_tags(WebApiAuth, kb_id, {"tags": [tag_to_remove]})
assert res["code"] == 0, res
assert res["data"] is True, res
@wait_for(TAG_SEED_TIMEOUT, 1, "Tag removal timeout")
def _condition():
after_res = list_tags(WebApiAuth, kb_id)
if after_res["code"] != 0:
return False
return tag_to_remove not in after_res["data"]
_condition()
@pytest.mark.p3
def test_rename_tag(self, WebApiAuth, add_chunks):
kb_id, document_id, chunk_ids = add_chunks
from_tag = _seed_tag(WebApiAuth, kb_id, document_id, chunk_ids[0])
if not from_tag:
# Tag aggregation is index-backed; skip if it never surfaces.
pytest.skip("Seeded tag did not appear in list_tags.")
to_tag = f"{from_tag}_renamed"
res = rename_tags(WebApiAuth, kb_id, {"from_tag": from_tag, "to_tag": to_tag})
assert res["code"] == 0, res
assert res["data"] is True, res
@wait_for(TAG_SEED_TIMEOUT, 1, "Tag rename timeout")
def _condition():
after_res = list_tags(WebApiAuth, kb_id)
if after_res["code"] != 0:
return False
tags = after_res["data"]
return to_tag in tags and from_tag not in tags
_condition()
@pytest.mark.p2
def test_get_meta(self, WebApiAuth, add_dataset):
kb_id = add_dataset
res = kb_get_meta(WebApiAuth, {"kb_ids": kb_id})
assert res["code"] == 0, res
assert isinstance(res["data"], dict), res
@pytest.mark.p2
def test_basic_info(self, WebApiAuth, add_dataset):
kb_id = add_dataset
res = kb_basic_info(WebApiAuth, {"kb_id": kb_id})
assert res["code"] == 0, res
for key in ["processing", "finished", "failed", "cancelled", "downloaded"]:
assert key in res["data"], res
@pytest.mark.p2
def test_update_metadata_setting(self, WebApiAuth, add_dataset):
kb_id = add_dataset
metadata = {"source": "test"}
res = kb_update_metadata_setting(WebApiAuth, {"kb_id": kb_id, "metadata": metadata, "enable_metadata": True})
assert res["code"] == 0, res
assert res["data"]["id"] == kb_id, res
assert res["data"]["parser_config"]["metadata"] == metadata, res
@pytest.mark.p2
def test_knowledge_graph(self, WebApiAuth, add_dataset):
kb_id = add_dataset
res = knowledge_graph(WebApiAuth, kb_id)
assert res["code"] == 0, res
assert isinstance(res["data"], dict), res
assert "graph" in res["data"], res
assert "mind_map" in res["data"], res
@pytest.mark.p2
def test_delete_knowledge_graph(self, WebApiAuth, add_dataset):
kb_id = add_dataset
res = delete_knowledge_graph(WebApiAuth, kb_id)
assert res["code"] == 0, res
assert res["data"] is True, res
class TestKbTagsMetaNegative:
@pytest.mark.p3
def test_list_tags_invalid_kb(self, WebApiAuth):
res = list_tags(WebApiAuth, "invalid_kb_id")
assert res["code"] == 109, res
assert "No authorization" in res["message"], res
@pytest.mark.p3
def test_list_tags_from_kbs_invalid_kb(self, WebApiAuth):
res = list_tags_from_kbs(WebApiAuth, {"kb_ids": "invalid_kb_id"})
assert res["code"] == 109, res
assert "No authorization" in res["message"], res
@pytest.mark.p3
def test_rm_tags_invalid_kb(self, WebApiAuth):
res = rm_tags(WebApiAuth, "invalid_kb_id", {"tags": ["tag"]})
assert res["code"] == 109, res
assert "No authorization" in res["message"], res
@pytest.mark.p3
def test_rename_tag_invalid_kb(self, WebApiAuth):
res = rename_tags(WebApiAuth, "invalid_kb_id", {"from_tag": "old", "to_tag": "new"})
assert res["code"] == 109, res
assert "No authorization" in res["message"], res
@pytest.mark.p3
def test_get_meta_invalid_kb(self, WebApiAuth):
res = kb_get_meta(WebApiAuth, {"kb_ids": "invalid_kb_id"})
assert res["code"] == 109, res
assert "No authorization" in res["message"], res
@pytest.mark.p3
def test_basic_info_invalid_kb(self, WebApiAuth):
res = kb_basic_info(WebApiAuth, {"kb_id": "invalid_kb_id"})
assert res["code"] == 109, res
assert "No authorization" in res["message"], res
@pytest.mark.p3
def test_update_metadata_setting_missing_metadata(self, WebApiAuth, add_dataset):
res = kb_update_metadata_setting(WebApiAuth, {"kb_id": add_dataset})
assert res["code"] == 101, res
assert "required argument are missing" in res["message"], res
assert "metadata" in res["message"], res
@pytest.mark.p3
def test_knowledge_graph_invalid_kb(self, WebApiAuth):
res = knowledge_graph(WebApiAuth, "invalid_kb_id")
assert res["code"] == 109, res
assert "No authorization" in res["message"], res
@pytest.mark.p3
def test_delete_knowledge_graph_invalid_kb(self, WebApiAuth):
res = delete_knowledge_graph(WebApiAuth, "invalid_kb_id")
assert res["code"] == 109, res
assert "No authorization" in res["message"], res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_kb_app/test_kb_tags_meta.py",
"license": "Apache License 2.0",
"lines": 251,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_llm_app/test_llm_list.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import llm_factories, llm_list
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
INVALID_AUTH_CASES = [
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
]
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_message", INVALID_AUTH_CASES)
def test_auth_invalid_factories(self, invalid_auth, expected_code, expected_message):
res = llm_factories(invalid_auth)
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_message", INVALID_AUTH_CASES)
def test_auth_invalid_list(self, invalid_auth, expected_code, expected_message):
res = llm_list(invalid_auth)
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
class TestLLMList:
@pytest.mark.p1
def test_factories(self, WebApiAuth):
res = llm_factories(WebApiAuth)
assert res["code"] == 0, res
assert isinstance(res["data"], list), res
@pytest.mark.p1
def test_list(self, WebApiAuth):
res = llm_list(WebApiAuth)
assert res["code"] == 0, res
assert isinstance(res["data"], dict), res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_llm_app/test_llm_list.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_plugin_app/test_llm_tools.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib.util
import sys
from pathlib import Path
from types import ModuleType
import pytest
from common import plugin_llm_tools
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
INVALID_AUTH_CASES = [
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
]
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_message", INVALID_AUTH_CASES)
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
res = plugin_llm_tools(invalid_auth)
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
class TestPluginTools:
@pytest.mark.p1
def test_llm_tools(self, WebApiAuth):
res = plugin_llm_tools(WebApiAuth)
assert res["code"] == 0, res
assert isinstance(res["data"], list), res
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
def _load_plugin_app(monkeypatch):
repo_root = Path(__file__).resolve().parents[4]
common_pkg = ModuleType("common")
common_pkg.__path__ = [str(repo_root / "common")]
monkeypatch.setitem(sys.modules, "common", common_pkg)
stub_apps = ModuleType("api.apps")
stub_apps.login_required = lambda func: func
monkeypatch.setitem(sys.modules, "api.apps", stub_apps)
stub_plugin = ModuleType("agent.plugin")
class _StubGlobalPluginManager:
@staticmethod
def get_llm_tools():
return []
stub_plugin.GlobalPluginManager = _StubGlobalPluginManager
monkeypatch.setitem(sys.modules, "agent.plugin", stub_plugin)
module_path = Path(__file__).resolve().parents[4] / "api" / "apps" / "plugin_app.py"
spec = importlib.util.spec_from_file_location("test_plugin_app_unit", module_path)
module = importlib.util.module_from_spec(spec)
module.manager = _DummyManager()
spec.loader.exec_module(module)
return module
@pytest.mark.p2
def test_llm_tools_metadata_shape_unit(monkeypatch):
module = _load_plugin_app(monkeypatch)
class _DummyTool:
def get_metadata(self):
return {"name": "dummy", "description": "test"}
monkeypatch.setattr(module.GlobalPluginManager, "get_llm_tools", staticmethod(lambda: [_DummyTool()]))
res = module.llm_tools()
assert res["code"] == 0
assert isinstance(res["data"], list)
assert res["data"][0]["name"] == "dummy"
assert res["data"][0]["description"] == "test"
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_plugin_app/test_llm_tools.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_search_app/test_search_crud.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
import pytest
from common import search_create, search_detail, search_list, search_rm, search_update
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
INVALID_AUTH_CASES = [
(None, 401, "Unauthorized"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "Unauthorized"),
]
def _search_name(prefix="search"):
return f"{prefix}_{uuid.uuid4().hex[:8]}"
def _find_tenant_id(WebApiAuth, search_id):
res = search_list(WebApiAuth, payload={})
assert res["code"] == 0, res
for search_app in res["data"]["search_apps"]:
if search_app.get("id") == search_id:
return search_app.get("tenant_id")
assert False, res
@pytest.fixture
def search_app(WebApiAuth):
name = _search_name()
create_res = search_create(WebApiAuth, {"name": name, "description": "test search"})
assert create_res["code"] == 0, create_res
search_id = create_res["data"]["search_id"]
yield search_id
rm_res = search_rm(WebApiAuth, {"search_id": search_id})
assert rm_res["code"] == 0, rm_res
assert rm_res["data"] is True, rm_res
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_auth_invalid_create(self, invalid_auth, expected_code, expected_fragment):
res = search_create(invalid_auth, {"name": "dummy"})
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_auth_invalid_list(self, invalid_auth, expected_code, expected_fragment):
res = search_list(invalid_auth, payload={})
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_auth_invalid_detail(self, invalid_auth, expected_code, expected_fragment):
res = search_detail(invalid_auth, {"search_id": "dummy_search_id"})
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_auth_invalid_update(self, invalid_auth, expected_code, expected_fragment):
res = search_update(invalid_auth, {"search_id": "dummy", "name": "dummy", "search_config": {}, "tenant_id": "dummy"})
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_auth_invalid_rm(self, invalid_auth, expected_code, expected_fragment):
res = search_rm(invalid_auth, {"search_id": "dummy_search_id"})
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
class TestSearchCrud:
@pytest.mark.p2
def test_create_and_rm(self, WebApiAuth):
name = _search_name("create")
create_res = search_create(WebApiAuth, {"name": name, "description": "test search"})
assert create_res["code"] == 0, create_res
search_id = create_res["data"]["search_id"]
rm_res = search_rm(WebApiAuth, {"search_id": search_id})
assert rm_res["code"] == 0, rm_res
assert rm_res["data"] is True, rm_res
@pytest.mark.p2
def test_list(self, WebApiAuth, search_app):
res = search_list(WebApiAuth, payload={})
assert res["code"] == 0, res
assert any(app.get("id") == search_app for app in res["data"]["search_apps"]), res
@pytest.mark.p2
def test_detail(self, WebApiAuth, search_app):
res = search_detail(WebApiAuth, {"search_id": search_app})
assert res["code"] == 0, res
assert res["data"].get("id") == search_app, res
@pytest.mark.p2
def test_update(self, WebApiAuth, search_app):
tenant_id = _find_tenant_id(WebApiAuth, search_app)
new_name = _search_name("updated")
payload = {
"search_id": search_app,
"name": new_name,
"search_config": {"top_k": 3},
"tenant_id": tenant_id,
}
res = search_update(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"].get("name") == new_name, res
@pytest.mark.p3
def test_create_invalid_name(self, WebApiAuth):
res = search_create(WebApiAuth, {"name": ""})
assert res["code"] == 102, res
assert "empty" in res["message"], res
@pytest.mark.p3
def test_update_invalid_search_id(self, WebApiAuth):
create_res = search_create(WebApiAuth, {"name": _search_name("invalid"), "description": "test search"})
assert create_res["code"] == 0, create_res
search_id = create_res["data"]["search_id"]
tenant_id = _find_tenant_id(WebApiAuth, search_id)
try:
payload = {
"search_id": "invalid_search_id",
"name": "invalid",
"search_config": {},
"tenant_id": tenant_id,
}
res = search_update(WebApiAuth, payload)
assert res["code"] == 109, res
assert "No authorization" in res["message"], res
finally:
rm_res = search_rm(WebApiAuth, {"search_id": search_id})
assert rm_res["code"] == 0, rm_res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_search_app/test_search_crud.py",
"license": "Apache License 2.0",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_system_app/test_system_basic.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import (
system_config,
system_delete_token,
system_new_token,
system_status,
system_token_list,
system_version,
)
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
INVALID_AUTH_CASES = [
(None, 401, "Unauthorized"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "Unauthorized"),
]
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_auth_invalid_status(self, invalid_auth, expected_code, expected_fragment):
res = system_status(invalid_auth)
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_auth_invalid_version(self, invalid_auth, expected_code, expected_fragment):
res = system_version(invalid_auth)
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_auth_invalid_token_list(self, invalid_auth, expected_code, expected_fragment):
res = system_token_list(invalid_auth)
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth, expected_code, expected_fragment", INVALID_AUTH_CASES)
def test_auth_invalid_delete_token(self, invalid_auth, expected_code, expected_fragment):
res = system_delete_token(invalid_auth, "dummy_token")
assert res["code"] == expected_code, res
assert expected_fragment in res["message"], res
class TestSystemConfig:
@pytest.mark.p2
@pytest.mark.parametrize("invalid_auth", [None, RAGFlowWebApiAuth(INVALID_API_TOKEN)])
def test_config_no_auth_required(self, invalid_auth):
res = system_config(invalid_auth)
assert res["code"] == 0, res
assert "registerEnabled" in res["data"], res
class TestSystemEndpoints:
    """Happy-path coverage for the authenticated system endpoints."""

    @pytest.mark.p2
    def test_status(self, WebApiAuth):
        """The status report includes every backing-service section."""
        response = system_status(WebApiAuth)
        assert response["code"] == 0, response
        # Each backing service must be represented in the payload.
        missing = [key for key in ("doc_engine", "storage", "database", "redis") if key not in response["data"]]
        assert not missing, response

    @pytest.mark.p2
    def test_version(self, WebApiAuth):
        """The version endpoint returns a non-empty payload."""
        response = system_version(WebApiAuth)
        assert response["code"] == 0, response
        assert response["data"], response

    @pytest.mark.p2
    def test_token_list(self, WebApiAuth):
        """Token listing returns a list payload."""
        response = system_token_list(WebApiAuth)
        assert response["code"] == 0, response
        assert isinstance(response["data"], list), response

    @pytest.mark.p2
    def test_delete_token(self, WebApiAuth):
        """A freshly created token can be deleted."""
        created = system_new_token(WebApiAuth)
        assert created["code"] == 0, created
        deleted = system_delete_token(WebApiAuth, created["data"]["token"])
        assert deleted["code"] == 0, deleted
        assert deleted["data"] is True, deleted

    @pytest.mark.p3
    def test_delete_missing_token(self, WebApiAuth):
        """Deleting an unknown token reports success (delete is idempotent)."""
        response = system_delete_token(WebApiAuth, "missing_token")
        assert response["code"] == 0, response
        assert response["data"] is True, response
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_system_app/test_system_basic.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_chat_management/test_table_parser_dataset_chat.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import tempfile
import pytest
from utils import wait_for
from common import (
chat_completions,
create_chat_assistant,
create_session_with_chat_assistant,
delete_chat_assistants,
list_documents,
parse_documents,
upload_documents,
)
@wait_for(200, 1, "Document parsing timeout")
def wait_for_parsing_completion(auth, dataset_id, document_id=None):
    """
    Wait for document parsing to complete.

    Polled by the ``wait_for`` decorator (200 s timeout, 1 s interval); each
    call re-lists the dataset's documents and inspects their ``run`` status.

    Args:
        auth: Authentication object
        dataset_id: Dataset ID
        document_id: Optional specific document ID to wait for; when None,
            every document in the dataset must reach DONE.

    Returns:
        bool: True if parsing is complete, False to keep polling

    Raises:
        Fails the test (``pytest.fail``) as soon as any awaited document
        reports FAILED, rather than burning the whole timeout.
    """
    res = list_documents(auth, dataset_id)
    docs = res["data"]["docs"]
    if document_id is None:
        # Wait for all documents to complete. Fail fast on FAILED — previously
        # this branch kept polling until the 200 s timeout, unlike the
        # single-document branch below.
        for doc in docs:
            status = doc.get("run", "UNKNOWN")
            if status == "FAILED":
                pytest.fail(f"Document parsing failed: {doc}")
            if status != "DONE":
                return False
        return True
    # Wait for one specific document.
    for doc in docs:
        if doc["id"] == document_id:
            status = doc.get("run", "UNKNOWN")
            if status == "DONE":
                return True
            if status == "FAILED":
                pytest.fail(f"Document parsing failed: {doc}")
            return False
    # Document not visible in the listing yet (e.g. indexing lag) — keep polling.
    return False
# Test data
# Employee worksheet: header row + 6 data rows. Used to exercise SQL-style
# questions about departments and salaries (see the parametrized chat cases).
TEST_EXCEL_DATA = [
    ["employee_id", "name", "department", "salary"],
    ["E001", "Alice Johnson", "Engineering", "95000"],
    ["E002", "Bob Smith", "Marketing", "65000"],
    ["E003", "Carol Williams", "Engineering", "88000"],
    ["E004", "David Brown", "Sales", "72000"],
    ["E005", "Eva Davis", "HR", "68000"],
    ["E006", "Frank Miller", "Engineering", "102000"],
]
# Product worksheet: header row + 6 data rows. Uploaded alongside the employee
# sheet so the assistant must pick the right table per question.
TEST_EXCEL_DATA_2 = [
    ["product", "price", "category"],
    ["Laptop", "999", "Electronics"],
    ["Mouse", "29", "Electronics"],
    ["Desk", "299", "Furniture"],
    ["Chair", "199", "Furniture"],
    ["Monitor", "399", "Electronics"],
    ["Keyboard", "79", "Electronics"],
]
# System prompt for the chat assistant; {knowledge} is substituted by the
# server with retrieved table content (declared in prompt_config parameters).
DEFAULT_CHAT_PROMPT = (
    "You are a helpful assistant that answers questions about table data using SQL queries.\n\n"
    "Here is the knowledge base:\n{knowledge}\n\n"
    "Use this information to answer questions."
)
@pytest.mark.usefixtures("add_table_parser_dataset")
class TestTableParserDatasetChat:
    """
    Test table parser dataset chat functionality with Infinity backend.

    Verifies that:
    1. Excel files are uploaded and parsed correctly into table parser datasets
    2. Chat assistants can query the parsed table data via SQL
    3. Different types of queries work

    Note: setup is intentionally done once per class (expensive upload/parse),
    with a hand-rolled counter in the autouse fixture to decide when the last
    parametrized case has run and teardown may happen.
    """

    @pytest.fixture(autouse=True)
    def setup_chat_assistant(self, HttpApiAuth, add_table_parser_dataset, request):
        """
        Setup fixture that runs before each test method.

        Creates chat assistant once and reuses it across all test cases.
        State is stored on the class (dataset_id, auth, chat_id, session_id)
        so subsequent parametrized invocations skip the expensive setup.
        """
        # Only setup once (first time); chat_id doubles as the "initialized" flag.
        if not hasattr(self.__class__, "chat_id"):
            self.__class__.dataset_id = add_table_parser_dataset
            self.__class__.auth = HttpApiAuth
            # Upload and parse Excel files once for all tests
            self._upload_and_parse_excel(HttpApiAuth, add_table_parser_dataset)
            # Create a single chat assistant and session for all tests
            chat_id, session_id = self._create_chat_assistant_with_session(HttpApiAuth, add_table_parser_dataset)
            self.__class__.chat_id = chat_id
            self.__class__.session_id = session_id
            # Store the total number of parametrize cases so teardown knows
            # when the final case has completed.
            mark = request.node.get_closest_marker("parametrize")
            if mark:
                # Get the number of test cases from parametrize
                # (args[1] is the list of parameter tuples).
                param_values = mark.args[1]
                self.__class__._total_tests = len(param_values)
            else:
                self.__class__._total_tests = 1
        yield
        # Teardown: cleanup chat assistant after all tests
        # Use a class-level counter to track tests; each finished test
        # increments it, and cleanup fires once the count reaches the
        # number of parametrized cases.
        if not hasattr(self.__class__, "_test_counter"):
            self.__class__._test_counter = 0
        self.__class__._test_counter += 1
        # Cleanup after all parametrize tests complete
        if self.__class__._test_counter >= self.__class__._total_tests:
            self._teardown_chat_assistant()

    def _teardown_chat_assistant(self):
        """Teardown method to clean up chat assistant (best-effort; failures are logged, not raised)."""
        if hasattr(self.__class__, "chat_id") and self.__class__.chat_id:
            try:
                delete_chat_assistants(self.__class__.auth, {"ids": [self.__class__.chat_id]})
            except Exception as e:
                print(f"[Teardown] Warning: Failed to delete chat assistant: {e}")

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "question, expected_answer_pattern",
        [
            ("show me column of product", r"\|product\|Source"),
            ("which product has price 79", r"Keyboard"),
            ("How many rows in the dataset?", r"rows|count\(\*\)"),
            ("Show me all employees in Engineering department", r"(Alice|Carol|Frank)"),
        ],
    )
    def test_table_parser_dataset_chat(self, question, expected_answer_pattern):
        """
        Test that table parser dataset chat works correctly.

        Each case sends one question through the shared assistant/session and
        checks the answer against a regex (case-insensitive).
        """
        # Use class-level attributes (set by setup fixture)
        answer = self._ask_question(
            self.__class__.auth,
            self.__class__.chat_id,
            self.__class__.session_id,
            question
        )
        # Verify answer matches expected pattern if provided
        if expected_answer_pattern:
            self._assert_answer_matches_pattern(answer, expected_answer_pattern)
        else:
            # Just verify we got a non-empty answer
            assert answer and len(answer) > 0, "Expected non-empty answer"

    @staticmethod
    def _upload_and_parse_excel(auth, dataset_id):
        """
        Upload 2 Excel files and wait for parsing to complete.

        Temporary .xlsx files are built from the module-level test tables,
        uploaded, parsed, and always unlinked afterwards.

        Returns:
            list: The document IDs of the uploaded files

        Raises:
            AssertionError: If upload or parsing fails
        """
        excel_file_paths = []
        document_ids = []
        try:
            # Create 2 temporary Excel files
            excel_file_paths.append(TestTableParserDatasetChat._create_temp_excel_file(TEST_EXCEL_DATA))
            excel_file_paths.append(TestTableParserDatasetChat._create_temp_excel_file(TEST_EXCEL_DATA_2))
            # Upload documents
            res = upload_documents(auth, dataset_id, excel_file_paths)
            assert res["code"] == 0, f"Failed to upload documents: {res}"
            for doc in res["data"]:
                document_ids.append(doc["id"])
            # Start parsing for all documents
            parse_payload = {"document_ids": document_ids}
            res = parse_documents(auth, dataset_id, parse_payload)
            assert res["code"] == 0, f"Failed to start parsing: {res}"
            # Wait for parsing completion for all documents
            for doc_id in document_ids:
                wait_for_parsing_completion(auth, dataset_id, doc_id)
            return document_ids
        finally:
            # Clean up temporary files even when upload/parse assertions fail
            for excel_file_path in excel_file_paths:
                if excel_file_path:
                    os.unlink(excel_file_path)

    @staticmethod
    def _create_temp_excel_file(data):
        """
        Create a temporary Excel file with the given table test data.

        The NamedTemporaryFile is created with delete=False and closed
        immediately so openpyxl can write to its path; the caller is
        responsible for unlinking it.

        Args:
            data: List of lists containing the Excel data

        Returns:
            str: Path to the created temporary file
        """
        from openpyxl import Workbook
        f = tempfile.NamedTemporaryFile(mode="wb", suffix=".xlsx", delete=False)
        f.close()
        wb = Workbook()
        ws = wb.active
        # Write test data to the worksheet (openpyxl cells are 1-indexed)
        for row_idx, row_data in enumerate(data, start=1):
            for col_idx, value in enumerate(row_data, start=1):
                ws.cell(row=row_idx, column=col_idx, value=value)
        wb.save(f.name)
        return f.name

    @staticmethod
    def _create_chat_assistant_with_session(auth, dataset_id):
        """
        Create a chat assistant and session for testing.

        Names are suffixed with a random uuid fragment to avoid collisions
        across test runs against a shared server.

        Returns:
            tuple: (chat_id, session_id)
        """
        import uuid
        chat_payload = {
            "name": f"test_table_parser_dataset_chat_{uuid.uuid4().hex[:8]}",
            "dataset_ids": [dataset_id],
            "prompt_config": {
                "system": DEFAULT_CHAT_PROMPT,
                "parameters": [
                    {
                        "key": "knowledge",
                        "optional": True,
                        "value": "Use the table data to answer questions with SQL queries.",
                    }
                ],
            },
        }
        res = create_chat_assistant(auth, chat_payload)
        assert res["code"] == 0, f"Failed to create chat assistant: {res}"
        chat_id = res["data"]["id"]
        res = create_session_with_chat_assistant(auth, chat_id, {"name": f"test_session_{uuid.uuid4().hex[:8]}"})
        assert res["code"] == 0, f"Failed to create session: {res}"
        session_id = res["data"]["id"]
        return chat_id, session_id

    def _ask_question(self, auth, chat_id, session_id, question):
        """
        Send a question to the chat assistant and return the answer.

        Uses non-streaming completion so the full answer arrives in one payload.

        Returns:
            str: The assistant's answer
        """
        payload = {
            "question": question,
            "stream": False,
            "session_id": session_id,
        }
        res_json = chat_completions(auth, chat_id, payload)
        assert res_json["code"] == 0, f"Chat completion failed: {res_json}"
        return res_json["data"]["answer"]

    def _assert_answer_matches_pattern(self, answer, pattern):
        """
        Assert that the answer matches the expected pattern (case-insensitive search).

        Args:
            answer: The actual answer from the chat assistant
            pattern: Regular expression pattern to match
        """
        assert re.search(pattern, answer, re.IGNORECASE), (
            f"Answer does not match expected pattern '{pattern}'.\n"
            f"Answer: {answer}"
        )
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_chat_management/test_table_parser_dataset_chat.py",
"license": "Apache License 2.0",
"lines": 269,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_admin_api/test_user_api_key_management/test_delete_user_api_key.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any
import pytest
import requests
from conftest import delete_user_api_key, generate_user_api_key, get_user_api_key, UNAUTHORIZED_ERROR_MESSAGE
from common.constants import RetCode
from configs import EMAIL, HOST_ADDRESS, PASSWORD, VERSION
class TestDeleteUserApiKey:
    """Admin-API coverage for deleting a user's API key.

    Each test generates a fresh key via the admin session, then exercises
    deletion: success path, list consistency, double delete, unknown token,
    unknown user, cross-user token, and unauthenticated access.
    """

    @pytest.mark.p2
    def test_delete_user_api_key_success(self, admin_session: requests.Session) -> None:
        """Test successfully deleting an API key for a user"""
        user_name: str = EMAIL
        # Generate an API key first
        generate_response: dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert generate_response.get("code") == RetCode.SUCCESS, f"Generate should succeed, got code {generate_response.get('code')}"
        generated_key: dict[str, Any] = generate_response["data"]
        token: str = generated_key["token"]
        # Delete the API key
        delete_response: dict[str, Any] = delete_user_api_key(admin_session, user_name, token)
        # Verify response
        assert delete_response.get("code") == RetCode.SUCCESS, f"Delete should succeed, got code {delete_response.get('code')}"
        assert "message" in delete_response, "Response should contain message"
        message: str = delete_response.get("message", "")
        assert message == "API key deleted successfully", f"Message should indicate success, got: {message}"

    @pytest.mark.p2
    def test_user_api_key_removed_from_list_after_deletion(self, admin_session: requests.Session) -> None:
        """Test that deleted API key is removed from the list"""
        user_name: str = EMAIL
        # Generate an API key
        generate_response: dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert generate_response.get("code") == RetCode.SUCCESS, f"Generate should succeed, got code {generate_response.get('code')}"
        generated_key: dict[str, Any] = generate_response["data"]
        token: str = generated_key["token"]
        # Verify the key exists in the list before deleting it
        get_response_before: dict[str, Any] = get_user_api_key(admin_session, user_name)
        assert get_response_before.get("code") == RetCode.SUCCESS, f"Get should succeed, got code {get_response_before.get('code')}"
        api_keys_before: list[dict[str, Any]] = get_response_before["data"]
        token_found_before: bool = any(key.get("token") == token for key in api_keys_before)
        assert token_found_before, "Generated API key should be in the list before deletion"
        # Delete the API key
        delete_response: dict[str, Any] = delete_user_api_key(admin_session, user_name, token)
        assert delete_response.get("code") == RetCode.SUCCESS, f"Delete should succeed, got code {delete_response.get('code')}"
        # Verify the key is no longer in the list
        get_response_after: dict[str, Any] = get_user_api_key(admin_session, user_name)
        assert get_response_after.get("code") == RetCode.SUCCESS, f"Get should succeed, got code {get_response_after.get('code')}"
        api_keys_after: list[dict[str, Any]] = get_response_after["data"]
        token_found_after: bool = any(key.get("token") == token for key in api_keys_after)
        assert not token_found_after, "Deleted API key should not be in the list after deletion"

    @pytest.mark.p2
    def test_delete_user_api_key_response_structure(self, admin_session: requests.Session) -> None:
        """Test that delete_user_api_key returns correct response structure"""
        user_name: str = EMAIL
        # Generate an API key
        generate_response: dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert generate_response.get("code") == RetCode.SUCCESS, f"Generate should succeed, got code {generate_response.get('code')}"
        token: str = generate_response["data"]["token"]
        # Delete the API key
        delete_response: dict[str, Any] = delete_user_api_key(admin_session, user_name, token)
        # Verify response structure
        assert delete_response.get("code") == RetCode.SUCCESS, f"Response code should be {RetCode.SUCCESS}, got {delete_response.get('code')}"
        assert "message" in delete_response, "Response should contain message"
        # Data can be None for delete operations, but the field itself must exist
        assert "data" in delete_response, "Response should contain data field"

    @pytest.mark.p2
    def test_delete_user_api_key_twice(self, admin_session: requests.Session) -> None:
        """Test that deleting the same token twice behaves correctly"""
        user_name: str = EMAIL
        # Generate an API key
        generate_response: dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert generate_response.get("code") == RetCode.SUCCESS, f"Generate should succeed, got code {generate_response.get('code')}"
        token: str = generate_response["data"]["token"]
        # Delete the API key first time
        delete_response1: dict[str, Any] = delete_user_api_key(admin_session, user_name, token)
        assert delete_response1.get("code") == RetCode.SUCCESS, f"First delete should succeed, got code {delete_response1.get('code')}"
        # Try to delete the same token again
        delete_response2: dict[str, Any] = delete_user_api_key(admin_session, user_name, token)
        # Second delete should fail since token no longer exists
        assert delete_response2.get("code") == RetCode.NOT_FOUND, "Second delete should fail for already deleted token"
        assert "message" in delete_response2, "Response should contain message"

    @pytest.mark.p2
    def test_delete_user_api_key_with_nonexistent_token(self, admin_session: requests.Session) -> None:
        """Test deleting a non-existent API key fails"""
        user_name: str = EMAIL
        nonexistent_token: str = "ragflow-nonexistent-token-12345"
        # Try to delete a non-existent token
        delete_response: dict[str, Any] = delete_user_api_key(admin_session, user_name, nonexistent_token)
        # Should return error
        assert delete_response.get("code") == RetCode.NOT_FOUND, "Delete should fail for non-existent token"
        assert "message" in delete_response, "Response should contain message"
        message: str = delete_response.get("message", "")
        assert message == "API key not found or could not be deleted", f"Message should indicate token not found, got: {message}"

    @pytest.mark.p2
    def test_delete_user_api_key_with_nonexistent_user(self, admin_session: requests.Session) -> None:
        """Test deleting API key for non-existent user fails"""
        nonexistent_user: str = "nonexistent_user_12345@example.com"
        token: str = "ragflow-test-token-12345"
        # Try to delete token for non-existent user
        delete_response: dict[str, Any] = delete_user_api_key(admin_session, nonexistent_user, token)
        # Should return error
        assert delete_response.get("code") == RetCode.NOT_FOUND, "Delete should fail for non-existent user"
        assert "message" in delete_response, "Response should contain message"
        message: str = delete_response.get("message", "")
        expected_message: str = f"User '{nonexistent_user}' not found"
        assert message == expected_message, f"Message should indicate user not found, got: {message}"

    @pytest.mark.p2
    def test_delete_user_api_key_wrong_user_token(self, admin_session: requests.Session) -> None:
        """Test that deleting a token belonging to another user fails"""
        user_name: str = EMAIL
        # Create (or reuse) a second user so we can attempt a cross-user delete.
        url: str = HOST_ADDRESS + f"/{VERSION}/user/register"
        user2_email: str = "qa2@ragflow.io"
        register_data: dict[str, str] = {"email": user2_email, "nickname": "qa2", "password": PASSWORD}
        register_response: dict[str, Any] = requests.post(url=url, json=register_data).json()
        # FIX: the message may be missing/None — default to "" before the `in`
        # check; the original also nested double quotes inside a double-quoted
        # f-string, which is a SyntaxError before Python 3.12.
        register_message: str = register_response.get("message") or ""
        if register_response.get("code") != 0 and "has already registered" not in register_message:
            raise Exception(f"Failed to create second user: {register_message}")
        # Generate a token for the test user
        generate_response: dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert generate_response.get("code") == RetCode.SUCCESS, f"Generate should succeed, got code {generate_response.get('code')}"
        token: str = generate_response["data"]["token"]
        # Try to delete with the second username
        delete_response: dict[str, Any] = delete_user_api_key(admin_session, user2_email, token)
        # Should fail because the token doesn't belong to that user
        assert delete_response.get("code") == RetCode.NOT_FOUND, "Delete should fail for wrong user"
        assert "message" in delete_response, "Response should contain message"
        message: str = delete_response.get("message", "")
        expected_message: str = "API key not found or could not be deleted"
        assert message == expected_message, f"Message should indicate API key not found, got: {message}"

    @pytest.mark.p3
    def test_delete_user_api_key_without_auth(self) -> None:
        """Test that deleting API key without admin auth fails"""
        session: requests.Session = requests.Session()
        user_name: str = EMAIL
        token: str = "ragflow-test-token-12345"
        response: dict[str, Any] = delete_user_api_key(session, user_name, token)
        # Verify error response
        assert response.get("code") == RetCode.UNAUTHORIZED, "Response code should indicate error"
        assert "message" in response, "Response should contain message"
        message: str = response.get("message", "").lower()
        # The message is an HTML string indicating an unauthorized user.
        assert message == UNAUTHORIZED_ERROR_MESSAGE
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_admin_api/test_user_api_key_management/test_delete_user_api_key.py",
"license": "Apache License 2.0",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_admin_api/test_user_api_key_management/test_generate_user_api_key.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Dict, List
import pytest
import requests
from common.constants import RetCode
from conftest import generate_user_api_key, get_user_api_key, UNAUTHORIZED_ERROR_MESSAGE
from configs import EMAIL
class TestGenerateUserApiKey:
    """Admin-API coverage for generating a user's API key.

    Covers the success payload shape, listing consistency, multiple-key
    generation, tenant-id stability, token/beta format, unknown users,
    unauthenticated access, and timestamp fields.
    """

    @pytest.mark.p2
    def test_generate_user_api_key_success(self, admin_session: requests.Session) -> None:
        """Test successfully generating API key for a user"""
        # Use the test user email (get_user_details expects email)
        user_name: str = EMAIL
        # Generate API key
        response: Dict[str, Any] = generate_user_api_key(admin_session, user_name)
        # Verify response code, message, and data
        assert response.get("code") == RetCode.SUCCESS, f"Response code should be {RetCode.SUCCESS}, got {response.get('code')}"
        assert "message" in response, "Response should contain message"
        assert "data" in response, "Response should contain data"
        assert response.get("data") is not None, "API key generation should return data"
        result: Dict[str, Any] = response["data"]
        # Verify response structure
        assert "tenant_id" in result, "Response should contain tenant_id"
        assert "token" in result, "Response should contain token"
        assert "beta" in result, "Response should contain beta"
        assert "create_time" in result, "Response should contain create_time"
        assert "create_date" in result, "Response should contain create_date"
        # Verify token is a non-empty string (tokens typically start with "ragflow-")
        token: str = result["token"]
        assert isinstance(token, str), "Token should be a string"
        assert len(token) > 0, "Token should not be empty"
        # Verify beta is independently generated
        beta: str = result["beta"]
        assert isinstance(beta, str), "Beta should be a string"
        assert len(beta) == 32, "Beta should be 32 characters"
        # Beta should be independent from token (not derived from it)
        if token.startswith("ragflow-"):
            token_without_prefix: str = token.replace("ragflow-", "")[:32]
            assert beta != token_without_prefix, "Beta should be independently generated, not derived from token"

    @pytest.mark.p2
    def test_generate_user_api_key_appears_in_list(self, admin_session: requests.Session) -> None:
        """Test that generated API key appears in get_user_api_key list"""
        user_name: str = EMAIL
        # Generate API key
        generate_response: Dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert generate_response.get("code") == RetCode.SUCCESS, f"Generate should succeed, got code {generate_response.get('code')}"
        generated_key: Dict[str, Any] = generate_response["data"]
        token: str = generated_key["token"]
        # Get all API keys for the user
        get_response: Dict[str, Any] = get_user_api_key(admin_session, user_name)
        assert get_response.get("code") == RetCode.SUCCESS, f"Get should succeed, got code {get_response.get('code')}"
        api_keys: List[Dict[str, Any]] = get_response["data"]
        # Verify the generated key is in the list
        assert len(api_keys) > 0, "User should have at least one API key"
        token_found: bool = any(key.get("token") == token for key in api_keys)
        assert token_found, "Generated API key should appear in the list"

    @pytest.mark.p2
    def test_generate_user_api_key_response_structure(self, admin_session: requests.Session) -> None:
        """Test that generate_user_api_key returns correct response structure"""
        user_name: str = EMAIL
        response: Dict[str, Any] = generate_user_api_key(admin_session, user_name)
        # Verify response code, message, and data
        assert response.get("code") == RetCode.SUCCESS, f"Response code should be {RetCode.SUCCESS}, got {response.get('code')}"
        assert "message" in response, "Response should contain message"
        assert "data" in response, "Response should contain data"
        result: Dict[str, Any] = response["data"]
        # Verify all required fields
        assert "tenant_id" in result, "Response should have tenant_id"
        assert "token" in result, "Response should have token"
        assert "beta" in result, "Response should have beta"
        assert "create_time" in result, "Response should have create_time"
        assert "create_date" in result, "Response should have create_date"
        assert "update_time" in result, "Response should have update_time"
        assert "update_date" in result, "Response should have update_date"
        # Verify field types
        assert isinstance(result["tenant_id"], str), "tenant_id should be string"
        assert isinstance(result["token"], str), "token should be string"
        assert isinstance(result["beta"], str), "beta should be string"
        assert isinstance(result["create_time"], (int, type(None))), "create_time should be int or None"
        assert isinstance(result["create_date"], (str, type(None))), "create_date should be string or None"

    @pytest.mark.p2
    def test_generate_user_api_key_multiple_times(self, admin_session: requests.Session) -> None:
        """Test generating multiple API keys for the same user"""
        user_name: str = EMAIL
        # Generate first API key
        response1: Dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert response1.get("code") == RetCode.SUCCESS, f"First generate should succeed, got code {response1.get('code')}"
        key1: Dict[str, Any] = response1["data"]
        token1: str = key1["token"]
        # Generate second API key
        response2: Dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert response2.get("code") == RetCode.SUCCESS, f"Second generate should succeed, got code {response2.get('code')}"
        key2: Dict[str, Any] = response2["data"]
        token2: str = key2["token"]
        # Tokens should be different
        assert token1 != token2, "Multiple API keys should have different tokens"
        # Both should appear in the list
        get_response: Dict[str, Any] = get_user_api_key(admin_session, user_name)
        assert get_response.get("code") == RetCode.SUCCESS, f"Get should succeed, got code {get_response.get('code')}"
        api_keys: List[Dict[str, Any]] = get_response["data"]
        tokens: List[str] = [key.get("token") for key in api_keys]
        assert token1 in tokens, "First token should be in the list"
        assert token2 in tokens, "Second token should be in the list"

    @pytest.mark.p2
    def test_generate_user_api_key_nonexistent_user(self, admin_session: requests.Session) -> None:
        """Test generating API key for non-existent user fails"""
        response: Dict[str, Any] = generate_user_api_key(admin_session, "nonexistent_user_12345")
        # Verify error response
        assert response.get("code") == RetCode.NOT_FOUND, "Response code should indicate error"
        assert "message" in response, "Response should contain message"
        message: str = response.get("message", "")
        assert message == "User not found!", f"Message should indicate user not found, got: {message}"

    @pytest.mark.p2
    def test_generate_user_api_key_tenant_id_consistency(self, admin_session: requests.Session) -> None:
        """Test that generated API keys have consistent tenant_id"""
        user_name: str = EMAIL
        # Generate multiple API keys
        response1: Dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert response1.get("code") == RetCode.SUCCESS, f"First generate should succeed, got code {response1.get('code')}"
        key1: Dict[str, Any] = response1["data"]
        response2: Dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert response2.get("code") == RetCode.SUCCESS, f"Second generate should succeed, got code {response2.get('code')}"
        key2: Dict[str, Any] = response2["data"]
        # Tenant IDs should be the same for the same user
        assert key1["tenant_id"] == key2["tenant_id"], "Same user should have same tenant_id"

    @pytest.mark.p2
    def test_generate_user_api_key_token_format(self, admin_session: requests.Session) -> None:
        """Test that generated API key has correct format"""
        user_name: str = EMAIL
        response: Dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert response.get("code") == RetCode.SUCCESS, f"Response code should be {RetCode.SUCCESS}, got {response.get('code')}"
        result: Dict[str, Any] = response["data"]
        token: str = result["token"]
        # Token should be a non-empty string
        assert isinstance(token, str), "Token should be a string"
        assert len(token) > 0, "Token should not be empty"
        # Beta should be independently generated (32 chars, not derived from token)
        beta: str = result["beta"]
        assert isinstance(beta, str), "Beta should be a string"
        assert len(beta) == 32, "Beta should be 32 characters"
        # Beta should be independent from token (not derived from it)
        if token.startswith("ragflow-"):
            token_without_prefix: str = token.replace("ragflow-", "")[:32]
            assert beta != token_without_prefix, "Beta should be independently generated, not derived from token"

    @pytest.mark.p1
    def test_generate_user_api_key_without_auth(self) -> None:
        """Test that generating API key without admin auth fails"""
        # Fresh session with no admin login attached.
        session: requests.Session = requests.Session()
        user_name: str = EMAIL
        response: Dict[str, Any] = generate_user_api_key(session, user_name)
        # Verify error response
        assert response.get("code") == RetCode.UNAUTHORIZED, "Response code should indicate error"
        assert "message" in response, "Response should contain message"
        message: str = response.get("message", "").lower()
        # The message is an HTML string indicating an unauthorized user.
        assert message == UNAUTHORIZED_ERROR_MESSAGE

    @pytest.mark.p3
    def test_generate_user_api_key_timestamp_fields(self, admin_session: requests.Session) -> None:
        """Test that generated API key has correct timestamp fields"""
        user_name: str = EMAIL
        response: Dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert response.get("code") == RetCode.SUCCESS, f"Response code should be {RetCode.SUCCESS}, got {response.get('code')}"
        result: Dict[str, Any] = response["data"]
        # create_time should be a timestamp (int)
        create_time: Any = result.get("create_time")
        assert create_time is None or isinstance(create_time, int), "create_time should be int or None"
        if create_time is not None:
            assert create_time > 0, "create_time should be positive"
        # create_date should be a date string
        create_date: Any = result.get("create_date")
        assert create_date is None or isinstance(create_date, str), "create_date should be string or None"
        # update_time and update_date should be None for new keys
        assert result.get("update_time") is None, "update_time should be None for new keys"
        assert result.get("update_date") is None, "update_date should be None for new keys"
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_admin_api/test_user_api_key_management/test_generate_user_api_key.py",
"license": "Apache License 2.0",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_admin_api/test_user_api_key_management/test_get_user_api_key.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Dict, List
import pytest
import requests
from conftest import generate_user_api_key, get_user_api_key, UNAUTHORIZED_ERROR_MESSAGE
from common.constants import RetCode
from configs import EMAIL
class TestGetUserApiKey:
    """Admin-API tests for listing a user's API keys.

    Covers the success path (structure and field types of each returned key),
    error paths (unknown user, empty username, missing auth), and invariants
    across keys (token uniqueness, tenant_id consistency, beta format).
    Relies on the `admin_session` fixture and helpers from conftest.
    """
    @pytest.mark.p2
    def test_get_user_api_key_success(self, admin_session: requests.Session) -> None:
        """Test successfully getting API keys for a user with correct response structure"""
        user_name: str = EMAIL
        # Generate a test API key first
        generate_response: Dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert generate_response["code"] == RetCode.SUCCESS, generate_response
        generated_key: Dict[str, Any] = generate_response["data"]
        generated_token: str = generated_key["token"]
        # Get all API keys for the user
        get_response: Dict[str, Any] = get_user_api_key(admin_session, user_name)
        assert get_response["code"] == RetCode.SUCCESS, get_response
        assert "message" in get_response, "Response should contain message"
        assert "data" in get_response, "Response should contain data"
        api_keys: List[Dict[str, Any]] = get_response["data"]
        # Verify response is a list with at least one key
        assert isinstance(api_keys, list), "API keys should be returned as a list"
        assert len(api_keys) > 0, "User should have at least one API key"
        # Verify structure of each API key
        for key in api_keys:
            assert isinstance(key, dict), "Each API key should be a dictionary"
            assert "token" in key, "API key should contain token"
            assert "beta" in key, "API key should contain beta"
            assert "tenant_id" in key, "API key should contain tenant_id"
            assert "create_date" in key, "API key should contain create_date"
            # Verify field types
            assert isinstance(key["token"], str), "token should be string"
            assert isinstance(key["beta"], str), "beta should be string"
            assert isinstance(key["tenant_id"], str), "tenant_id should be string"
            assert isinstance(key.get("create_date"), (str, type(None))), "create_date should be string or None"
            assert isinstance(key.get("update_date"), (str, type(None))), "update_date should be string or None"
        # Verify the generated key is in the list
        token_found: bool = any(key.get("token") == generated_token for key in api_keys)
        assert token_found, "Generated API key should appear in the list"
    @pytest.mark.p2
    def test_get_user_api_key_nonexistent_user(self, admin_session: requests.Session) -> None:
        """Test getting API keys for non-existent user fails"""
        nonexistent_user: str = "nonexistent_user_12345"
        response: Dict[str, Any] = get_user_api_key(admin_session, nonexistent_user)
        assert response["code"] == RetCode.NOT_FOUND, response
        assert "message" in response, "Response should contain message"
        message: str = response["message"]
        expected_message: str = f"User '{nonexistent_user}' not found"
        assert message == expected_message, f"Message should indicate user not found, got: {message}"
    @pytest.mark.p2
    def test_get_user_api_key_empty_username(self, admin_session: requests.Session) -> None:
        """Test getting API keys with empty username"""
        response: Dict[str, Any] = get_user_api_key(admin_session, "")
        # Empty username should either return error or empty list
        if response["code"] == RetCode.SUCCESS:
            assert "data" in response, "Response should contain data"
            api_keys: List[Dict[str, Any]] = response["data"]
            assert isinstance(api_keys, list), "Should return a list"
            assert len(api_keys) == 0, "Empty username should return empty list"
        else:
            assert "message" in response, "Error response should contain message"
            assert len(response["message"]) > 0, "Error message should not be empty"
    @pytest.mark.p2
    def test_get_user_api_key_token_uniqueness(self, admin_session: requests.Session) -> None:
        """Test that all API keys in the list have unique tokens"""
        user_name: str = EMAIL
        # Generate multiple API keys
        response1: Dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert response1["code"] == RetCode.SUCCESS, response1
        response2: Dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert response2["code"] == RetCode.SUCCESS, response2
        # Get all API keys
        get_response: Dict[str, Any] = get_user_api_key(admin_session, user_name)
        assert get_response["code"] == RetCode.SUCCESS, get_response
        api_keys: List[Dict[str, Any]] = get_response["data"]
        # Verify all tokens are unique
        tokens: List[str] = [key.get("token") for key in api_keys if key.get("token")]
        assert len(tokens) == len(set(tokens)), "All API keys should have unique tokens"
    @pytest.mark.p2
    def test_get_user_api_key_tenant_id_consistency(self, admin_session: requests.Session) -> None:
        """Test that all API keys for a user have the same tenant_id"""
        user_name: str = EMAIL
        # Generate multiple API keys
        response1: Dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert response1["code"] == RetCode.SUCCESS, response1
        response2: Dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert response2["code"] == RetCode.SUCCESS, response2
        # Get all API keys
        get_response: Dict[str, Any] = get_user_api_key(admin_session, user_name)
        assert get_response["code"] == RetCode.SUCCESS, get_response
        api_keys: List[Dict[str, Any]] = get_response["data"]
        # Verify all keys have the same tenant_id
        tenant_ids: List[str] = [key.get("tenant_id") for key in api_keys if key.get("tenant_id")]
        if len(tenant_ids) > 0:
            assert all(tid == tenant_ids[0] for tid in tenant_ids), "All API keys should have the same tenant_id"
    @pytest.mark.p2
    def test_get_user_api_key_beta_format(self, admin_session: requests.Session) -> None:
        """Test that beta field in API keys has correct format (32 characters)"""
        user_name: str = EMAIL
        # Generate a test API key
        generate_response: Dict[str, Any] = generate_user_api_key(admin_session, user_name)
        assert generate_response["code"] == RetCode.SUCCESS, generate_response
        # Get all API keys
        get_response: Dict[str, Any] = get_user_api_key(admin_session, user_name)
        assert get_response["code"] == RetCode.SUCCESS, get_response
        api_keys: List[Dict[str, Any]] = get_response["data"]
        # Verify beta format for all keys
        for key in api_keys:
            beta: str = key.get("beta", "")
            assert isinstance(beta, str), "beta should be a string"
            assert len(beta) == 32, f"beta should be 32 characters, got {len(beta)}"
    @pytest.mark.p3
    def test_get_user_api_key_without_auth(self) -> None:
        """Test that getting API keys without admin auth fails"""
        session: requests.Session = requests.Session()
        user_name: str = EMAIL
        response: Dict[str, Any] = get_user_api_key(session, user_name)
        assert response["code"] == RetCode.UNAUTHORIZED, response
        assert "message" in response, "Response should contain message"
        message: str = response["message"].lower()
        assert message == UNAUTHORIZED_ERROR_MESSAGE
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_admin_api/test_user_api_key_management/test_get_user_api_key.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/benchmark/auth.py | from typing import Any, Dict, Optional
from .http_client import HttpClient
class AuthError(RuntimeError):
    """Raised when an authentication/bootstrap step of the benchmark fails."""
    pass
def encrypt_password(password_plain: str) -> str:
    """Encrypt *password_plain* client-side via the project's crypt helper.

    Raises:
        AuthError: when the crypt helper cannot be imported (missing
            optional dependency).
    """
    try:
        from api.utils.crypt import crypt as _crypt
    except Exception as exc:
        message = "Password encryption unavailable; install pycryptodomex (uv sync --python 3.12 --group test)."
        raise AuthError(message) from exc
    return _crypt(password_plain)
def register_user(client: HttpClient, email: str, nickname: str, password_enc: str) -> None:
    """Register a user; an 'already registered' reply counts as success."""
    body = {"email": email, "nickname": nickname, "password": password_enc}
    result = client.request_json("POST", "/user/register", use_api_base=False, auth_kind=None, json_body=body)
    if result.get("code") != 0:
        message = result.get("message", "")
        # Idempotent: re-running against an existing account is not an error.
        if "has already registered" not in message:
            raise AuthError(f"Register failed: {message}")
def login_user(client: HttpClient, email: str, password_enc: str) -> str:
    """Log in and return the session token from the Authorization header."""
    body = {"email": email, "password": password_enc}
    response = client.request("POST", "/user/login", use_api_base=False, auth_kind=None, json_body=body)
    try:
        parsed = response.json()
    except Exception as exc:
        raise AuthError(f"Login failed: invalid JSON response ({exc})") from exc
    if parsed.get("code") != 0:
        raise AuthError(f"Login failed: {parsed.get('message')}")
    # The token is carried in a response header, not the JSON body.
    auth_header = response.headers.get("Authorization")
    if not auth_header:
        raise AuthError("Login failed: missing Authorization header")
    return auth_header
def create_api_token(client: HttpClient, login_token: str, token_name: Optional[str] = None) -> str:
    """Create a new API token using the login session; return the token string."""
    client.login_token = login_token
    query = {"name": token_name} if token_name else None
    result = client.request_json("POST", "/system/new_token", use_api_base=False, auth_kind="login", params=query)
    if result.get("code") != 0:
        raise AuthError(f"API token creation failed: {result.get('message')}")
    api_token = result.get("data", {}).get("token")
    if api_token:
        return api_token
    raise AuthError("API token creation failed: missing token in response")
def get_my_llms(client: HttpClient) -> Dict[str, Any]:
    """Return the tenant's configured LLM data as reported by /llm/my_llms."""
    result = client.request_json("GET", "/llm/my_llms", use_api_base=False, auth_kind="login")
    if result.get("code") != 0:
        raise AuthError(f"Failed to list LLMs: {result.get('message')}")
    return result.get("data", {})
def set_llm_api_key(
    client: HttpClient,
    llm_factory: str,
    api_key: str,
    base_url: Optional[str] = None,
) -> None:
    """Store an API key (and optional base URL) for the given LLM factory."""
    body: Dict[str, Any] = {"llm_factory": llm_factory, "api_key": api_key}
    if base_url:
        body["base_url"] = base_url
    result = client.request_json("POST", "/llm/set_api_key", use_api_base=False, auth_kind="login", json_body=body)
    if result.get("code") != 0:
        raise AuthError(f"Failed to set LLM API key: {result.get('message')}")
def get_tenant_info(client: HttpClient) -> Dict[str, Any]:
    """Fetch the current user's tenant info payload."""
    result = client.request_json("GET", "/user/tenant_info", use_api_base=False, auth_kind="login")
    if result.get("code") != 0:
        raise AuthError(f"Failed to get tenant info: {result.get('message')}")
    return result.get("data", {})
def set_tenant_info(client: HttpClient, payload: Dict[str, Any]) -> None:
    """Persist tenant settings (default model IDs etc.) via /user/set_tenant_info."""
    result = client.request_json("POST", "/user/set_tenant_info", use_api_base=False, auth_kind="login", json_body=payload)
    if result.get("code") != 0:
        raise AuthError(f"Failed to set tenant info: {result.get('message')}")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/benchmark/auth.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/benchmark/chat.py | import json
import time
from typing import Any, Dict, List, Optional
from .http_client import HttpClient
from .metrics import ChatSample
class ChatError(RuntimeError):
    """Raised when a chat-related benchmark API call fails."""
    pass
def delete_chat(client: HttpClient, chat_id: str) -> None:
    """Delete the chat assistant identified by *chat_id*."""
    body = {"ids": [chat_id]}
    result = client.request_json("DELETE", "/chats", json_body=body)
    if result.get("code") != 0:
        raise ChatError(f"Delete chat failed: {result.get('message')}")
def create_chat(
    client: HttpClient,
    name: str,
    dataset_ids: Optional[List[str]] = None,
    payload: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Create a chat assistant and return its data dict.

    *name* and *dataset_ids* are merged into *payload* only when the payload
    does not already provide them, so an explicit payload always wins.
    """
    body: Dict[str, Any] = dict(payload or {})
    body.setdefault("name", name)
    if dataset_ids is not None:
        body.setdefault("dataset_ids", dataset_ids)
    result = client.request_json("POST", "/chats", json_body=body)
    if result.get("code") != 0:
        raise ChatError(f"Create chat failed: {result.get('message')}")
    return result.get("data", {})
def get_chat(client: HttpClient, chat_id: str) -> Dict[str, Any]:
    """Fetch one chat assistant by id; raise ChatError when it does not exist."""
    result = client.request_json("GET", "/chats", params={"id": chat_id})
    if result.get("code") != 0:
        raise ChatError(f"Get chat failed: {result.get('message')}")
    matches = result.get("data", [])
    if not matches:
        raise ChatError("Chat not found")
    return matches[0]
def resolve_model(model: Optional[str], chat_data: Optional[Dict[str, Any]]) -> str:
    """Resolve the model name: an explicit *model* wins over the chat's llm.model_name."""
    if model:
        return model
    llm_cfg = (chat_data or {}).get("llm") or {}
    configured = llm_cfg.get("model_name")
    if configured:
        return configured
    raise ChatError("Model name is required; provide --model or use a chat with llm.model_name.")
def _parse_stream_error(response) -> Optional[str]:
content_type = response.headers.get("Content-Type", "")
if "text/event-stream" in content_type:
return None
try:
payload = response.json()
except Exception:
return f"Unexpected non-stream response (status {response.status_code})"
if payload.get("code") not in (0, None):
return payload.get("message", "Unknown error")
return f"Unexpected non-stream response (status {response.status_code})"
def stream_chat_completion(
    client: HttpClient,
    chat_id: str,
    model: str,
    messages: List[Dict[str, Any]],
    extra_body: Optional[Dict[str, Any]] = None,
) -> ChatSample:
    """Stream one OpenAI-compatible chat completion and record latency marks.

    Timing marks captured into the returned ChatSample:
      t0 - just before the request is issued
      t1 - when the first non-empty assistant content chunk arrives
      t2 - when the stream ends ([DONE], finish_reason, error, or EOF)
    """
    payload: Dict[str, Any] = {"model": model, "messages": messages, "stream": True}
    if extra_body:
        payload["extra_body"] = extra_body
    t0 = time.perf_counter()
    response = client.request(
        "POST",
        f"/chats_openai/{chat_id}/chat/completions",
        json_body=payload,
        stream=True,
    )
    # A non-SSE reply means the request itself failed; bail out early.
    error = _parse_stream_error(response)
    if error:
        response.close()
        return ChatSample(t0=t0, t1=None, t2=None, error=error)
    t1: Optional[float] = None
    t2: Optional[float] = None
    stream_error: Optional[str] = None
    content_parts: List[str] = []
    try:
        for raw_line in response.iter_lines(decode_unicode=True):
            if raw_line is None:
                continue
            line = raw_line.strip()
            # SSE frames of interest start with "data:"; everything else is noise.
            if not line or not line.startswith("data:"):
                continue
            data = line[5:].strip()
            if not data:
                continue
            if data == "[DONE]":
                t2 = time.perf_counter()
                break
            try:
                chunk = json.loads(data)
            except Exception as exc:
                stream_error = f"Invalid JSON chunk: {exc}"
                t2 = time.perf_counter()
                break
            choices = chunk.get("choices") or []
            choice = choices[0] if choices else {}
            delta = choice.get("delta") or {}
            content = delta.get("content")
            # First non-empty content chunk marks first-token latency.
            if t1 is None and isinstance(content, str) and content != "":
                t1 = time.perf_counter()
            if isinstance(content, str) and content:
                content_parts.append(content)
            finish_reason = choice.get("finish_reason")
            if finish_reason:
                t2 = time.perf_counter()
                break
    finally:
        response.close()
    # Stream ended without an explicit terminator (e.g. connection closed).
    if t2 is None:
        t2 = time.perf_counter()
    response_text = "".join(content_parts) if content_parts else None
    if stream_error:
        return ChatSample(t0=t0, t1=t1, t2=t2, error=stream_error, response_text=response_text)
    if t1 is None:
        return ChatSample(t0=t0, t1=None, t2=t2, error="No assistant content received", response_text=response_text)
    return ChatSample(t0=t0, t1=t1, t2=t2, error=None, response_text=response_text)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/benchmark/chat.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/benchmark/cli.py | import argparse
import json
import os
import multiprocessing as mp
import time
from concurrent.futures import ProcessPoolExecutor, as_completed
from pathlib import Path
from typing import Any, Dict, List, Optional
from . import auth
from .auth import AuthError
from .chat import ChatError, create_chat, delete_chat, get_chat, resolve_model, stream_chat_completion
from .dataset import (
DatasetError,
create_dataset,
dataset_has_chunks,
delete_dataset,
extract_document_ids,
list_datasets,
parse_documents,
upload_documents,
wait_for_parse_done,
)
from .http_client import HttpClient
from .metrics import ChatSample, RetrievalSample, summarize
from .report import chat_report, retrieval_report
from .retrieval import RetrievalError, build_payload, run_retrieval as run_retrieval_request
from .utils import eprint, load_json_arg, split_csv
def _parse_args() -> argparse.Namespace:
    """Build the CLI: shared options plus `chat` and `retrieval` subcommands.

    A common base parser carries connection, auth, dataset and run options;
    each subcommand re-uses it via `parents` and adds its own flags.
    """
    base_parser = argparse.ArgumentParser(add_help=False)
    base_parser.add_argument(
        "--base-url",
        default=os.getenv("RAGFLOW_BASE_URL") or os.getenv("HOST_ADDRESS"),
        help="Base URL (env: RAGFLOW_BASE_URL or HOST_ADDRESS)",
    )
    base_parser.add_argument(
        "--api-version",
        default=os.getenv("RAGFLOW_API_VERSION", "v1"),
        help="API version (default: v1)",
    )
    base_parser.add_argument("--api-key", help="API key (Bearer token)")
    base_parser.add_argument("--connect-timeout", type=float, default=5.0, help="Connect timeout seconds")
    base_parser.add_argument("--read-timeout", type=float, default=60.0, help="Read timeout seconds")
    base_parser.add_argument("--no-verify-ssl", action="store_false", dest="verify_ssl", help="Disable SSL verification")
    base_parser.add_argument("--iterations", type=int, default=1, help="Number of iterations")
    base_parser.add_argument("--concurrency", type=int, default=1, help="Concurrency")
    base_parser.add_argument("--json", action="store_true", help="Print JSON report (optional)")
    base_parser.add_argument("--print-response", action="store_true", help="Print response content per iteration")
    base_parser.add_argument(
        "--response-max-chars",
        type=int,
        default=0,
        help="Truncate printed response to N chars (0 = no limit)",
    )
    # Auth/login options
    base_parser.add_argument("--login-email", default=os.getenv("RAGFLOW_EMAIL"), help="Login email")
    base_parser.add_argument("--login-nickname", default=os.getenv("RAGFLOW_NICKNAME"), help="Nickname for registration")
    base_parser.add_argument("--login-password", help="Login password (encrypted client-side)")
    base_parser.add_argument("--allow-register", action="store_true", help="Attempt /user/register before login")
    base_parser.add_argument("--token-name", help="Optional API token name")
    base_parser.add_argument("--bootstrap-llm", action="store_true", help="Ensure LLM factory API key is configured")
    base_parser.add_argument("--llm-factory", default=os.getenv("RAGFLOW_LLM_FACTORY"), help="LLM factory name")
    base_parser.add_argument("--llm-api-key", default=os.getenv("ZHIPU_AI_API_KEY"), help="LLM API key")
    base_parser.add_argument("--llm-api-base", default=os.getenv("RAGFLOW_LLM_API_BASE"), help="LLM API base URL")
    base_parser.add_argument("--set-tenant-info", action="store_true", help="Set tenant default model IDs")
    base_parser.add_argument("--tenant-llm-id", default=os.getenv("RAGFLOW_TENANT_LLM_ID"), help="Tenant chat model ID")
    base_parser.add_argument("--tenant-embd-id", default=os.getenv("RAGFLOW_TENANT_EMBD_ID"), help="Tenant embedding model ID")
    base_parser.add_argument("--tenant-img2txt-id", default=os.getenv("RAGFLOW_TENANT_IMG2TXT_ID"), help="Tenant image2text model ID")
    base_parser.add_argument("--tenant-asr-id", default=os.getenv("RAGFLOW_TENANT_ASR_ID", ""), help="Tenant ASR model ID")
    base_parser.add_argument("--tenant-tts-id", default=os.getenv("RAGFLOW_TENANT_TTS_ID"), help="Tenant TTS model ID")
    # Dataset/doc options
    base_parser.add_argument("--dataset-id", help="Existing dataset ID")
    base_parser.add_argument("--dataset-ids", help="Comma-separated dataset IDs")
    base_parser.add_argument("--dataset-name", default=os.getenv("RAGFLOW_DATASET_NAME"), help="Dataset name when creating")
    base_parser.add_argument("--dataset-payload", help="Dataset payload JSON or @file")
    base_parser.add_argument("--document-path", action="append", help="Document path (repeatable)")
    base_parser.add_argument("--document-paths-file", help="File with document paths, one per line")
    base_parser.add_argument("--parse-timeout", type=float, default=120.0, help="Parse timeout seconds")
    base_parser.add_argument("--parse-interval", type=float, default=1.0, help="Parse poll interval seconds")
    base_parser.add_argument("--teardown", action="store_true", help="Delete created resources after run")
    parser = argparse.ArgumentParser(description="RAGFlow HTTP API benchmark", parents=[base_parser])
    subparsers = parser.add_subparsers(dest="command", required=True)
    chat_parser = subparsers.add_parser(
        "chat",
        help="Chat streaming latency benchmark",
        parents=[base_parser],
        add_help=False,
    )
    chat_parser.add_argument("--chat-id", help="Existing chat ID")
    chat_parser.add_argument("--chat-name", default=os.getenv("RAGFLOW_CHAT_NAME"), help="Chat name when creating")
    chat_parser.add_argument("--chat-payload", help="Chat payload JSON or @file")
    chat_parser.add_argument("--model", default=os.getenv("RAGFLOW_CHAT_MODEL"), help="Model name for OpenAI endpoint")
    chat_parser.add_argument("--message", help="User message")
    chat_parser.add_argument("--messages-json", help="Messages JSON or @file")
    chat_parser.add_argument("--extra-body", help="extra_body JSON or @file")
    retrieval_parser = subparsers.add_parser(
        "retrieval",
        help="Retrieval latency benchmark",
        parents=[base_parser],
        add_help=False,
    )
    retrieval_parser.add_argument("--question", help="Retrieval question")
    retrieval_parser.add_argument("--payload", help="Retrieval payload JSON or @file")
    retrieval_parser.add_argument("--document-ids", help="Comma-separated document IDs")
    return parser.parse_args()
def _load_paths(args: argparse.Namespace) -> List[str]:
paths = []
if args.document_path:
paths.extend(args.document_path)
if args.document_paths_file:
file_path = Path(args.document_paths_file)
for line in file_path.read_text(encoding="utf-8").splitlines():
line = line.strip()
if line:
paths.append(line)
return paths
def _truncate_text(text: str, max_chars: int) -> str:
if max_chars and len(text) > max_chars:
return f"{text[:max_chars]}...[truncated]"
return text
def _format_chat_response(sample: ChatSample, max_chars: int) -> str:
    """Render one chat sample: error prefix, response text, or '(empty)'."""
    if sample.error:
        rendered = f"[error] {sample.error}"
        if sample.response_text:
            rendered = f"{rendered} | {sample.response_text}"
    else:
        rendered = sample.response_text or ""
    return _truncate_text(rendered or "(empty)", max_chars)
def _format_retrieval_response(sample: RetrievalSample, max_chars: int) -> str:
    """Render one retrieval sample: JSON body (with error prefix) or '(empty)'."""
    if sample.response is not None:
        rendered = json.dumps(sample.response, ensure_ascii=False, sort_keys=True)
        if sample.error:
            rendered = f"[error] {sample.error} | {rendered}"
    elif sample.error:
        rendered = f"[error] {sample.error}"
    else:
        rendered = "(empty)"
    return _truncate_text(rendered, max_chars)
def _chat_worker(
    base_url: str,
    api_version: str,
    api_key: str,
    connect_timeout: float,
    read_timeout: float,
    verify_ssl: bool,
    chat_id: str,
    model: str,
    messages: List[Dict[str, Any]],
    extra_body: Optional[Dict[str, Any]],
) -> ChatSample:
    """Process-pool entry point: build a fresh client and run one streamed chat.

    Takes only picklable primitives so it can cross the process boundary.
    """
    worker_client = HttpClient(
        base_url=base_url,
        api_version=api_version,
        api_key=api_key,
        connect_timeout=connect_timeout,
        read_timeout=read_timeout,
        verify_ssl=verify_ssl,
    )
    return stream_chat_completion(worker_client, chat_id, model, messages, extra_body)
def _retrieval_worker(
    base_url: str,
    api_version: str,
    api_key: str,
    connect_timeout: float,
    read_timeout: float,
    verify_ssl: bool,
    payload: Dict[str, Any],
) -> RetrievalSample:
    """Process-pool entry point: build a fresh client and run one retrieval.

    Takes only picklable primitives so it can cross the process boundary.
    """
    worker_client = HttpClient(
        base_url=base_url,
        api_version=api_version,
        api_key=api_key,
        connect_timeout=connect_timeout,
        read_timeout=read_timeout,
        verify_ssl=verify_ssl,
    )
    return run_retrieval_request(worker_client, payload)
def _ensure_auth(client: HttpClient, args: argparse.Namespace) -> None:
    """Ensure the client holds an API key, logging in (and optionally
    registering / bootstrapping LLM + tenant config) when no key was given.

    Side effects: mutates client.api_key and client.login_token.
    Raises AuthError when required credentials or bootstrap flags are missing.
    """
    # An explicit API key short-circuits the whole login flow.
    if args.api_key:
        client.api_key = args.api_key
        return
    if not args.login_email:
        raise AuthError("Missing API key and login email")
    if not args.login_password:
        raise AuthError("Missing login password")
    password_enc = auth.encrypt_password(args.login_password)
    if args.allow_register:
        nickname = args.login_nickname or args.login_email.split("@")[0]
        try:
            auth.register_user(client, args.login_email, nickname, password_enc)
        except AuthError as exc:
            # Registration is best-effort; login below is the real gate.
            eprint(f"Register warning: {exc}")
    login_token = auth.login_user(client, args.login_email, password_enc)
    client.login_token = login_token
    if args.bootstrap_llm:
        if not args.llm_factory:
            raise AuthError("Missing --llm-factory for bootstrap")
        if not args.llm_api_key:
            raise AuthError("Missing --llm-api-key for bootstrap")
        existing = auth.get_my_llms(client)
        # Only configure the factory if it is not already present.
        if args.llm_factory not in existing:
            auth.set_llm_api_key(client, args.llm_factory, args.llm_api_key, args.llm_api_base)
    if args.set_tenant_info:
        if not args.tenant_llm_id or not args.tenant_embd_id:
            raise AuthError("Missing --tenant-llm-id or --tenant-embd-id for tenant setup")
        tenant = auth.get_tenant_info(client)
        tenant_id = tenant.get("tenant_id")
        if not tenant_id:
            raise AuthError("Tenant info missing tenant_id")
        payload = {
            "tenant_id": tenant_id,
            "llm_id": args.tenant_llm_id,
            "embd_id": args.tenant_embd_id,
            "img2txt_id": args.tenant_img2txt_id or "",
            "asr_id": args.tenant_asr_id or "",
            "tts_id": args.tenant_tts_id,
        }
        auth.set_tenant_info(client, payload)
    # Finally mint an API token from the login session for subsequent calls.
    api_key = auth.create_api_token(client, login_token, args.token_name)
    client.api_key = api_key
def _prepare_dataset(
    client: HttpClient,
    args: argparse.Namespace,
    needs_dataset: bool,
    document_paths: List[str],
) -> Dict[str, Any]:
    """Resolve (or create) the dataset to benchmark against.

    Precedence: --dataset-id > first of --dataset-ids > create a new dataset
    (only when a dataset is required or documents were supplied).

    Returns a dict with keys: dataset_id, dataset_ids, dataset_payload, and
    created (resource IDs this call created, for later teardown).
    """
    created = {}
    dataset_ids = split_csv(args.dataset_ids) or []
    dataset_id = args.dataset_id
    dataset_payload = load_json_arg(args.dataset_payload, "dataset-payload") if args.dataset_payload else None
    if dataset_id:
        # A single explicit ID overrides any --dataset-ids list.
        dataset_ids = [dataset_id]
    elif dataset_ids:
        dataset_id = dataset_ids[0]
    elif needs_dataset or document_paths:
        # No dataset given: create one; name may come from flag or payload.
        if not args.dataset_name and not (dataset_payload and dataset_payload.get("name")):
            raise DatasetError("Missing --dataset-name or dataset payload name")
        name = args.dataset_name or dataset_payload.get("name")
        data = create_dataset(client, name, dataset_payload)
        dataset_id = data.get("id")
        if not dataset_id:
            raise DatasetError("Dataset creation did not return id")
        dataset_ids = [dataset_id]
        created["Created Dataset ID"] = dataset_id
    return {
        "dataset_id": dataset_id,
        "dataset_ids": dataset_ids,
        "dataset_payload": dataset_payload,
        "created": created,
    }
def _maybe_upload_and_parse(
    client: HttpClient,
    dataset_id: str,
    document_paths: List[str],
    parse_timeout: float,
    parse_interval: float,
) -> List[str]:
    """Upload *document_paths* into the dataset, trigger parsing, and wait until done.

    Returns the uploaded document IDs (empty when no paths were given).
    """
    if not document_paths:
        return []
    uploaded = upload_documents(client, dataset_id, document_paths)
    document_ids = extract_document_ids(uploaded)
    if not document_ids:
        raise DatasetError("No document IDs returned after upload")
    parse_documents(client, dataset_id, document_ids)
    wait_for_parse_done(client, dataset_id, document_ids, parse_timeout, parse_interval)
    return document_ids
def _ensure_dataset_has_chunks(client: HttpClient, dataset_id: str) -> None:
    """Fail fast when the dataset is missing or has no parsed chunks."""
    matches = list_datasets(client, dataset_id=dataset_id)
    if not matches:
        raise DatasetError("Dataset not found")
    if not dataset_has_chunks(matches[0]):
        raise DatasetError("Dataset has no parsed chunks; upload and parse documents first.")
def _cleanup(client: HttpClient, created: Dict[str, str], teardown: bool) -> None:
    """Best-effort deletion of resources recorded in *created* when --teardown is set."""
    if not teardown:
        return
    # Delete the chat first, then the dataset it may reference.
    for label, deleter, noun in (
        ("Created Chat ID", delete_chat, "chat"),
        ("Created Dataset ID", delete_dataset, "dataset"),
    ):
        resource_id = created.get(label)
        if not resource_id:
            continue
        try:
            deleter(client, resource_id)
        except Exception as exc:
            eprint(f"Cleanup warning: failed to delete {noun} {resource_id}: {exc}")
def run_chat(client: HttpClient, args: argparse.Namespace) -> int:
    """Run the chat streaming latency benchmark.

    Resolves/creates the dataset and chat, runs `--iterations` streamed chat
    completions (optionally across `--concurrency` worker processes), then
    prints a human-readable or JSON report and tears down created resources.

    Returns 0 when every iteration succeeded, 1 otherwise.
    """
    document_paths = _load_paths(args)
    needs_dataset = bool(document_paths)
    dataset_info = _prepare_dataset(client, args, needs_dataset, document_paths)
    created = dict(dataset_info["created"])
    dataset_id = dataset_info["dataset_id"]
    dataset_ids = dataset_info["dataset_ids"]
    doc_ids = []
    if dataset_id and document_paths:
        doc_ids = _maybe_upload_and_parse(client, dataset_id, document_paths, args.parse_timeout, args.parse_interval)
        created["Created Document IDs"] = ",".join(doc_ids)
    # When reusing an existing dataset (no fresh uploads), verify it is queryable.
    # BUGFIX: the original ran this check twice back-to-back — the second guard
    # (`... and dataset_ids`) was redundant since dataset_id implies membership
    # in dataset_ids here; one check suffices.
    if dataset_id and not document_paths:
        _ensure_dataset_has_chunks(client, dataset_id)
    chat_payload = load_json_arg(args.chat_payload, "chat-payload") if args.chat_payload else None
    chat_id = args.chat_id
    if not chat_id:
        # Create a chat assistant; name may come from the flag or the payload.
        if not args.chat_name and not (chat_payload and chat_payload.get("name")):
            raise ChatError("Missing --chat-name or chat payload name")
        chat_name = args.chat_name or chat_payload.get("name")
        chat_data = create_chat(client, chat_name, dataset_ids or [], chat_payload)
        chat_id = chat_data.get("id")
        if not chat_id:
            raise ChatError("Chat creation did not return id")
        created["Created Chat ID"] = chat_id
    chat_data = get_chat(client, chat_id)
    model = resolve_model(args.model, chat_data)
    # Build the message list: explicit JSON wins over a single --message string.
    messages = None
    if args.messages_json:
        messages = load_json_arg(args.messages_json, "messages-json")
    if not messages:
        if not args.message:
            raise ChatError("Missing --message or --messages-json")
        messages = [{"role": "user", "content": args.message}]
    extra_body = load_json_arg(args.extra_body, "extra-body") if args.extra_body else None
    samples: List[ChatSample] = []
    responses: List[str] = []
    start_time = time.perf_counter()
    if args.concurrency <= 1:
        # Sequential path: reuse the existing client.
        for _ in range(args.iterations):
            samples.append(stream_chat_completion(client, chat_id, model, messages, extra_body))
    else:
        # Parallel path: spawn worker processes, each with its own client,
        # and keep results in submission order.
        results: List[Optional[ChatSample]] = [None] * args.iterations
        mp_context = mp.get_context("spawn")
        with ProcessPoolExecutor(max_workers=args.concurrency, mp_context=mp_context) as executor:
            future_map = {
                executor.submit(
                    _chat_worker,
                    client.base_url,
                    client.api_version,
                    client.api_key or "",
                    client.connect_timeout,
                    client.read_timeout,
                    client.verify_ssl,
                    chat_id,
                    model,
                    messages,
                    extra_body,
                ): idx
                for idx in range(args.iterations)
            }
            for future in as_completed(future_map):
                idx = future_map[future]
                results[idx] = future.result()
        samples = [sample for sample in results if sample is not None]
    total_duration = time.perf_counter() - start_time
    if args.print_response:
        for idx, sample in enumerate(samples, start=1):
            rendered = _format_chat_response(sample, args.response_max_chars)
            if args.json:
                responses.append(rendered)
            else:
                print(f"Response[{idx}]: {rendered}")
    # Only error-free samples with recorded latencies count as successes.
    total_latencies = [s.total_latency for s in samples if s.total_latency is not None and s.error is None]
    first_latencies = [s.first_token_latency for s in samples if s.first_token_latency is not None and s.error is None]
    success = len(total_latencies)
    failure = len(samples) - success
    errors = [s.error for s in samples if s.error]
    total_stats = summarize(total_latencies)
    first_stats = summarize(first_latencies)
    if args.json:
        payload = {
            "interface": "chat",
            "concurrency": args.concurrency,
            "iterations": args.iterations,
            "success": success,
            "failure": failure,
            "model": model,
            "total_latency": total_stats,
            "first_token_latency": first_stats,
            "errors": [e for e in errors if e],
            "created": created,
            "total_duration_s": total_duration,
            "qps": (args.iterations / total_duration) if total_duration > 0 else None,
        }
        if args.print_response:
            payload["responses"] = responses
        print(json.dumps(payload, sort_keys=True))
    else:
        report = chat_report(
            interface="chat",
            concurrency=args.concurrency,
            total_duration_s=total_duration,
            iterations=args.iterations,
            success=success,
            failure=failure,
            model=model,
            total_stats=total_stats,
            first_token_stats=first_stats,
            errors=[e for e in errors if e],
            created=created,
        )
        print(report, end="")
    _cleanup(client, created, args.teardown)
    return 0 if failure == 0 else 1
def run_retrieval(client: HttpClient, args: argparse.Namespace) -> int:
    """Benchmark the /retrieval endpoint.

    Prepares a dataset (optionally uploading and parsing documents), issues
    ``args.iterations`` retrieval requests either serially or via a spawn-based
    process pool, prints a latency report (text or JSON), cleans up created
    resources, and returns 0 when every request succeeded, else 1.
    """
    # --- fixture setup: dataset (and optional documents) to retrieve from ---
    document_paths = _load_paths(args)
    needs_dataset = True
    dataset_info = _prepare_dataset(client, args, needs_dataset, document_paths)
    created = dict(dataset_info["created"])  # resources reported and cleaned up later
    dataset_id = dataset_info["dataset_id"]
    dataset_ids = dataset_info["dataset_ids"]
    if not dataset_ids:
        raise RetrievalError("dataset_ids required for retrieval")
    doc_ids = []
    if dataset_id and document_paths:
        doc_ids = _maybe_upload_and_parse(client, dataset_id, document_paths, args.parse_timeout, args.parse_interval)
        created["Created Document IDs"] = ",".join(doc_ids)
    # --- request payload: CLI question merged with an optional JSON override ---
    payload_override = load_json_arg(args.payload, "payload") if args.payload else None
    question = args.question
    if not question and (payload_override is None or "question" not in payload_override):
        raise RetrievalError("Missing --question or retrieval payload question")
    document_ids = split_csv(args.document_ids) if args.document_ids else None
    payload = build_payload(question, dataset_ids, document_ids, payload_override)
    samples: List[RetrievalSample] = []
    responses: List[str] = []
    start_time = time.perf_counter()
    # --- fire requests: in-process when serial, process pool when concurrent ---
    if args.concurrency <= 1:
        for _ in range(args.iterations):
            samples.append(run_retrieval_request(client, payload))
    else:
        results: List[Optional[RetrievalSample]] = [None] * args.iterations
        # "spawn" gives each worker a fresh interpreter; only plain values
        # (URL, key, timeouts, payload) are shipped to _retrieval_worker.
        mp_context = mp.get_context("spawn")
        with ProcessPoolExecutor(max_workers=args.concurrency, mp_context=mp_context) as executor:
            future_map = {
                executor.submit(
                    _retrieval_worker,
                    client.base_url,
                    client.api_version,
                    client.api_key or "",
                    client.connect_timeout,
                    client.read_timeout,
                    client.verify_ssl,
                    payload,
                ): idx
                for idx in range(args.iterations)
            }
            for future in as_completed(future_map):
                idx = future_map[future]
                results[idx] = future.result()
        samples = [sample for sample in results if sample is not None]
    total_duration = time.perf_counter() - start_time
    # --- optional response dump: collected for JSON mode, printed otherwise ---
    if args.print_response:
        for idx, sample in enumerate(samples, start=1):
            rendered = _format_retrieval_response(sample, args.response_max_chars)
            if args.json:
                responses.append(rendered)
            else:
                print(f"Response[{idx}]: {rendered}")
    # --- aggregate: only error-free samples with a latency count as successes ---
    latencies = [s.latency for s in samples if s.latency is not None and s.error is None]
    success = len(latencies)
    failure = len(samples) - success
    errors = [s.error for s in samples if s.error]
    stats = summarize(latencies)
    if args.json:
        payload = {
            "interface": "retrieval",
            "concurrency": args.concurrency,
            "iterations": args.iterations,
            "success": success,
            "failure": failure,
            "latency": stats,
            "errors": [e for e in errors if e],
            "created": created,
            "total_duration_s": total_duration,
            "qps": (args.iterations / total_duration) if total_duration > 0 else None,
        }
        if args.print_response:
            payload["responses"] = responses
        print(json.dumps(payload, sort_keys=True))
    else:
        report = retrieval_report(
            interface="retrieval",
            concurrency=args.concurrency,
            total_duration_s=total_duration,
            iterations=args.iterations,
            success=success,
            failure=failure,
            stats=stats,
            errors=[e for e in errors if e],
            created=created,
        )
        print(report, end="")
    _cleanup(client, created, args.teardown)
    return 0 if failure == 0 else 1
def main() -> None:
    """CLI entry point: validate arguments, authenticate, dispatch a subcommand.

    Always exits via SystemExit — with the subcommand's return code on a
    completed run, or 2 when a known benchmark error is raised.
    """
    args = _parse_args()
    if not args.base_url:
        raise SystemExit("Missing --base-url or HOST_ADDRESS")
    if args.iterations < 1:
        raise SystemExit("--iterations must be >= 1")
    if args.concurrency < 1:
        raise SystemExit("--concurrency must be >= 1")
    client = HttpClient(
        base_url=args.base_url,
        api_version=args.api_version,
        api_key=args.api_key,
        connect_timeout=args.connect_timeout,
        read_timeout=args.read_timeout,
        verify_ssl=args.verify_ssl,
    )
    try:
        _ensure_auth(client, args)
        runners = {"chat": run_chat, "retrieval": run_retrieval}
        runner = runners.get(args.command)
        if runner is None:
            raise SystemExit("Unknown command")
        # SystemExit is not caught by the except clause below.
        raise SystemExit(runner(client, args))
    except (AuthError, DatasetError, ChatError, RetrievalError) as exc:
        eprint(f"Error: {exc}")
        raise SystemExit(2)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/benchmark/cli.py",
"license": "Apache License 2.0",
"lines": 520,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/benchmark/dataset.py | from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional
from .http_client import HttpClient
try:
from requests_toolbelt import MultipartEncoder
except Exception: # pragma: no cover - fallback without toolbelt
MultipartEncoder = None
class DatasetError(RuntimeError):
    """Raised when a dataset-related HTTP API call fails or times out."""
    pass
def create_dataset(client: HttpClient, name: str, payload: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
body = dict(payload or {})
if "name" not in body:
body["name"] = name
res = client.request_json("POST", "/datasets", json_body=body)
if res.get("code") != 0:
raise DatasetError(f"Create dataset failed: {res.get('message')}")
return res.get("data", {})
def list_datasets(client: HttpClient, dataset_id: Optional[str] = None, name: Optional[str] = None) -> List[Dict[str, Any]]:
params = {}
if dataset_id is not None:
params["id"] = dataset_id
if name is not None:
params["name"] = name
res = client.request_json("GET", "/datasets", params=params or None)
if res.get("code") != 0:
raise DatasetError(f"List datasets failed: {res.get('message')}")
return res.get("data", [])
def delete_dataset(client: HttpClient, dataset_id: str) -> None:
payload = {"ids": [dataset_id]}
res = client.request_json("DELETE", "/datasets", json_body=payload)
if res.get("code") != 0:
raise DatasetError(f"Delete dataset failed: {res.get('message')}")
def upload_documents(client: HttpClient, dataset_id: str, file_paths: Iterable[str]) -> List[Dict[str, Any]]:
    """Upload local files as documents into a dataset.

    Uses requests_toolbelt's MultipartEncoder when available (so the multipart
    body can be streamed), otherwise falls back to requests' built-in
    files-based multipart upload. Open file handles are closed in both
    branches even when the request raises.

    Returns the API's "data" list; raises DatasetError on a non-zero code.
    """
    paths = [Path(p) for p in file_paths]
    if MultipartEncoder is None:
        # Fallback: hand open handles to requests and let it build the body.
        files = [("file", (p.name, p.open("rb"))) for p in paths]
        try:
            response = client.request(
                "POST",
                f"/datasets/{dataset_id}/documents",
                headers=None,
                data=None,
                json_body=None,
                files=files,
                params=None,
                stream=False,
                auth_kind="api",
            )
        finally:
            for _, (_, fh) in files:
                fh.close()
        res = response.json()
    else:
        fields = []
        file_handles = []
        try:
            for path in paths:
                fh = path.open("rb")
                fields.append(("file", (path.name, fh)))
                file_handles.append(fh)
            encoder = MultipartEncoder(fields=fields)
            # The encoder chooses the multipart boundary, so it must also
            # supply the Content-Type header.
            headers = {"Content-Type": encoder.content_type}
            response = client.request(
                "POST",
                f"/datasets/{dataset_id}/documents",
                headers=headers,
                data=encoder,
                json_body=None,
                params=None,
                stream=False,
                auth_kind="api",
            )
            res = response.json()
        finally:
            for fh in file_handles:
                fh.close()
    if res.get("code") != 0:
        raise DatasetError(f"Upload documents failed: {res.get('message')}")
    return res.get("data", [])
def parse_documents(client: HttpClient, dataset_id: str, document_ids: List[str]) -> Dict[str, Any]:
payload = {"document_ids": document_ids}
res = client.request_json("POST", f"/datasets/{dataset_id}/chunks", json_body=payload)
if res.get("code") != 0:
raise DatasetError(f"Parse documents failed: {res.get('message')}")
return res
def list_documents(client: HttpClient, dataset_id: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
res = client.request_json("GET", f"/datasets/{dataset_id}/documents", params=params)
if res.get("code") != 0:
raise DatasetError(f"List documents failed: {res.get('message')}")
return res.get("data", {})
def wait_for_parse_done(
    client: HttpClient,
    dataset_id: str,
    document_ids: Optional[List[str]],
    timeout: float,
    interval: float,
) -> None:
    """Poll until every targeted document reports run == "DONE".

    When *document_ids* is falsy, all documents in the dataset are checked.
    Raises DatasetError once *timeout* seconds (monotonic) have elapsed.
    """
    import time
    started = time.monotonic()
    wanted = set(document_ids or [])
    while True:
        docs = list_documents(client, dataset_id).get("docs", [])
        relevant = [doc for doc in docs if not wanted or doc.get("id") in wanted]
        if all(doc.get("run") == "DONE" for doc in relevant):
            return
        if time.monotonic() - started > timeout:
            raise DatasetError("Document parsing timeout")
        # Clamp the poll interval so a zero/negative value cannot busy-spin.
        time.sleep(max(interval, 0.1))
def extract_document_ids(documents: Iterable[Dict[str, Any]]) -> List[str]:
    """Collect the "id" field from every document dict that carries one."""
    ids: List[str] = []
    for doc in documents:
        if "id" in doc:
            ids.append(doc["id"])
    return ids
def dataset_has_chunks(dataset_info: Dict[str, Any]) -> bool:
    """True when either known chunk-count key holds a positive integer."""
    return any(
        isinstance(count, int) and count > 0
        for count in (dataset_info.get(key) for key in ("chunk_count", "chunk_num"))
    )
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/benchmark/dataset.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/benchmark/http_client.py | import json
from typing import Any, Dict, Optional, Tuple
import requests
class HttpClient:
    """Thin convenience wrapper over ``requests`` that centralizes the
    service's URL layout, Authorization header conventions, timeouts,
    and TLS verification setting."""

    def __init__(
        self,
        base_url: str,
        api_version: str = "v1",
        api_key: Optional[str] = None,
        login_token: Optional[str] = None,
        connect_timeout: float = 5.0,
        read_timeout: float = 60.0,
        verify_ssl: bool = True,
    ) -> None:
        # Normalize once so URL joining never produces a double slash.
        self.base_url = base_url.rstrip("/")
        self.api_version = api_version
        self.api_key = api_key
        self.login_token = login_token
        self.connect_timeout = connect_timeout
        self.read_timeout = read_timeout
        self.verify_ssl = verify_ssl

    def api_base(self) -> str:
        """Root for endpoints served under /api/<version>."""
        return f"{self.base_url}/api/{self.api_version}"

    def non_api_base(self) -> str:
        """Root for endpoints served directly under /<version>."""
        return f"{self.base_url}/{self.api_version}"

    def build_url(self, path: str, use_api_base: bool = True) -> str:
        """Join *path* (leading slashes stripped) onto the chosen root."""
        root = self.api_base() if use_api_base else self.non_api_base()
        return "/".join((root, path.lstrip("/")))

    def _headers(self, auth_kind: Optional[str], extra: Optional[Dict[str, str]]) -> Dict[str, str]:
        """Compose the Authorization header for *auth_kind* plus any extras.

        "api" uses a Bearer token; "login" sends the login token verbatim.
        *extra* entries are applied last and may override the auth header.
        """
        composed: Dict[str, str] = {}
        if auth_kind == "api" and self.api_key:
            composed["Authorization"] = f"Bearer {self.api_key}"
        elif auth_kind == "login" and self.login_token:
            composed["Authorization"] = self.login_token
        composed.update(extra or {})
        return composed

    def request(
        self,
        method: str,
        path: str,
        *,
        use_api_base: bool = True,
        auth_kind: Optional[str] = "api",
        headers: Optional[Dict[str, str]] = None,
        json_body: Optional[Dict[str, Any]] = None,
        data: Any = None,
        files: Any = None,
        params: Optional[Dict[str, Any]] = None,
        stream: bool = False,
    ) -> requests.Response:
        """Send one HTTP request and return the raw Response object."""
        timeouts: Tuple[float, float] = (self.connect_timeout, self.read_timeout)
        return requests.request(
            method=method,
            url=self.build_url(path, use_api_base=use_api_base),
            headers=self._headers(auth_kind, headers),
            json=json_body,
            data=data,
            files=files,
            params=params,
            timeout=timeouts,
            stream=stream,
            verify=self.verify_ssl,
        )

    def request_json(
        self,
        method: str,
        path: str,
        *,
        use_api_base: bool = True,
        auth_kind: Optional[str] = "api",
        headers: Optional[Dict[str, str]] = None,
        json_body: Optional[Dict[str, Any]] = None,
        data: Any = None,
        files: Any = None,
        params: Optional[Dict[str, Any]] = None,
        stream: bool = False,
    ) -> Dict[str, Any]:
        """Send a request and decode the body as JSON.

        Raises ValueError when the body is not valid JSON.
        """
        forwarded = dict(
            use_api_base=use_api_base,
            auth_kind=auth_kind,
            headers=headers,
            json_body=json_body,
            data=data,
            files=files,
            params=params,
            stream=stream,
        )
        response = self.request(method, path, **forwarded)
        try:
            return response.json()
        except Exception as exc:
            raise ValueError(f"Non-JSON response from {path}: {exc}") from exc

    @staticmethod
    def parse_json_bytes(raw: bytes) -> Dict[str, Any]:
        """Decode *raw* as UTF-8 JSON; raises ValueError on any failure."""
        try:
            return json.loads(raw.decode("utf-8"))
        except Exception as exc:
            raise ValueError(f"Invalid JSON payload: {exc}") from exc
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/benchmark/http_client.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/benchmark/metrics.py | import math
from dataclasses import dataclass
from typing import Any, List, Optional
@dataclass
class ChatSample:
    """Timing record for a single chat request.

    ``t0`` is the send time, ``t1`` the first-token time and ``t2`` the
    completion time; ``t1``/``t2`` stay None when the request failed early.
    """

    t0: float
    t1: Optional[float]
    t2: Optional[float]
    error: Optional[str] = None
    response_text: Optional[str] = None

    @property
    def first_token_latency(self) -> Optional[float]:
        """Seconds from send until the first token, or None if none arrived."""
        return None if self.t1 is None else self.t1 - self.t0

    @property
    def total_latency(self) -> Optional[float]:
        """Seconds from send until completion, or None if it never finished."""
        return None if self.t2 is None else self.t2 - self.t0
@dataclass
class RetrievalSample:
    """Timing record for a single retrieval request.

    ``t0`` is the send time; ``t1`` is the response time (None on failure).
    """

    t0: float
    t1: Optional[float]
    error: Optional[str] = None
    response: Optional[Any] = None

    @property
    def latency(self) -> Optional[float]:
        """Round-trip seconds, or None when no response was recorded."""
        return None if self.t1 is None else self.t1 - self.t0
def _percentile(sorted_values: List[float], p: float) -> Optional[float]:
if not sorted_values:
return None
n = len(sorted_values)
k = max(0, math.ceil((p / 100.0) * n) - 1)
return sorted_values[k]
def summarize(values: List[float]) -> dict:
    """Build count/avg/min/p50/p90/p95 statistics for *values*.

    All statistic fields are None when *values* is empty.
    """
    if not values:
        return {"count": 0, "avg": None, "min": None, "p50": None, "p90": None, "p95": None}
    ordered = sorted(values)
    stats = {
        "count": len(values),
        "avg": sum(values) / len(values),
        "min": ordered[0],
    }
    for pct in (50, 90, 95):
        stats[f"p{pct}"] = _percentile(ordered, pct)
    return stats
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/benchmark/metrics.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/benchmark/report.py | from typing import Dict, List, Optional
def _fmt_seconds(value: Optional[float]) -> str:
if value is None:
return "n/a"
return f"{value:.4f}s"
def _fmt_ms(value: Optional[float]) -> str:
if value is None:
return "n/a"
return f"{value * 1000.0:.2f}ms"
def _fmt_qps(qps: Optional[float]) -> str:
if qps is None or qps <= 0:
return "n/a"
return f"{qps:.2f}"
def _calc_qps(total_duration_s: Optional[float], total_requests: int) -> Optional[float]:
if total_duration_s is None or total_duration_s <= 0:
return None
return total_requests / total_duration_s
def render_report(lines: List[str]) -> str:
    """Join report lines, trim surrounding whitespace, end with one newline."""
    joined = "\n".join(lines)
    return joined.strip() + "\n"
def chat_report(
    *,
    interface: str,
    concurrency: int,
    total_duration_s: Optional[float],
    iterations: int,
    success: int,
    failure: int,
    model: str,
    total_stats: Dict[str, Optional[float]],
    first_token_stats: Dict[str, Optional[float]],
    errors: List[str],
    created: Dict[str, str],
) -> str:
    """Render the human-readable text report for a chat benchmark run.

    ``total_stats`` and ``first_token_stats`` are dicts with avg/min/p50/p90/
    p95 entries holding seconds (rendered here as milliseconds). *created*
    rows are echoed verbatim; at most five error messages are appended.
    """
    lines = [
        f"Interface: {interface}",
        f"Concurrency: {concurrency}",
        f"Iterations: {iterations}",
        f"Success: {success}",
        f"Failure: {failure}",
        f"Model: {model}",
    ]
    # One "<label>: <value>" row per created resource (dataset ids, etc.).
    for key, value in created.items():
        lines.append(f"{key}: {value}")
    lines.extend(
        [
            "Latency (total): "
            f"avg={_fmt_ms(total_stats['avg'])}, min={_fmt_ms(total_stats['min'])}, "
            f"p50={_fmt_ms(total_stats['p50'])}, p90={_fmt_ms(total_stats['p90'])}, p95={_fmt_ms(total_stats['p95'])}",
            "Latency (first token): "
            f"avg={_fmt_ms(first_token_stats['avg'])}, min={_fmt_ms(first_token_stats['min'])}, "
            f"p50={_fmt_ms(first_token_stats['p50'])}, p90={_fmt_ms(first_token_stats['p90'])}, p95={_fmt_ms(first_token_stats['p95'])}",
            f"Total Duration: {_fmt_seconds(total_duration_s)}",
            f"QPS (requests / total duration): {_fmt_qps(_calc_qps(total_duration_s, iterations))}",
        ]
    )
    if errors:
        # Cap at five errors to keep the report readable.
        lines.append("Errors: " + "; ".join(errors[:5]))
    return render_report(lines)
def retrieval_report(
    *,
    interface: str,
    concurrency: int,
    total_duration_s: Optional[float],
    iterations: int,
    success: int,
    failure: int,
    stats: Dict[str, Optional[float]],
    errors: List[str],
    created: Dict[str, str],
) -> str:
    """Render the human-readable text report for a retrieval benchmark run.

    ``stats`` is a dict with avg/min/p50/p90/p95 entries holding seconds
    (rendered as milliseconds). *created* rows are echoed verbatim; at most
    five error messages are appended.
    """
    lines = [
        f"Interface: {interface}",
        f"Concurrency: {concurrency}",
        f"Iterations: {iterations}",
        f"Success: {success}",
        f"Failure: {failure}",
    ]
    # One "<label>: <value>" row per created resource (dataset ids, etc.).
    for key, value in created.items():
        lines.append(f"{key}: {value}")
    lines.extend(
        [
            "Latency: "
            f"avg={_fmt_ms(stats['avg'])}, min={_fmt_ms(stats['min'])}, "
            f"p50={_fmt_ms(stats['p50'])}, p90={_fmt_ms(stats['p90'])}, p95={_fmt_ms(stats['p95'])}",
            f"Total Duration: {_fmt_seconds(total_duration_s)}",
            f"QPS (requests / total duration): {_fmt_qps(_calc_qps(total_duration_s, iterations))}",
        ]
    )
    if errors:
        # Cap at five errors to keep the report readable.
        lines.append("Errors: " + "; ".join(errors[:5]))
    return render_report(lines)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/benchmark/report.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/benchmark/retrieval.py | import time
from typing import Any, Dict, List, Optional
from .http_client import HttpClient
from .metrics import RetrievalSample
class RetrievalError(RuntimeError):
    """Raised when a retrieval benchmark cannot be set up or executed."""
    pass
def build_payload(
    question: str,
    dataset_ids: List[str],
    document_ids: Optional[List[str]] = None,
    payload: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Merge explicit retrieval arguments with a caller-supplied payload.

    Keys already present in *payload* always win; ``document_ids`` is only
    added when the argument is not None.
    """
    body = dict(payload or {})
    body.setdefault("question", question)
    body.setdefault("dataset_ids", dataset_ids)
    if document_ids is not None:
        body.setdefault("document_ids", document_ids)
    return body
def run_retrieval(client: HttpClient, payload: Dict[str, Any]) -> RetrievalSample:
    """POST *payload* to /retrieval and wrap the timing plus outcome.

    The sample's error field is set for non-JSON bodies and for API
    responses whose code is non-zero; the parsed response is attached
    whenever JSON decoding succeeded.
    """
    started = time.perf_counter()
    raw = client.request("POST", "/retrieval", json_body=payload, stream=False).content
    finished = time.perf_counter()
    try:
        parsed = client.parse_json_bytes(raw)
    except Exception as exc:
        return RetrievalSample(t0=started, t1=finished, error=f"Invalid JSON response: {exc}")
    if parsed.get("code") != 0:
        return RetrievalSample(t0=started, t1=finished, error=parsed.get("message"), response=parsed)
    return RetrievalSample(t0=started, t1=finished, error=None, response=parsed)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/benchmark/retrieval.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/benchmark/utils.py | import json
import sys
import time
from pathlib import Path
def eprint(*args, **kwargs):
    """print() variant that writes to stderr; other kwargs are forwarded."""
    print(*args, file=sys.stderr, **kwargs)
def load_json_arg(value, name):
    """Resolve a CLI JSON argument into a Python object.

    None and dicts pass through untouched; a string starting with "@" is
    treated as a file path whose contents are parsed; any other string is
    parsed directly. Raises ValueError (mentioning *name*) on failure.
    """
    if value is None:
        return None
    if isinstance(value, dict):
        return value
    if isinstance(value, str) and value.startswith("@"):
        source = Path(value[1:])
        try:
            text = source.read_text(encoding="utf-8")
            return json.loads(text)
        except Exception as exc:
            raise ValueError(f"Failed to read {name} from {source}: {exc}") from exc
    try:
        return json.loads(value)
    except Exception as exc:
        raise ValueError(f"Invalid JSON for {name}: {exc}") from exc
def split_csv(value):
    """Normalize CSV-ish input to a list.

    None stays None, lists pass through, strings are split on commas with
    blanks dropped, and any other scalar is wrapped in a one-element list.
    """
    if value is None:
        return None
    if isinstance(value, list):
        return value
    if not isinstance(value, str):
        return [value]
    return [piece for piece in (part.strip() for part in value.split(",")) if piece]
def unique_name(prefix):
    """Return *prefix* suffixed with the current epoch time in milliseconds."""
    millis = int(time.time() * 1000)
    return f"{prefix}_{millis}"
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/benchmark/utils.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_dataset_management/test_graphrag_tasks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import bulk_upload_documents, list_documents, parse_documents, run_graphrag, trace_graphrag
from utils import wait_for
@wait_for(200, 1, "Document parsing timeout")
def _parse_done(auth, dataset_id, document_ids=None):
    """Poll predicate: True once every targeted document's run state is DONE.

    With *document_ids* set, only those ids are checked; otherwise every
    document in the dataset must be DONE.
    """
    docs = list_documents(auth, dataset_id)["data"]["docs"]
    if document_ids is None:
        return all(doc.get("run") == "DONE" for doc in docs)
    wanted = set(document_ids)
    return all(doc.get("run") == "DONE" for doc in docs if doc.get("id") in wanted)
class TestGraphRAGTasks:
    """HTTP API tests for the dataset-level GraphRAG run/trace endpoints."""
    @pytest.mark.p2
    def test_trace_graphrag_before_run(self, HttpApiAuth, add_dataset_func):
        # Tracing before any run succeeds and returns an empty payload.
        dataset_id = add_dataset_func
        res = trace_graphrag(HttpApiAuth, dataset_id)
        assert res["code"] == 0, res
        assert res["data"] == {}, res
    @pytest.mark.p2
    def test_run_graphrag_no_documents(self, HttpApiAuth, add_dataset_func):
        # Running GraphRAG on an empty dataset is rejected with code 102.
        dataset_id = add_dataset_func
        res = run_graphrag(HttpApiAuth, dataset_id)
        assert res["code"] == 102, res
        assert "No documents in Dataset" in res.get("message", ""), res
    @pytest.mark.p3
    def test_run_graphrag_returns_task_id(self, HttpApiAuth, add_dataset_func, tmp_path):
        # With at least one document uploaded, a run yields a task id.
        dataset_id = add_dataset_func
        bulk_upload_documents(HttpApiAuth, dataset_id, 1, tmp_path)
        res = run_graphrag(HttpApiAuth, dataset_id)
        assert res["code"] == 0, res
        assert res["data"].get("graphrag_task_id"), res
    @pytest.mark.p3
    def test_trace_graphrag_until_complete(self, HttpApiAuth, add_dataset_func, tmp_path):
        # Full flow: upload, parse, run GraphRAG, then poll the trace endpoint
        # until progress reaches a terminal value (-1 or 1).
        dataset_id = add_dataset_func
        document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, 1, tmp_path)
        res = parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
        assert res["code"] == 0, res
        _parse_done(HttpApiAuth, dataset_id, document_ids)
        res = run_graphrag(HttpApiAuth, dataset_id)
        assert res["code"] == 0, res
        last_res = {}
        @wait_for(200, 1, "GraphRAG task timeout")
        def condition():
            # Stash the terminal trace response so it can be asserted below.
            res = trace_graphrag(HttpApiAuth, dataset_id)
            if res["code"] != 0:
                return False
            data = res.get("data") or {}
            if not data:
                return False
            if data.get("task_type") != "graphrag":
                return False
            progress = data.get("progress")
            if progress in (-1, 1, -1.0, 1.0):
                last_res["res"] = res
                return True
            return False
        condition()
        res = last_res["res"]
        assert res["data"]["task_type"] == "graphrag", res
        assert res["data"].get("progress") in (-1, 1, -1.0, 1.0), res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_dataset_management/test_graphrag_tasks.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_dataset_management/test_knowledge_graph.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import delete_knowledge_graph, knowledge_graph
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowHttpApiAuth
@pytest.mark.p2
class TestAuthorization:
    """Verify the knowledge-graph endpoint rejects missing/invalid API keys."""
    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 0, "Authorization"),
            (RAGFlowHttpApiAuth(INVALID_API_TOKEN), 109, "API key is invalid"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        # A placeholder dataset id is used; only the auth outcome is asserted.
        res = knowledge_graph(invalid_auth, "dataset_id")
        assert res["code"] == expected_code
        assert expected_message in res.get("message", "")
class TestKnowledgeGraph:
    """Read and delete the knowledge graph attached to a dataset."""
    @pytest.mark.p2
    def test_get_knowledge_graph_empty(self, HttpApiAuth, add_dataset_func):
        # A fresh dataset still exposes dict-valued graph/mind_map keys.
        dataset_id = add_dataset_func
        res = knowledge_graph(HttpApiAuth, dataset_id)
        assert res["code"] == 0, res
        assert "graph" in res["data"], res
        assert "mind_map" in res["data"], res
        assert isinstance(res["data"]["graph"], dict), res
        assert isinstance(res["data"]["mind_map"], dict), res
    @pytest.mark.p2
    def test_delete_knowledge_graph(self, HttpApiAuth, add_dataset_func):
        # Deleting the graph of a fresh dataset succeeds and returns True.
        dataset_id = add_dataset_func
        res = delete_knowledge_graph(HttpApiAuth, dataset_id)
        assert res["code"] == 0, res
        assert res["data"] is True, res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_dataset_management/test_knowledge_graph.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_dataset_management/test_raptor_tasks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import bulk_upload_documents, list_documents, parse_documents, run_raptor, trace_raptor
from utils import wait_for
@wait_for(200, 1, "Document parsing timeout")
def _parse_done(auth, dataset_id, document_ids=None):
    """Poll predicate: True once every targeted document's run state is DONE.

    With *document_ids* set, only those ids are checked; otherwise every
    document in the dataset must be DONE.
    """
    docs = list_documents(auth, dataset_id)["data"]["docs"]
    if document_ids is None:
        return all(doc.get("run") == "DONE" for doc in docs)
    wanted = set(document_ids)
    return all(doc.get("run") == "DONE" for doc in docs if doc.get("id") in wanted)
class TestRaptorTasks:
    """HTTP API tests for the dataset-level RAPTOR run/trace endpoints."""
    @pytest.mark.p2
    def test_trace_raptor_before_run(self, HttpApiAuth, add_dataset_func):
        # Tracing before any run succeeds and returns an empty payload.
        dataset_id = add_dataset_func
        res = trace_raptor(HttpApiAuth, dataset_id)
        assert res["code"] == 0, res
        assert res["data"] == {}, res
    @pytest.mark.p2
    def test_run_raptor_no_documents(self, HttpApiAuth, add_dataset_func):
        # Running RAPTOR on an empty dataset is rejected with code 102.
        dataset_id = add_dataset_func
        res = run_raptor(HttpApiAuth, dataset_id)
        assert res["code"] == 102, res
        assert "No documents in Dataset" in res.get("message", ""), res
    @pytest.mark.p3
    def test_run_raptor_returns_task_id(self, HttpApiAuth, add_dataset_func, tmp_path):
        # With at least one document uploaded, a run yields a task id.
        dataset_id = add_dataset_func
        bulk_upload_documents(HttpApiAuth, dataset_id, 1, tmp_path)
        res = run_raptor(HttpApiAuth, dataset_id)
        assert res["code"] == 0, res
        assert res["data"].get("raptor_task_id"), res
    @pytest.mark.p3
    def test_trace_raptor_until_complete(self, HttpApiAuth, add_dataset_func, tmp_path):
        # Full flow: upload, parse, run RAPTOR, then poll the trace endpoint
        # until progress reaches a terminal value (-1 or 1).
        dataset_id = add_dataset_func
        document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, 1, tmp_path)
        res = parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
        assert res["code"] == 0, res
        _parse_done(HttpApiAuth, dataset_id, document_ids)
        res = run_raptor(HttpApiAuth, dataset_id)
        assert res["code"] == 0, res
        last_res = {}
        @wait_for(200, 1, "RAPTOR task timeout")
        def condition():
            # Stash the terminal trace response so it can be asserted below.
            res = trace_raptor(HttpApiAuth, dataset_id)
            if res["code"] != 0:
                return False
            data = res.get("data") or {}
            if not data:
                return False
            if data.get("task_type") != "raptor":
                return False
            progress = data.get("progress")
            if progress in (-1, 1, -1.0, 1.0):
                last_res["res"] = res
                return True
            return False
        condition()
        res = last_res["res"]
        assert res["data"]["task_type"] == "raptor", res
        assert res["data"].get("progress") in (-1, 1, -1.0, 1.0), res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_dataset_management/test_raptor_tasks.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_file_management_within_dataset/test_metadata_summary.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Although the docs group this under "chunk management," the backend aggregates
# Document.meta_fields via document_service#get_metadata_summary and the test
# uses update_document, so it belongs with file/document management tests.
# import pytest
#from common import metadata_summary, update_document
def _summary_to_counts(summary):
counts = {}
for key, field_data in summary.items():
# New format: {key: {"type": "...", "values": [[value, count], ...]}}
pairs = field_data["values"]
counts[key] = {str(k): v for k, v in pairs}
return counts
class TestMetadataSummary:
    """Placeholder suite: the metadata-summary API changed, so the original
    assertions are disabled until the endpoint's contract settles."""
    pass
# Alteration of API
# TODO
#@pytest.mark.p2
#def test_metadata_summary_counts(self, HttpApiAuth, add_documents_func):
# dataset_id, document_ids = add_documents_func
# payloads = [
# {"tags": ["foo", "bar"], "author": "alice"},
# {"tags": ["foo"], "author": "bob"},
# {"tags": ["bar", "baz"], "author": None},
# ]
# for doc_id, meta_fields in zip(document_ids, payloads):
# res = update_document(HttpApiAuth, dataset_id, doc_id, {"meta_fields": meta_fields})
# assert res["code"] == 0, res
# res = metadata_summary(HttpApiAuth, dataset_id)
# assert res["code"] == 0, res
# summary = res["data"]["summary"]
# counts = _summary_to_counts(summary)
# assert counts["tags"]["foo"] == 2, counts
# assert counts["tags"]["bar"] == 2, counts
# assert counts["tags"]["baz"] == 1, counts
# assert counts["author"]["alice"] == 1, counts
# assert counts["author"]["bob"] == 1, counts
# assert "None" not in counts["author"], counts
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_file_management_within_dataset/test_metadata_summary.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_session_management/test_agent_completions.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import (
agent_completions,
create_agent,
create_agent_session,
delete_agent,
delete_agent_sessions,
list_agents,
)
# Fixed title so stale agents from earlier runs can be found and removed.
AGENT_TITLE = "test_agent_http"
# Smallest viable agent graph: a Begin node feeding a Message node whose
# content template references the incoming query ("{sys.query}").
MINIMAL_DSL = {
    "components": {
        "begin": {
            "obj": {"component_name": "Begin", "params": {}},
            "downstream": ["message"],
            "upstream": [],
        },
        "message": {
            "obj": {"component_name": "Message", "params": {"content": ["{sys.query}"]}},
            "downstream": [],
            "upstream": ["begin"],
        },
    },
    "history": [],
    "retrieval": [],
    "path": [],
    "globals": {
        "sys.query": "",
        "sys.user_id": "",
        "sys.conversation_turns": 0,
        "sys.files": [],
    },
    "variables": {},
}
@pytest.fixture(scope="function")
def agent_id(HttpApiAuth, request):
    """Create a fresh agent titled AGENT_TITLE and return its id.

    Any pre-existing agent with the same title is deleted first so the
    title lookup is unambiguous; a finalizer removes the agent's sessions
    and the agent itself after the test.
    """
    res = list_agents(HttpApiAuth, {"page_size": 1000})
    assert res["code"] == 0, res
    # Remove leftovers from aborted earlier runs.
    for agent in res.get("data", []):
        if agent.get("title") == AGENT_TITLE:
            delete_agent(HttpApiAuth, agent["id"])
    res = create_agent(HttpApiAuth, {"title": AGENT_TITLE, "dsl": MINIMAL_DSL})
    assert res["code"] == 0, res
    # Look the new agent up by title to obtain its id.
    res = list_agents(HttpApiAuth, {"title": AGENT_TITLE})
    assert res["code"] == 0, res
    assert res.get("data"), res
    agent_id = res["data"][0]["id"]
    def cleanup():
        # Sessions must be removed before the agent itself.
        delete_agent_sessions(HttpApiAuth, agent_id)
        delete_agent(HttpApiAuth, agent_id)
    request.addfinalizer(cleanup)
    return agent_id
class TestAgentCompletions:
    """HTTP API tests for non-streaming agent completion requests."""

    @pytest.mark.p2
    def test_agent_completion_stream_false(self, HttpApiAuth, agent_id):
        # Create a session so the completion is bound to a known session id.
        res = create_agent_session(HttpApiAuth, agent_id, payload={})
        assert res["code"] == 0, res
        session_id = res["data"]["id"]
        res = agent_completions(
            HttpApiAuth,
            agent_id,
            {"question": "hello", "stream": False, "session_id": session_id},
        )
        assert res["code"] == 0, res
        if isinstance(res["data"], dict):
            # Success path: the Message component echoes sys.query, so the
            # answer content must contain the question text.
            assert isinstance(res["data"].get("data"), dict), res
            content = res["data"]["data"].get("content", "")
            assert content, res
            assert "hello" in content, res
            assert res["data"].get("session_id") == session_id, res
        else:
            # Error path: the server may return a plain "**ERROR**..." string.
            assert isinstance(res["data"], str), res
            assert res["data"].startswith("**ERROR**"), res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_session_management/test_agent_completions.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_session_management/test_agent_sessions.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import requests
from common import (
create_agent,
create_agent_session,
delete_agent,
delete_agent_sessions,
list_agent_sessions,
list_agents,
)
from configs import HOST_ADDRESS, VERSION
AGENT_TITLE = "test_agent_http"
MINIMAL_DSL = {
"components": {
"begin": {
"obj": {"component_name": "Begin", "params": {}},
"downstream": ["message"],
"upstream": [],
},
"message": {
"obj": {"component_name": "Message", "params": {"content": ["{sys.query}"]}},
"downstream": [],
"upstream": ["begin"],
},
},
"history": [],
"retrieval": [],
"path": [],
"globals": {
"sys.query": "",
"sys.user_id": "",
"sys.conversation_turns": 0,
"sys.files": [],
},
"variables": {},
}
@pytest.fixture(scope="function")
def agent_id(HttpApiAuth, request):
    """Provision a disposable agent titled AGENT_TITLE and return its id.

    Deletes any same-titled agent left over from earlier runs, then registers
    a finalizer that removes the agent's sessions and the agent itself.
    """
    res = list_agents(HttpApiAuth, {"page_size": 1000})
    assert res["code"] == 0, res
    # Clean up duplicates from prior runs before creating a new agent.
    for agent in res.get("data", []):
        if agent.get("title") == AGENT_TITLE:
            delete_agent(HttpApiAuth, agent["id"])
    res = create_agent(HttpApiAuth, {"title": AGENT_TITLE, "dsl": MINIMAL_DSL})
    assert res["code"] == 0, res
    # Re-query by title to fetch the id assigned by the server.
    res = list_agents(HttpApiAuth, {"title": AGENT_TITLE})
    assert res["code"] == 0, res
    assert res.get("data"), res
    agent_id = res["data"][0]["id"]
    def cleanup():
        # Sessions first, then the agent.
        delete_agent_sessions(HttpApiAuth, agent_id)
        delete_agent(HttpApiAuth, agent_id)
    request.addfinalizer(cleanup)
    return agent_id
class TestAgentSessions:
    """HTTP API tests for agent session CRUD and agent validation errors."""

    @pytest.mark.p2
    def test_create_list_delete_agent_sessions(self, HttpApiAuth, agent_id):
        # Create -> list -> delete round-trip for a single session.
        res = create_agent_session(HttpApiAuth, agent_id, payload={})
        assert res["code"] == 0, res
        session_id = res["data"]["id"]
        assert res["data"]["agent_id"] == agent_id, res
        res = list_agent_sessions(HttpApiAuth, agent_id, params={"id": session_id})
        assert res["code"] == 0, res
        assert len(res["data"]) == 1, res
        assert res["data"][0]["id"] == session_id, res
        res = delete_agent_sessions(HttpApiAuth, agent_id, {"ids": [session_id]})
        assert res["code"] == 0, res
    @pytest.mark.p2
    def test_agent_crud_validation_contract(self, HttpApiAuth, agent_id):
        """Pins the error codes/messages the agent CRUD endpoints return."""
        # Unknown id + title -> 102 "doesn't exist".
        res = list_agents(HttpApiAuth, {"id": "missing-agent-id", "title": "missing-agent-title"})
        assert res["code"] == 102, res
        assert "doesn't exist" in res["message"], res
        # Descending order with pagination is accepted.
        res = list_agents(HttpApiAuth, {"title": AGENT_TITLE, "desc": "true", "page_size": 1})
        assert res["code"] == 0, res
        # Missing DSL -> 101.
        res = create_agent(HttpApiAuth, {"title": "missing-dsl-agent"})
        assert res["code"] == 101, res
        assert "No DSL data in request" in res["message"], res
        # Missing title -> 101.
        res = create_agent(HttpApiAuth, {"dsl": MINIMAL_DSL})
        assert res["code"] == 101, res
        assert "No title in request" in res["message], res" if False else res["message"], res
        # Duplicate title (fixture already created one) -> 102.
        res = create_agent(HttpApiAuth, {"title": AGENT_TITLE, "dsl": MINIMAL_DSL})
        assert res["code"] == 102, res
        assert "already exists" in res["message"], res
        # Updating/deleting an agent you don't own -> 103.
        update_url = f"{HOST_ADDRESS}/api/{VERSION}/agents/invalid-agent-id"
        res = requests.put(update_url, auth=HttpApiAuth, json={"title": "updated", "dsl": MINIMAL_DSL}).json()
        assert res["code"] == 103, res
        assert "Only owner of canvas authorized" in res["message"], res
        res = delete_agent(HttpApiAuth, "invalid-agent-id")
        assert res["code"] == 103, res
        assert "Only owner of canvas authorized" in res["message"], res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_session_management/test_agent_sessions.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_session_management/test_chat_completions.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import (
bulk_upload_documents,
chat_completions,
create_chat_assistant,
create_session_with_chat_assistant,
delete_chat_assistants,
delete_session_with_chat_assistants,
list_documents,
parse_documents,
)
from utils import wait_for
@wait_for(200, 1, "Document parsing timeout")
def _parse_done(auth, dataset_id, document_ids=None):
    """Poll predicate: True once every targeted document reports run == "DONE".

    With document_ids=None the whole dataset must be parsed; otherwise only
    the listed ids are checked. Retried by @wait_for until it returns True.
    """
    docs = list_documents(auth, dataset_id)["data"]["docs"]
    if document_ids is None:
        # No filter: every document in the dataset must be finished.
        return all(doc.get("run") == "DONE" for doc in docs)
    wanted = set(document_ids)
    # Only the requested ids must be finished; other documents are ignored.
    return all(doc.get("run") == "DONE" for doc in docs if doc.get("id") in wanted)
class TestChatCompletions:
    """HTTP API tests for non-streaming chat-assistant completions."""

    @pytest.mark.p3
    def test_chat_completion_stream_false_with_session(self, HttpApiAuth, add_dataset_func, tmp_path, request):
        # Upload one document and wait for parsing so retrieval has content.
        dataset_id = add_dataset_func
        document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, 1, tmp_path)
        res = parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
        assert res["code"] == 0, res
        _parse_done(HttpApiAuth, dataset_id, document_ids)
        res = create_chat_assistant(HttpApiAuth, {"name": "chat_completion_test", "dataset_ids": [dataset_id]})
        assert res["code"] == 0, res
        chat_id = res["data"]["id"]
        # Finalizers run LIFO: assistants are deleted after their sessions.
        request.addfinalizer(lambda: delete_session_with_chat_assistants(HttpApiAuth, chat_id))
        request.addfinalizer(lambda: delete_chat_assistants(HttpApiAuth))
        res = create_session_with_chat_assistant(HttpApiAuth, chat_id, {"name": "session_for_completion"})
        assert res["code"] == 0, res
        session_id = res["data"]["id"]
        res = chat_completions(
            HttpApiAuth,
            chat_id,
            {"question": "hello", "stream": False, "session_id": session_id},
        )
        assert res["code"] == 0, res
        assert isinstance(res["data"], dict), res
        # Non-streaming responses must carry the full answer envelope.
        for key in ["answer", "reference", "audio_binary", "id", "session_id"]:
            assert key in res["data"], res
        assert res["data"]["session_id"] == session_id, res
    @pytest.mark.p2
    def test_chat_completion_invalid_chat(self, HttpApiAuth):
        # An unknown chat id must be rejected with code 102.
        res = chat_completions(
            HttpApiAuth,
            "invalid_chat_id",
            {"question": "hello", "stream": False, "session_id": "invalid_session"},
        )
        assert res["code"] == 102, res
        assert "You don't own the chat" in res.get("message", ""), res
    @pytest.mark.p2
    def test_chat_completion_invalid_session(self, HttpApiAuth, request):
        # Valid chat, unknown session id -> code 102.
        res = create_chat_assistant(HttpApiAuth, {"name": "chat_completion_invalid_session", "dataset_ids": []})
        assert res["code"] == 0, res
        chat_id = res["data"]["id"]
        request.addfinalizer(lambda: delete_session_with_chat_assistants(HttpApiAuth, chat_id))
        request.addfinalizer(lambda: delete_chat_assistants(HttpApiAuth))
        res = chat_completions(
            HttpApiAuth,
            chat_id,
            {"question": "hello", "stream": False, "session_id": "invalid_session"},
        )
        assert res["code"] == 102, res
        assert "You don't own the session" in res.get("message", ""), res
    @pytest.mark.p2
    def test_chat_completion_invalid_metadata_condition(self, HttpApiAuth, request):
        # metadata_condition must be a structured object; a string is rejected.
        res = create_chat_assistant(HttpApiAuth, {"name": "chat_completion_invalid_meta", "dataset_ids": []})
        assert res["code"] == 0, res
        chat_id = res["data"]["id"]
        request.addfinalizer(lambda: delete_session_with_chat_assistants(HttpApiAuth, chat_id))
        request.addfinalizer(lambda: delete_chat_assistants(HttpApiAuth))
        res = create_session_with_chat_assistant(HttpApiAuth, chat_id, {"name": "session_for_meta"})
        assert res["code"] == 0, res
        session_id = res["data"]["id"]
        res = chat_completions(
            HttpApiAuth,
            chat_id,
            {
                "question": "hello",
                "stream": False,
                "session_id": session_id,
                "metadata_condition": "invalid",
            },
        )
        assert res["code"] == 102, res
        assert "metadata_condition" in res.get("message", ""), res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_session_management/test_chat_completions.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_session_management/test_related_questions.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import related_questions
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowHttpApiAuth
class TestRelatedQuestions:
    """HTTP API tests for the related-questions suggestion endpoint."""

    @pytest.mark.p3
    def test_related_questions_success(self, HttpApiAuth):
        # Happy path: a question plus industry hint yields a list of suggestions.
        res = related_questions(HttpApiAuth, {"question": "ragflow", "industry": "search"})
        assert res["code"] == 0, res
        assert isinstance(res.get("data"), list), res
    @pytest.mark.p2
    def test_related_questions_missing_question(self, HttpApiAuth):
        # "question" is mandatory; its absence is a 102 validation error.
        res = related_questions(HttpApiAuth, {"industry": "search"})
        assert res["code"] == 102, res
        assert "question" in res.get("message", ""), res
    @pytest.mark.p2
    def test_related_questions_invalid_auth(self):
        # A bogus API token is rejected with code 109.
        res = related_questions(RAGFlowHttpApiAuth(INVALID_API_TOKEN), {"question": "ragflow", "industry": "search"})
        assert res["code"] == 109, res
        assert "API key is invalid" in res.get("message", ""), res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_session_management/test_related_questions.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_router_errors.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import requests
from configs import HOST_ADDRESS, VERSION
@pytest.mark.p3
def test_route_not_found_returns_json():
    """An unknown API path must produce a structured JSON 404 response."""
    missing_path = f"/api/{VERSION}/__missing_route__"
    response = requests.get(f"{HOST_ADDRESS}{missing_path}")
    assert response.status_code == 404
    body = response.json()
    # The handler reports both a generic error label and the offending path.
    assert body["error"] == "Not Found"
    assert missing_path in body["message"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_router_errors.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:rag/advanced_rag/tree_structured_query_decomposition_retrieval.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import logging
from functools import partial
from api.db.services.llm_service import LLMBundle
from rag.prompts import kb_prompt
from rag.prompts.generator import sufficiency_check, multi_queries_gen
from rag.utils.tavily_conn import Tavily
from timeit import default_timer as timer
class TreeStructuredQueryDecompositionRetrieval:
    """Tree-structured deep-research retrieval.

    Starting from one question, retrieves context from the knowledge base,
    the web (Tavily, optional) and the knowledge graph (optional). When the
    LLM judges the gathered context insufficient, the question is decomposed
    into follow-up sub-questions that are explored concurrently, up to
    `depth` levels. All retrieved chunks are merged (deduplicated) into a
    caller-supplied `chunk_info` dict used later for citations.
    """

    def __init__(self,
                 chat_mdl: LLMBundle,
                 prompt_config: dict,
                 kb_retrieve: partial = None,
                 kg_retrieve: partial = None
                 ):
        self.chat_mdl = chat_mdl
        self.prompt_config = prompt_config
        self._kb_retrieve = kb_retrieve
        self._kg_retrieve = kg_retrieve
        # Serializes concurrent merges into the shared chunk_info dict.
        self._lock = asyncio.Lock()

    async def _retrieve_information(self, search_query):
        """Retrieve information for `search_query` from all configured sources."""
        # BUGFIX: start from an empty result *dict* (previously a list), so
        # the web and knowledge-graph steps below can still extend it even if
        # knowledge-base retrieval raises.
        kbinfos = {"chunks": [], "doc_aggs": []}
        # 1. Knowledge base retrieval
        try:
            if self._kb_retrieve:
                kbinfos = await self._kb_retrieve(question=search_query)
        except Exception as e:
            logging.error(f"Knowledge base retrieval error: {e}")
        # 2. Web retrieval (if Tavily API is configured)
        try:
            if self.prompt_config.get("tavily_api_key"):
                tav = Tavily(self.prompt_config["tavily_api_key"])
                tav_res = tav.retrieve_chunks(search_query)
                kbinfos["chunks"].extend(tav_res["chunks"])
                kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
        except Exception as e:
            logging.error(f"Web retrieval error: {e}")
        # 3. Knowledge graph retrieval (if configured)
        try:
            if self.prompt_config.get("use_kg") and self._kg_retrieve:
                ck = await self._kg_retrieve(question=search_query)
                if ck["content_with_weight"]:
                    # KG result is inserted first so it leads the prompt context.
                    kbinfos["chunks"].insert(0, ck)
        except Exception as e:
            logging.error(f"Knowledge graph retrieval error: {e}")
        return kbinfos

    async def _async_update_chunk_info(self, chunk_info, kbinfos):
        """Merge newly retrieved `kbinfos` into the shared `chunk_info`, deduplicated by id."""
        async with self._lock:
            if not chunk_info["chunks"]:
                # First retrieval: adopt the results wholesale.
                for k in chunk_info.keys():
                    chunk_info[k] = kbinfos[k]
            else:
                # Merge newly retrieved information, avoiding duplicates.
                cids = [c["chunk_id"] for c in chunk_info["chunks"]]
                for c in kbinfos["chunks"]:
                    if c["chunk_id"] not in cids:
                        chunk_info["chunks"].append(c)
                dids = [d["doc_id"] for d in chunk_info["doc_aggs"]]
                for d in kbinfos["doc_aggs"]:
                    if d["doc_id"] not in dids:
                        chunk_info["doc_aggs"].append(d)

    async def research(self, chunk_info, question, query, depth=3, callback=None):
        """Public entry point; brackets `_research` with UI start/end markers."""
        if callback:
            await callback("<START_DEEP_RESEARCH>")
        await self._research(chunk_info, question, query, depth, callback)
        if callback:
            await callback("<END_DEEP_RESEARCH>")

    async def _research(self, chunk_info, question, query, depth=3, callback=None):
        """One recursion step: retrieve, check sufficiency, fan out sub-questions.

        Returns the accumulated prompt-ready context as a string; "" when the
        maximum depth is reached.
        """
        if depth == 0:
            # Max search depth reached; stop recursing.
            return ""
        if callback:
            await callback(f"Searching by `{query}`...")
        st = timer()
        ret = await self._retrieve_information(query)
        if callback:
            await callback("Retrieval %d results in %.1fms"%(len(ret["chunks"]), (timer()-st)*1000))
        await self._async_update_chunk_info(chunk_info, ret)
        # Trim retrieved context to roughly half the model's window.
        ret = kb_prompt(ret, self.chat_mdl.max_length*0.5)
        if callback:
            await callback("Checking the sufficiency for retrieved information.")
        suff = await sufficiency_check(self.chat_mdl, question, ret)
        if suff["is_sufficient"]:
            if callback:
                await callback(f"Yes, the retrieved information is sufficient for '{question}'.")
            return ret
        # Not sufficient: ask the LLM for follow-up questions covering the gaps.
        succ_question_info = await multi_queries_gen(self.chat_mdl, question, query, suff["missing_information"], ret)
        if callback:
            await callback("Next step is to search for the following questions:</br> - " + "</br> - ".join(step["question"] for step in succ_question_info["questions"]))
        # Explore every follow-up question concurrently, one level deeper;
        # return_exceptions=True keeps one failing branch from killing the rest.
        steps = []
        for step in succ_question_info["questions"]:
            steps.append(asyncio.create_task(self._research(chunk_info, step["question"], step["query"], depth-1, callback)))
        results = await asyncio.gather(*steps, return_exceptions=True)
        return "\n".join([str(r) for r in results])
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/advanced_rag/tree_structured_query_decomposition_retrieval.py",
"license": "Apache License 2.0",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:sdk/python/ragflow_sdk/modules/memory.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .base import Base
class Memory(Base):
    """SDK wrapper around a RAGFlow memory resource.

    Carries the memory's configuration fields and exposes helpers for
    updating the memory, fetching its config, and managing its messages.
    HTTP helpers (`get`/`put`/`rm`) are inherited from `Base`.
    """

    def __init__(self, rag, res_dict):
        # Declared defaults; unknown keys in res_dict are dropped so the
        # instance only ever carries the attributes listed here.
        self.id = ""
        self.name = ""
        self.avatar = None
        self.tenant_id = None
        self.owner_name = ""
        self.memory_type = ["raw"]
        self.storage_type = "table"
        self.embd_id = ""
        self.llm_id = ""
        self.permissions = "me"
        self.description = ""
        self.memory_size = 5 * 1024 * 1024  # default quota: 5 MiB
        self.forgetting_policy = "FIFO"
        # BUGFIX: a stray trailing comma previously made this the tuple
        # (0.5,) instead of the float 0.5.
        self.temperature = 0.5
        self.system_prompt = ""
        self.user_prompt = ""
        for k in list(res_dict.keys()):
            if k not in self.__dict__:
                res_dict.pop(k)
        super().__init__(rag, res_dict)

    def update(self, update_dict: dict):
        """Apply `update_dict` to this memory on the server; returns self."""
        res = self.put(f"/memories/{self.id}", update_dict)
        res = res.json()
        if res.get("code") != 0:
            raise Exception(res["message"])
        self._update_from_dict(self.rag, res.get("data", {}))
        return self

    def get_config(self):
        """Refresh this object from the server-side memory config; returns self."""
        res = self.get(f"/memories/{self.id}/config")
        res = res.json()
        if res.get("code") != 0:
            raise Exception(res["message"])
        self._update_from_dict(self.rag, res.get("data", {}))
        return self

    def list_memory_messages(self, agent_id: str | list[str]=None, keywords: str=None, page: int=1, page_size: int=50):
        """List messages stored in this memory, optionally filtered by agent/keywords."""
        params = {
            "agent_id": agent_id,
            "keywords": keywords,
            "page": page,
            "page_size": page_size
        }
        res = self.get(f"/memories/{self.id}", params)
        res = res.json()
        if res.get("code") != 0:
            raise Exception(res["message"])
        return res["data"]

    def forget_message(self, message_id: int):
        """Permanently delete one message from this memory."""
        res = self.rm(f"/messages/{self.id}:{message_id}", {})
        res = res.json()
        if res.get("code") != 0:
            raise Exception(res["message"])
        return True

    def update_message_status(self, message_id: int, status: bool):
        """Enable/disable a message without deleting it."""
        update_message = {
            "status": status
        }
        res = self.put(f"/messages/{self.id}:{message_id}", update_message)
        res = res.json()
        if res.get("code") != 0:
            raise Exception(res["message"])
        return True

    def get_message_content(self, message_id: int) -> dict:
        """Fetch the full content payload of one message."""
        res = self.get(f"/messages/{self.id}:{message_id}/content")
        res = res.json()
        if res.get("code") != 0:
            raise Exception(res["message"])
        return res["data"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "sdk/python/ragflow_sdk/modules/memory.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:test/testcases/test_sdk_api/test_memory_management/test_create_memory.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import re
import pytest
from configs import INVALID_API_TOKEN, HOST_ADDRESS
from ragflow_sdk import RAGFlow
from hypothesis import example, given, settings
from utils.hypothesis_utils import valid_names
class TestAuthorization:
    """create_memory must reject missing or invalid API tokens."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_message",
        [
            (None, "<Unauthorized '401: Unauthorized'>"),
            (INVALID_API_TOKEN, "<Unauthorized '401: Unauthorized'>"),
        ],
        ids=["empty_auth", "invalid_api_token"]
    )
    def test_auth_invalid(self, invalid_auth, expected_message):
        # The SDK surfaces the 401 as a raised Exception with a fixed message.
        client = RAGFlow(invalid_auth, HOST_ADDRESS)
        with pytest.raises(Exception) as exception_info:
            client.create_memory(**{"name": "test_memory", "memory_type": ["raw"], "embd_id": "BAAI/bge-large-zh-v1.5@SILICONFLOW", "llm_id": "glm-4-flash@ZHIPU-AI"})
        assert str(exception_info.value) == expected_message, str(exception_info.value)
@pytest.mark.usefixtures("delete_test_memory")
class TestMemoryCreate:
    """SDK tests for memory creation: name rules, type validation, dedup."""

    @pytest.mark.p1
    @given(name=valid_names())
    @example("e" * 128)
    @settings(max_examples=20)
    def test_name(self, client, name):
        payload = {
            "name": name,
            "memory_type": ["raw"] + random.choices(["semantic", "episodic", "procedural"], k=random.randint(0, 3)),
            "embd_id": "BAAI/bge-large-zh-v1.5@SILICONFLOW",
            "llm_id": "glm-4-flash@ZHIPU-AI"
        }
        memory = client.create_memory(**payload)
        # The server may deduplicate the name as "name(1)", "name(2)", ...
        # BUGFIX: escape the *requested* name inside the pattern (it may
        # contain regex metacharacters) and match against the raw returned
        # name. The old code escaped the returned name instead and used an
        # unanchored alternation (^name|name(\d+)?$) that matched almost
        # anything.
        pattern = rf'^{re.escape(name)}(?:\((\d+)\))?$'
        assert re.match(pattern, memory.name), str(memory)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "name, expected_message",
        [
            ("", "Memory name cannot be empty or whitespace."),
            (" ", "Memory name cannot be empty or whitespace."),
            ("a" * 129, f"Memory name '{'a'*129}' exceeds limit of 128."),
        ],
        ids=["empty_name", "space_name", "too_long_name"],
    )
    def test_name_invalid(self, client, name, expected_message):
        # Blank and over-length names are rejected with specific messages.
        payload = {
            "name": name,
            "memory_type": ["raw"] + random.choices(["semantic", "episodic", "procedural"], k=random.randint(0, 3)),
            "embd_id": "BAAI/bge-large-zh-v1.5@SILICONFLOW",
            "llm_id": "glm-4-flash@ZHIPU-AI"
        }
        with pytest.raises(Exception) as exception_info:
            client.create_memory(**payload)
        assert str(exception_info.value) == expected_message, str(exception_info.value)

    @pytest.mark.p2
    @given(name=valid_names())
    @settings(deadline=None)
    def test_type_invalid(self, client, name):
        # An unsupported memory_type entry is rejected.
        payload = {
            "name": name,
            "memory_type": ["something"],
            "embd_id": "BAAI/bge-large-zh-v1.5@SILICONFLOW",
            "llm_id": "glm-4-flash@ZHIPU-AI"
        }
        with pytest.raises(Exception) as exception_info:
            client.create_memory(**payload)
        assert str(exception_info.value) == f"Memory type '{ {'something'} }' is not supported.", str(exception_info.value)

    @pytest.mark.p3
    def test_name_duplicated(self, client):
        # Creating the same name twice yields "name" then "name(1)".
        name = "duplicated_name_test"
        payload = {
            "name": name,
            "memory_type": ["raw"] + random.choices(["semantic", "episodic", "procedural"], k=random.randint(0, 3)),
            "embd_id": "BAAI/bge-large-zh-v1.5@SILICONFLOW",
            "llm_id": "glm-4-flash@ZHIPU-AI"
        }
        res1 = client.create_memory(**payload)
        assert res1.name == name, str(res1)
        res2 = client.create_memory(**payload)
        assert res2.name == f"{name}(1)", str(res2)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_memory_management/test_create_memory.py",
"license": "Apache License 2.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_memory_management/test_list_memory.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from ragflow_sdk import RAGFlow
from configs import INVALID_API_TOKEN, HOST_ADDRESS
class TestAuthorization:
    """list_memory must reject missing or invalid API tokens."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_message",
        [
            (None, "<Unauthorized '401: Unauthorized'>"),
            (INVALID_API_TOKEN, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_auth_invalid(self, invalid_auth, expected_message):
        # The SDK surfaces the 401 as a raised Exception with a fixed message.
        client = RAGFlow(invalid_auth, HOST_ADDRESS)
        with pytest.raises(Exception) as exception_info:
            client.list_memory()
        assert str(exception_info.value) == expected_message, str(exception_info.value)
class TestCapability:
    """Smoke-test: the SDK must tolerate many concurrent list_memory calls."""

    @pytest.mark.p3
    def test_capability(self, client):
        count = 100
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(client.list_memory) for _ in range(count)]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses
        # BUGFIX: list_memory() returns {"memory_list": [...], "total_count": n}
        # and raises on error — it has no "code" key, so the old check
        # `future.result()["code"] == 0` raised KeyError on every future.
        # future.result() also re-raises any exception from the worker thread.
        assert all("memory_list" in future.result() for future in futures)
@pytest.mark.usefixtures("add_memory_func")
class TestMemoryList:
    """SDK tests for listing memories (pagination, filters, config fetch).

    The add_memory_func fixture pre-creates 3 memories for every test.
    """

    @pytest.mark.p2
    def test_params_unset(self, client):
        res = client.list_memory()
        assert len(res["memory_list"]) == 3, str(res)
        assert res["total_count"] == 3, str(res)
    @pytest.mark.p2
    def test_params_empty(self, client):
        res = client.list_memory(**{})
        assert len(res["memory_list"]) == 3, str(res)
        assert res["total_count"] == 3, str(res)
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size",
        [
            ({"page": 1, "page_size": 10}, 3),
            ({"page": 2, "page_size": 10}, 0),
            ({"page": 1, "page_size": 2}, 2),
            ({"page": 2, "page_size": 2}, 1),
            ({"page": 5, "page_size": 10}, 0),
        ],
        ids=["normal_first_page", "beyond_max_page", "normal_last_partial_page" , "normal_middle_page",
             "full_data_single_page"],
    )
    def test_page(self, client, params, expected_page_size):
        # have added 3 memories in fixture
        # total_count always reports the full count regardless of the page.
        res = client.list_memory(**params)
        assert len(res["memory_list"]) == expected_page_size, str(res)
        assert res["total_count"] == 3, str(res)
    @pytest.mark.p2
    def test_filter_memory_type(self, client):
        # Single-type filter: every result must include that type.
        res = client.list_memory(**{"memory_type": ["semantic"]})
        for memory in res["memory_list"]:
            assert "semantic" in memory.memory_type, str(memory)
    @pytest.mark.p2
    def test_filter_multi_memory_type(self, client):
        # Multi-type filter behaves as OR across the requested types.
        res = client.list_memory(**{"memory_type": ["episodic", "procedural"]})
        for memory in res["memory_list"]:
            assert "episodic" in memory.memory_type or "procedural" in memory.memory_type, str(memory)
    @pytest.mark.p2
    def test_filter_storage_type(self, client):
        res = client.list_memory(**{"storage_type": "table"})
        for memory in res["memory_list"]:
            assert memory.storage_type == "table", str(memory)
    @pytest.mark.p2
    def test_match_keyword(self, client):
        # Keyword filter matches on the memory name.
        res = client.list_memory(**{"keywords": "s"})
        for memory in res["memory_list"]:
            assert "s" in memory.name, str(memory)
    @pytest.mark.p1
    def test_get_config(self, client):
        # get_config() refreshes the object in place and returns it.
        memory_list = client.list_memory()
        assert len(memory_list["memory_list"]) > 0, str(memory_list)
        memory = memory_list["memory_list"][0]
        memory_id = memory.id
        memory_config = memory.get_config()
        assert memory_config.id == memory_id, memory_config
        # All declared config attributes must be present on the object.
        for field in ["name", "avatar", "tenant_id", "owner_name", "memory_type", "storage_type",
                      "embd_id", "llm_id", "permissions", "description", "memory_size", "forgetting_policy",
                      "temperature", "system_prompt", "user_prompt"]:
            assert hasattr(memory, field), memory_config
    @pytest.mark.p2
    def test_get_config_invalid_memory_id_raises(self, client):
        # Pointing the object at a nonexistent id makes get_config() raise.
        memory_list = client.list_memory()
        assert len(memory_list["memory_list"]) > 0, str(memory_list)
        memory = memory_list["memory_list"][0]
        memory.id = "missing-memory-id-for-config"
        with pytest.raises(Exception) as exception_info:
            memory.get_config()
        assert str(exception_info.value), exception_info
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_memory_management/test_list_memory.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_memory_management/test_rm_memory.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from ragflow_sdk import RAGFlow
from configs import INVALID_API_TOKEN, HOST_ADDRESS
class TestAuthorization:
    """delete_memory must reject missing or invalid API tokens."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_message",
        [
            (None, "<Unauthorized '401: Unauthorized'>"),
            (INVALID_API_TOKEN, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_auth_invalid(self, invalid_auth, expected_message):
        # The SDK surfaces the 401 as a raised Exception with a fixed message.
        client = RAGFlow(invalid_auth, HOST_ADDRESS)
        with pytest.raises(Exception) as exception_info:
            client.delete_memory("some_memory_id")
        assert str(exception_info.value) == expected_message, str(exception_info.value)
@pytest.mark.usefixtures("add_memory_func")
class TestMemoryDelete:
    """SDK tests for deleting memories.

    add_memory_func pre-creates 3 memories and exposes their ids on the
    class as `memory_ids`.
    """

    @pytest.mark.p1
    def test_memory_id(self, client):
        # Deleting one of the three fixtures leaves two.
        memory_ids = self.memory_ids
        client.delete_memory(memory_ids[0])
        res = client.list_memory()
        assert res["total_count"] == 2, res
    @pytest.mark.p2
    def test_id_wrong_uuid(self, client):
        # A well-formed but unknown id raises and deletes nothing.
        with pytest.raises(Exception) as exception_info:
            client.delete_memory("d94a8dc02c9711f0930f7fbc369eab6d")
        assert exception_info.value, str(exception_info.value)
        res = client.list_memory()
        assert len(res["memory_list"]) == 2, res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_memory_management/test_rm_memory.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_memory_management/test_update_memory.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import pytest
from configs import INVALID_API_TOKEN, HOST_ADDRESS
from ragflow_sdk import RAGFlow, Memory
from hypothesis import HealthCheck, example, given, settings
from utils import encode_avatar
from utils.file_utils import create_image_file
from utils.hypothesis_utils import valid_names
class TestAuthorization:
    """Verify that memory updates are rejected without valid credentials."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_message",
        [
            (None, "<Unauthorized '401: Unauthorized'>"),
            (INVALID_API_TOKEN, "<Unauthorized '401: Unauthorized'>"),
        ],
        ids=["empty_auth", "invalid_api_token"]
    )
    def test_auth_invalid(self, invalid_auth, expected_message):
        """Updating a memory with missing/invalid auth must raise 401."""
        with pytest.raises(Exception) as exc:
            bad_client = RAGFlow(invalid_auth, HOST_ADDRESS)
            target = Memory(bad_client, {"id": "memory_id"})
            target.update({"name": "New_Name"})
        assert str(exc.value) == expected_message, str(exc.value)
@pytest.mark.usefixtures("add_memory_func")
class TestMemoryUpdate:
    """Field-by-field update coverage for memories created by the fixture."""

    @pytest.mark.p1
    @given(name=valid_names())
    @example("f" * 128)
    @settings(max_examples=20, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_name(self, client, name):
        """Any valid generated name is accepted and echoed back."""
        target = Memory(client, {"id": random.choice(self.memory_ids)})
        updated = target.update({"name": name})
        assert updated.name == name, str(updated)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "name, expected_message",
        [
            ("", "Memory name cannot be empty or whitespace."),
            (" ", "Memory name cannot be empty or whitespace."),
            ("a" * 129, f"Memory name '{'a' * 129}' exceeds limit of 128."),
        ]
    )
    def test_name_invalid(self, client, name, expected_message):
        """Blank or over-long names are rejected with a descriptive error."""
        target = Memory(client, {"id": random.choice(self.memory_ids)})
        with pytest.raises(Exception) as exc:
            target.update({"name": name})
        assert str(exc.value) == expected_message, str(exc.value)

    @pytest.mark.p2
    def test_duplicate_name(self, client):
        """A name collision is resolved by suffixing '(1)' on the second memory."""
        first = Memory(client, {"id": self.memory_ids[0]})
        renamed_first = first.update({"name": "Test_Memory"})
        assert renamed_first.name == "Test_Memory", str(renamed_first)
        second = Memory(client, {"id": self.memory_ids[1]})
        renamed_second = second.update({"name": "Test_Memory"})
        assert renamed_second.name == "Test_Memory(1)", str(renamed_second)

    @pytest.mark.p2
    def test_avatar(self, client, tmp_path):
        """A base64 data-URI avatar round-trips unchanged."""
        image_path = create_image_file(tmp_path / "ragflow_test.png")
        avatar_uri = f"data:image/png;base64,{encode_avatar(image_path)}"
        target = Memory(client, {"id": random.choice(self.memory_ids)})
        updated = target.update({"avatar": avatar_uri})
        assert updated.avatar == avatar_uri, str(updated)

    @pytest.mark.p2
    def test_description(self, client):
        """Description text is stored verbatim."""
        description = "This is a test description."
        target = Memory(client, {"id": random.choice(self.memory_ids)})
        updated = target.update({"description": description})
        assert updated.description == description, str(updated)

    @pytest.mark.p1
    def test_llm(self, client):
        """The backing LLM can be switched by model id."""
        llm_id = "glm-4@ZHIPU-AI"
        target = Memory(client, {"id": random.choice(self.memory_ids)})
        updated = target.update({"llm_id": llm_id})
        assert updated.llm_id == llm_id, str(updated)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "permission",
        [
            "me",
            "team"
        ],
        ids=["me", "team"]
    )
    def test_permission(self, client, permission):
        """Both supported permission scopes are accepted and normalised."""
        target = Memory(client, {"id": random.choice(self.memory_ids)})
        updated = target.update({"permissions": permission})
        assert updated.permissions == permission.lower().strip(), str(updated)

    @pytest.mark.p1
    def test_memory_size(self, client):
        """The memory size cap (in bytes) is updatable."""
        memory_size = 1048576  # 1 MB
        target = Memory(client, {"id": random.choice(self.memory_ids)})
        updated = target.update({"memory_size": memory_size})
        assert updated.memory_size == memory_size, str(updated)

    @pytest.mark.p1
    def test_temperature(self, client):
        """The sampling temperature is updatable."""
        temperature = 0.7
        target = Memory(client, {"id": random.choice(self.memory_ids)})
        updated = target.update({"temperature": temperature})
        assert updated.temperature == temperature, str(updated)

    @pytest.mark.p1
    def test_system_prompt(self, client):
        """The system prompt is stored verbatim."""
        system_prompt = "This is a system prompt."
        target = Memory(client, {"id": random.choice(self.memory_ids)})
        updated = target.update({"system_prompt": system_prompt})
        assert updated.system_prompt == system_prompt, str(updated)

    @pytest.mark.p1
    def test_user_prompt(self, client):
        """The user prompt is stored verbatim."""
        user_prompt = "This is a user prompt."
        target = Memory(client, {"id": random.choice(self.memory_ids)})
        updated = target.update({"user_prompt": user_prompt})
        assert updated.user_prompt == user_prompt, updated
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_memory_management/test_update_memory.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_message_management/test_add_message.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import uuid
import pytest
from ragflow_sdk import RAGFlow, Memory
from configs import INVALID_API_TOKEN, HOST_ADDRESS
class TestAuthorization:
    """Verify that message ingestion is rejected without valid credentials."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_message",
        [
            (None, "<Unauthorized '401: Unauthorized'>"),
            (INVALID_API_TOKEN, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_auth_invalid(self, invalid_auth, expected_message):
        """Adding a message with missing/invalid auth must raise 401."""
        bad_client = RAGFlow(invalid_auth, HOST_ADDRESS)
        payload = {
            "memory_id": [""],
            "agent_id": "",
            "session_id": "",
            "user_id": "",
            "user_input": "what is pineapple?",
            "agent_response": ""
        }
        with pytest.raises(Exception) as exc:
            bad_client.add_message(**payload)
        assert str(exc.value) == expected_message, str(exc.value)
@pytest.mark.usefixtures("add_empty_raw_type_memory")
class TestAddRawMessage:
    """Ingestion into a memory configured with the 'raw' storage type."""
    @pytest.mark.p1
    def test_add_raw_message(self, client):
        """Add one Q/A exchange, then confirm it is retrievable by agent/session id."""
        memory_id = self.memory_id  # empty memory created by the fixture
        agent_id = uuid.uuid4().hex
        session_id = uuid.uuid4().hex
        message_payload = {
            "memory_id": [memory_id],
            "agent_id": agent_id,
            "session_id": session_id,
            "user_id": "",
            "user_input": "what is pineapple?",
            "agent_response": """
            A pineapple is a tropical fruit known for its sweet, tangy flavor and distinctive, spiky appearance. Here are the key facts:
            Scientific Name: Ananas comosus
            Physical Description: It has a tough, spiky, diamond-patterned outer skin (rind) that is usually green, yellow, or brownish. Inside, the juicy yellow flesh surrounds a fibrous core.
            Growth: Unlike most fruits, pineapples do not grow on trees. They grow from a central stem as a composite fruit, meaning they are formed from many individual berries that fuse together around the core. They grow on a short, leafy plant close to the ground.
            Uses: Pineapples are eaten fresh, cooked, grilled, juiced, or canned. They are a popular ingredient in desserts, fruit salads, savory dishes (like pizzas or ham glazes), smoothies, and cocktails.
            Nutrition: They are a good source of Vitamin C, manganese, and contain an enzyme called bromelain, which aids in digestion and can tenderize meat.
            Symbolism: The pineapple is a traditional symbol of hospitality and welcome in many cultures.
            Are you asking about the fruit itself, or its use in a specific context?
            """
        }
        add_res = client.add_message(**message_payload)
        # the API returns this fixed acknowledgement string on success
        assert add_res == "All add to task.", str(add_res)
        time.sleep(2) # make sure refresh to index before search
        memory = Memory(client, {"id": memory_id})
        # session_id is used as a keyword so only the message just added matches
        message_res = memory.list_memory_messages(**{"agent_id": agent_id, "keywords": session_id})
        assert message_res["messages"]["total_count"] > 0
        for message in message_res["messages"]["message_list"]:
            assert message["agent_id"] == agent_id, message
            assert message["session_id"] == session_id, message
@pytest.mark.usefixtures("add_empty_multiple_type_memory")
class TestAddMultipleTypeMessage:
    """Ingestion into a memory configured with the 'multiple' storage type."""
    @pytest.mark.p1
    def test_add_multiple_type_message(self, client):
        """Add one Q/A exchange, then confirm it is retrievable by agent/session id."""
        memory_id = self.memory_id  # empty memory created by the fixture
        agent_id = uuid.uuid4().hex
        session_id = uuid.uuid4().hex
        message_payload = {
            "memory_id": [memory_id],
            "agent_id": agent_id,
            "session_id": session_id,
            "user_id": "",
            "user_input": "what is pineapple?",
            "agent_response": """
            A pineapple is a tropical fruit known for its sweet, tangy flavor and distinctive, spiky appearance. Here are the key facts:
            Scientific Name: Ananas comosus
            Physical Description: It has a tough, spiky, diamond-patterned outer skin (rind) that is usually green, yellow, or brownish. Inside, the juicy yellow flesh surrounds a fibrous core.
            Growth: Unlike most fruits, pineapples do not grow on trees. They grow from a central stem as a composite fruit, meaning they are formed from many individual berries that fuse together around the core. They grow on a short, leafy plant close to the ground.
            Uses: Pineapples are eaten fresh, cooked, grilled, juiced, or canned. They are a popular ingredient in desserts, fruit salads, savory dishes (like pizzas or ham glazes), smoothies, and cocktails.
            Nutrition: They are a good source of Vitamin C, manganese, and contain an enzyme called bromelain, which aids in digestion and can tenderize meat.
            Symbolism: The pineapple is a traditional symbol of hospitality and welcome in many cultures.
            Are you asking about the fruit itself, or its use in a specific context?
            """
        }
        add_res = client.add_message(**message_payload)
        # the API returns this fixed acknowledgement string on success
        assert add_res == "All add to task.", str(add_res)
        time.sleep(2) # make sure refresh to index before search
        memory = Memory(client, {"id": memory_id})
        # session_id is used as a keyword so only the message just added matches
        message_res = memory.list_memory_messages(**{"agent_id": agent_id, "keywords": session_id})
        assert message_res["messages"]["total_count"] > 0
        for message in message_res["messages"]["message_list"]:
            assert message["agent_id"] == agent_id, message
            assert message["session_id"] == session_id, message
@pytest.mark.usefixtures("add_2_multiple_type_memory")
class TestAddToMultipleMemory:
    """Fan-out: a single add_message call targeting several memories at once."""
    @pytest.mark.p1
    def test_add_to_multiple_memory(self, client):
        """Add one exchange to two memories and verify it landed in each."""
        memory_ids = self.memory_ids  # two memories created by the fixture
        agent_id = uuid.uuid4().hex
        session_id = uuid.uuid4().hex
        message_payload = {
            "memory_id": memory_ids,
            "agent_id": agent_id,
            "session_id": session_id,
            "user_id": "",
            "user_input": "what is pineapple?",
            "agent_response": """
            A pineapple is a tropical fruit known for its sweet, tangy flavor and distinctive, spiky appearance. Here are the key facts:
            Scientific Name: Ananas comosus
            Physical Description: It has a tough, spiky, diamond-patterned outer skin (rind) that is usually green, yellow, or brownish. Inside, the juicy yellow flesh surrounds a fibrous core.
            Growth: Unlike most fruits, pineapples do not grow on trees. They grow from a central stem as a composite fruit, meaning they are formed from many individual berries that fuse together around the core. They grow on a short, leafy plant close to the ground.
            Uses: Pineapples are eaten fresh, cooked, grilled, juiced, or canned. They are a popular ingredient in desserts, fruit salads, savory dishes (like pizzas or ham glazes), smoothies, and cocktails.
            Nutrition: They are a good source of Vitamin C, manganese, and contain an enzyme called bromelain, which aids in digestion and can tenderize meat.
            Symbolism: The pineapple is a traditional symbol of hospitality and welcome in many cultures.
            Are you asking about the fruit itself, or its use in a specific context?
            """
        }
        add_res = client.add_message(**message_payload)
        # the API returns this fixed acknowledgement string on success
        assert add_res == "All add to task.", str(add_res)
        time.sleep(2) # make sure refresh to index before search
        # each targeted memory must independently contain the new message
        for memory_id in memory_ids:
            memory = Memory(client, {"id": memory_id})
            message_res = memory.list_memory_messages(**{"agent_id": agent_id, "keywords": session_id})
            assert message_res["messages"]["total_count"] > 0
            for message in message_res["messages"]["message_list"]:
                assert message["agent_id"] == agent_id, message
                assert message["session_id"] == session_id, message
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_message_management/test_add_message.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_message_management/test_forget_message.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import pytest
from ragflow_sdk import RAGFlow, Memory
from configs import INVALID_API_TOKEN, HOST_ADDRESS
class TestAuthorization:
    """Verify that forgetting a message is rejected without valid credentials."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_message",
        [
            (None, "<Unauthorized '401: Unauthorized'>"),
            (INVALID_API_TOKEN, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_auth_invalid(self, invalid_auth, expected_message):
        """Forgetting a message with missing/invalid auth must raise 401."""
        bad_client = RAGFlow(invalid_auth, HOST_ADDRESS)
        with pytest.raises(Exception) as exc:
            target = Memory(bad_client, {"id": "empty_memory_id"})
            target.forget_message(0)
        assert str(exc.value) == expected_message, str(exc.value)
@pytest.mark.usefixtures("add_memory_with_5_raw_message_func")
class TestForgetMessage:
    """Soft-delete ('forget') behaviour on a memory pre-loaded with messages."""

    @pytest.mark.p1
    def test_forget_message(self, client):
        """Forgetting a random message stamps its forget_at field."""
        memory = Memory(client, {"id": self.memory_id})
        listing = memory.list_memory_messages()
        assert len(listing["messages"]["message_list"]) > 0
        victim = random.choice(listing["messages"]["message_list"])
        forget_res = memory.forget_message(victim["message_id"])
        assert forget_res, str(forget_res)
        content = memory.get_message_content(victim["message_id"])
        # a forgotten message carries a real timestamp, not a placeholder
        assert content["forget_at"] not in ["-", ""], content
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_message_management/test_forget_message.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_message_management/test_get_message_content.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import pytest
from ragflow_sdk import RAGFlow, Memory
from configs import INVALID_API_TOKEN, HOST_ADDRESS
class TestAuthorization:
    """Verify that reading message content is rejected without valid credentials."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_message",
        [
            (None, "<Unauthorized '401: Unauthorized'>"),
            (INVALID_API_TOKEN, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_auth_invalid(self, invalid_auth, expected_message):
        """Reading message content with missing/invalid auth must raise 401.

        Bug fix: the client was previously constructed with the hard-coded
        INVALID_API_TOKEN, so the parametrized ``invalid_auth`` value (the
        ``None`` case in particular) was never actually exercised.
        """
        client = RAGFlow(invalid_auth, HOST_ADDRESS)
        with pytest.raises(Exception) as exception_info:
            memory = Memory(client, {"id": "empty_memory_id"})
            memory.get_message_content(0)
        assert str(exception_info.value) == expected_message, str(exception_info.value)
@pytest.mark.usefixtures("add_memory_with_multiple_type_message_func")
class TestGetMessageContent:
    """Content retrieval for messages in a 'multiple' type memory."""

    @pytest.mark.p1
    def test_get_message_content(self, client):
        """A random recent message exposes non-null content and embedding."""
        mem_id = self.memory_id
        recent = client.get_recent_messages([mem_id])
        assert len(recent) > 0, recent
        picked = random.choice(recent)
        memory = Memory(client, {"id": mem_id})
        content = memory.get_message_content(picked["message_id"])
        for field in ["content", "content_embed"]:
            assert field in content
            assert content[field] is not None, content
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_message_management/test_get_message_content.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_message_management/test_get_recent_message.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import pytest
from ragflow_sdk import RAGFlow
from configs import INVALID_API_TOKEN, HOST_ADDRESS
class TestAuthorization:
    """Verify that recent-message reads are rejected without valid credentials."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_message",
        [
            (None, "<Unauthorized '401: Unauthorized'>"),
            (INVALID_API_TOKEN, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_auth_invalid(self, invalid_auth, expected_message):
        """Fetching recent messages with missing/invalid auth must raise 401."""
        bad_client = RAGFlow(invalid_auth, HOST_ADDRESS)
        with pytest.raises(Exception) as exc:
            bad_client.get_recent_messages(["some_memory_id"])
        assert str(exc.value) == expected_message, str(exc.value)
@pytest.mark.usefixtures("add_memory_with_5_raw_message_func")
class TestGetRecentMessage:
    """Recent-message retrieval against a memory pre-loaded with 5 messages."""

    @pytest.mark.p1
    def test_get_recent_messages(self, client):
        """All five seeded messages come back with no filters applied."""
        recent = client.get_recent_messages([self.memory_id])
        assert len(recent) == 5, recent

    @pytest.mark.p2
    def test_filter_recent_messages_by_agent(self, client):
        """Filtering by agent_id returns only that agent's messages."""
        chosen_agent = random.choice(self.agent_ids)
        recent = client.get_recent_messages(**{"agent_id": chosen_agent, "memory_id": [self.memory_id]})
        for message in recent:
            assert message["agent_id"] == chosen_agent, message

    @pytest.mark.p2
    def test_filter_recent_messages_by_session(self, client):
        """Filtering by session_id returns only that session's messages."""
        chosen_session = random.choice(self.session_ids)
        recent = client.get_recent_messages(**{"session_id": chosen_session, "memory_id": [self.memory_id]})
        for message in recent:
            assert message["session_id"] == chosen_session, message
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_message_management/test_get_recent_message.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_message_management/test_list_message.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import random
import pytest
from ragflow_sdk import RAGFlow, Memory
from configs import INVALID_API_TOKEN, HOST_ADDRESS
from utils.engine_utils import get_doc_engine
class TestAuthorization:
    """Verify that message listing is rejected without valid credentials."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_message",
        [
            (None, "<Unauthorized '401: Unauthorized'>"),
            (INVALID_API_TOKEN, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_auth_invalid(self, invalid_auth, expected_message):
        """Listing messages with missing/invalid auth must raise 401."""
        bad_client = RAGFlow(invalid_auth, HOST_ADDRESS)
        with pytest.raises(Exception) as exc:
            target = Memory(bad_client, {"id": "empty_memory_id"})
            target.list_memory_messages()
        assert str(exc.value) == expected_message, str(exc.value)
@pytest.mark.usefixtures("add_memory_with_5_raw_message_func")
class TestMessageList:
    """Listing, paging, filtering and keyword search over 5 seeded messages."""

    @pytest.mark.p2
    def test_params_unset(self, client):
        """No arguments at all returns every seeded message."""
        memory = Memory(client, {"id": self.memory_id})
        listing = memory.list_memory_messages()
        assert len(listing["messages"]["message_list"]) == 5, str(listing)

    @pytest.mark.p2
    def test_params_empty(self, client):
        """An explicitly empty kwargs dict behaves like no arguments."""
        memory = Memory(client, {"id": self.memory_id})
        listing = memory.list_memory_messages(**{})
        assert len(listing["messages"]["message_list"]) == 5, str(listing)

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size",
        [
            ({"page": 1, "page_size": 10}, 5),
            ({"page": 2, "page_size": 10}, 0),
            ({"page": 1, "page_size": 2}, 2),
            ({"page": 3, "page_size": 2}, 1),
            ({"page": 5, "page_size": 10}, 0),
        ],
        ids=["normal_first_page", "beyond_max_page", "normal_last_partial_page", "normal_middle_page",
             "full_data_single_page"],
    )
    def test_page_size(self, client, params, expected_page_size):
        """Paging maths holds against the 5 messages added by the fixture."""
        memory = Memory(client, {"id": self.memory_id})
        page = memory.list_memory_messages(**params)
        assert len(page["messages"]["message_list"]) == expected_page_size, str(page)

    @pytest.mark.p2
    def test_filter_agent_id(self, client):
        """Filtering by agent_id only yields that agent's messages."""
        chosen_agent = random.choice(self.agent_ids)
        memory = Memory(client, {"id": self.memory_id})
        listing = memory.list_memory_messages(**{"agent_id": chosen_agent})
        for message in listing["messages"]["message_list"]:
            assert message["agent_id"] == chosen_agent, message

    @pytest.mark.p2
    @pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="Not support.")
    def test_search_keyword(self, client):
        """A random substring of a session id matches only that session."""
        # runtime probe in addition to the env marker: the deployed engine may
        # differ from the DOC_ENGINE environment variable
        if get_doc_engine(client) == "infinity":
            pytest.skip("Not support.")
        chosen_session = random.choice(self.session_ids)
        slice_start = random.randint(0, len(chosen_session) - 2)
        slice_end = random.randint(slice_start + 1, len(chosen_session) - 1)
        keyword = chosen_session[slice_start:slice_end]
        memory = Memory(client, {"id": self.memory_id})
        listing = memory.list_memory_messages(**{"keywords": keyword})
        assert len(listing["messages"]["message_list"]) > 0, listing
        for message in listing["messages"]["message_list"]:
            assert keyword in message["session_id"], message
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_message_management/test_list_message.py",
"license": "Apache License 2.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_message_management/test_search_message.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from ragflow_sdk import RAGFlow, Memory
from configs import INVALID_API_TOKEN, HOST_ADDRESS
class TestAuthorization:
    """Verify that message search is rejected without valid credentials."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_message",
        [
            (None, "<Unauthorized '401: Unauthorized'>"),
            (INVALID_API_TOKEN, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_auth_invalid(self, invalid_auth, expected_message):
        """Searching messages with missing/invalid auth must raise 401."""
        bad_client = RAGFlow(invalid_auth, HOST_ADDRESS)
        with pytest.raises(Exception) as exc:
            bad_client.search_message("", ["empty_memory_id"])
        assert str(exc.value) == expected_message, str(exc.value)
@pytest.mark.usefixtures("add_memory_with_multiple_type_message_func")
class TestSearchMessage:
    """Semantic search over messages seeded into a 'multiple' type memory."""

    @pytest.mark.p1
    def test_query(self, client):
        """A plain query against a non-empty memory returns hits."""
        mem_id = self.memory_id
        memory = Memory(client, {"id": mem_id})
        listing = memory.list_memory_messages()
        assert listing["messages"]["total_count"] > 0
        query = "Coriander is a versatile herb with two main edible parts. What's its name can refer to?"
        hits = client.search_message(**{"memory_id": [mem_id], "query": query})
        assert len(hits) > 0

    @pytest.mark.p2
    def test_query_with_agent_filter(self, client):
        """An agent_id filter restricts hits to that agent."""
        mem_id = self.memory_id
        memory = Memory(client, {"id": mem_id})
        listing = memory.list_memory_messages()
        assert listing["messages"]["total_count"] > 0
        agent_id = self.agent_id
        query = "Coriander is a versatile herb with two main edible parts. What's its name can refer to?"
        hits = client.search_message(**{"memory_id": [mem_id], "query": query, "agent_id": agent_id})
        assert len(hits) > 0
        for message in hits:
            assert message["agent_id"] == agent_id, message

    @pytest.mark.p2
    def test_query_with_not_default_params(self, client):
        """Custom threshold/weight/top_n are honoured and cap the result count."""
        mem_id = self.memory_id
        memory = Memory(client, {"id": mem_id})
        listing = memory.list_memory_messages()
        assert listing["messages"]["total_count"] > 0
        query = "Coriander is a versatile herb with two main edible parts. What's its name can refer to?"
        params = {
            "similarity_threshold": 0.1,
            "keywords_similarity_weight": 0.6,
            "top_n": 4
        }
        hits = client.search_message(**{"memory_id": [mem_id], "query": query, **params})
        assert len(hits) > 0
        assert len(hits) <= params["top_n"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_message_management/test_search_message.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.