repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/rag/llm/ocr_model.py | rag/llm/ocr_model.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
from typing import Any, Optional
from deepdoc.parser.mineru_parser import MinerUParser
class Base:
def __init__(self, key: str | dict, model_name: str, **kwargs):
self.model_name = model_name
def parse_pdf(self, filepath: str, binary=None, **kwargs) -> tuple[Any, Any]:
raise NotImplementedError("Please implement parse_pdf!")
class MinerUOcrModel(Base, MinerUParser):
    """OCR/PDF parsing model backed by a MinerU service or local install.

    Configuration is resolved from the JSON `key` payload — either nested
    under "api_key" (UI) or a flat MINERU_* dict (auto-provisioned) — with
    MINERU_* environment variables as the final fallback.
    """

    _FACTORY_NAME = "MinerU"

    def __init__(self, key: str | dict, model_name: str, **kwargs):
        Base.__init__(self, key, model_name, **kwargs)
        raw_config = {}
        if key:
            try:
                raw_config = json.loads(key)
            except Exception:
                # Non-JSON keys are ignored; environment variables still apply below.
                raw_config = {}
        # nested {"api_key": {...}} from UI
        # flat {"MINERU_*": "..."} payload auto-provisioned from env vars
        config = raw_config.get("api_key", raw_config)
        if not isinstance(config, dict):
            config = {}

        def _resolve_config(key: str, env_key: str, default=""):
            # lower-case keys (UI), upper-case MINERU_* (env auto-provision), env vars
            return config.get(key, config.get(env_key, os.environ.get(env_key, default)))

        self.mineru_api = _resolve_config("mineru_apiserver", "MINERU_APISERVER", "")
        self.mineru_output_dir = _resolve_config("mineru_output_dir", "MINERU_OUTPUT_DIR", "")
        self.mineru_backend = _resolve_config("mineru_backend", "MINERU_BACKEND", "pipeline")
        self.mineru_server_url = _resolve_config("mineru_server_url", "MINERU_SERVER_URL", "")
        # Stored as bool; accepts "0"/"1"-style values from config or env.
        self.mineru_delete_output = bool(int(_resolve_config("mineru_delete_output", "MINERU_DELETE_OUTPUT", 1)))
        # Redact sensitive config keys before logging
        redacted_config = {}
        for k, v in config.items():
            if any(
                sensitive_word in k.lower()
                for sensitive_word in ("key", "password", "token", "secret")
            ):
                redacted_config[k] = "[REDACTED]"
            else:
                redacted_config[k] = v
        logging.info(
            f"Parsed MinerU config (sensitive fields redacted): {redacted_config}"
        )
        MinerUParser.__init__(self, mineru_api=self.mineru_api, mineru_server_url=self.mineru_server_url)

    def check_available(self, backend: Optional[str] = None, server_url: Optional[str] = None) -> tuple[bool, str]:
        """Check the MinerU backend/server is reachable; returns (ok, reason)."""
        # Fall back to the configured backend/server when not overridden.
        backend = backend or self.mineru_backend
        server_url = server_url or self.mineru_server_url
        return self.check_installation(backend=backend, server_url=server_url)

    def parse_pdf(self, filepath: str, binary=None, callback=None, parse_method: str = "raw", **kwargs):
        """Parse a PDF through MinerU and return (sections, tables).

        Raises:
            RuntimeError: when the MinerU server/installation is not accessible.
        """
        ok, reason = self.check_available()
        if not ok:
            raise RuntimeError(f"MinerU server not accessible: {reason}")
        sections, tables = MinerUParser.parse_pdf(
            self,
            filepath=filepath,
            binary=binary,
            callback=callback,
            output_dir=self.mineru_output_dir,
            backend=self.mineru_backend,
            server_url=self.mineru_server_url,
            delete_output=self.mineru_delete_output,
            parse_method=parse_method,
            **kwargs
        )
        return sections, tables
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/rag/llm/rerank_model.py | rag/llm/rerank_model.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from abc import ABC
from urllib.parse import urljoin
import httpx
import numpy as np
import requests
from yarl import URL
from common.log_utils import log_exception
from common.token_utils import num_tokens_from_string, truncate, total_token_count_from_response
class Base(ABC):
    """Abstract rerank model: subclasses score `texts` against `query`
    and return (scores, token_count)."""

    def __init__(self, key, model_name, **kwargs):
        # Nothing is stored at this level; concrete subclasses keep their
        # own credentials and endpoints.
        pass

    def similarity(self, query: str, texts: list):
        raise NotImplementedError("Please implement encode method!")
class JinaRerank(Base):
    """Jina-compatible rerank HTTP API client.

    Fix: honor a caller-supplied `base_url`. Previously the Jina endpoint
    was hard-coded in the body, silently breaking subclasses
    (NovitaRerank, GiteeRerank, JiekouAIRerank) that pass their own URLs.
    """

    _FACTORY_NAME = "Jina"

    def __init__(self, key, model_name="jina-reranker-v2-base-multilingual", base_url="https://api.jina.ai/v1/rerank"):
        # Fall back to the Jina default only when no endpoint is given.
        self.base_url = base_url or "https://api.jina.ai/v1/rerank"
        self.headers = {"Content-Type": "application/json", "Authorization": f"Bearer {key}"}
        self.model_name = model_name

    def similarity(self, query: str, texts: list):
        """Return (relevance scores aligned with `texts`, token usage)."""
        # Truncate each document to the per-document token limit.
        texts = [truncate(t, 8196) for t in texts]
        data = {"model": self.model_name, "query": query, "documents": texts, "top_n": len(texts)}
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        try:
            for d in res["results"]:
                rank[d["index"]] = d["relevance_score"]
        except Exception as _e:
            # Unexpected response shape: log and return zero scores.
            log_exception(_e, res)
        return rank, total_token_count_from_response(res)
class XInferenceRerank(Base):
    """Rerank client for an Xinference server's /v1/rerank endpoint."""

    _FACTORY_NAME = "Xinference"

    def __init__(self, key="x", model_name="", base_url=""):
        # Normalize the endpoint to end with /v1/rerank.
        # NOTE(review): urljoin with a leading "/" replaces the whole path,
        # so a base_url mounted under a sub-path would lose it — confirm
        # this matches real deployments.
        if base_url.find("/v1") == -1:
            base_url = urljoin(base_url, "/v1/rerank")
        if base_url.find("/rerank") == -1:
            base_url = urljoin(base_url, "/v1/rerank")
        self.model_name = model_name
        self.base_url = base_url
        self.headers = {"Content-Type": "application/json", "accept": "application/json"}
        # "x" is the placeholder key; only send Authorization for real credentials.
        if key and key != "x":
            self.headers["Authorization"] = f"Bearer {key}"

    def similarity(self, query: str, texts: list):
        """Return (relevance scores aligned with `texts`, estimated token count)."""
        if len(texts) == 0:
            return np.array([]), 0
        # Token count is computed over truncated texts, but the request body
        # sends the original `texts` — presumably intentional; verify upstream.
        pairs = [(query, truncate(t, 4096)) for t in texts]
        token_count = 0
        for _, t in pairs:
            token_count += num_tokens_from_string(t)
        data = {"model": self.model_name, "query": query, "return_documents": "true", "return_len": "true", "documents": texts}
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        try:
            for d in res["results"]:
                rank[d["index"]] = d["relevance_score"]
        except Exception as _e:
            log_exception(_e, res)
        return rank, token_count
class LocalAIRerank(Base):
    """Rerank via a LocalAI-compatible /rerank endpoint.

    Scores are min-max normalized to [0, 1]; identical scores collapse
    to all-zeros.
    """

    _FACTORY_NAME = "LocalAI"

    def __init__(self, key, model_name, base_url):
        if "/rerank" in base_url:
            self.base_url = base_url
        else:
            self.base_url = urljoin(base_url, "/rerank")
        self.headers = {"Content-Type": "application/json", "Authorization": f"Bearer {key}"}
        self.model_name = model_name.split("___")[0]

    def similarity(self, query: str, texts: list):
        # noway to config Ragflow , use fix setting
        clipped = [truncate(t, 500) for t in texts]
        token_count = sum(num_tokens_from_string(t) for t in clipped)
        payload = {
            "model": self.model_name,
            "query": query,
            "documents": clipped,
            "top_n": len(clipped),
        }
        res = requests.post(self.base_url, headers=self.headers, json=payload).json()
        rank = np.zeros(len(clipped), dtype=float)
        try:
            for entry in res["results"]:
                rank[entry["index"]] = entry["relevance_score"]
        except Exception as _e:
            log_exception(_e, res)
        # Normalize the rank values to the range 0 to 1
        lo = np.min(rank)
        hi = np.max(rank)
        # Avoid division by zero if all ranks are identical
        if np.isclose(lo, hi, atol=1e-3):
            return np.zeros_like(rank), token_count
        return (rank - lo) / (hi - lo), token_count
class NvidiaRerank(Base):
    """NVIDIA retrieval reranking API client.

    Fix: previously `self.base_url` was only assigned for the two known
    model names, so any other model crashed with AttributeError on the
    first `similarity` call. It now falls back to the provided base_url.
    """

    _FACTORY_NAME = "NVIDIA"

    def __init__(self, key, model_name, base_url="https://ai.api.nvidia.com/v1/retrieval/nvidia/"):
        if not base_url:
            base_url = "https://ai.api.nvidia.com/v1/retrieval/nvidia/"
        self.model_name = model_name
        # Default endpoint for model names without a dedicated route.
        self.base_url = base_url
        if self.model_name == "nvidia/nv-rerankqa-mistral-4b-v3":
            self.base_url = urljoin(base_url, "nv-rerankqa-mistral-4b-v3/reranking")
        if self.model_name == "nvidia/rerank-qa-mistral-4b":
            self.base_url = urljoin(base_url, "reranking")
            # This route expects the versioned model identifier.
            self.model_name = "nv-rerank-qa-mistral-4b:1"
        self.headers = {
            "accept": "application/json",
            "Content-Type": "application/json",
            "Authorization": f"Bearer {key}",
        }

    def similarity(self, query: str, texts: list):
        """Return (logit scores aligned with `texts`, locally estimated token count)."""
        token_count = num_tokens_from_string(query) + sum([num_tokens_from_string(t) for t in texts])
        data = {
            "model": self.model_name,
            "query": {"text": query},
            "passages": [{"text": text} for text in texts],
            "truncate": "END",
            "top_n": len(texts),
        }
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        try:
            for d in res["rankings"]:
                rank[d["index"]] = d["logit"]
        except Exception as _e:
            log_exception(_e, res)
        return rank, token_count
class LmStudioRerank(Base):
    """Placeholder: LM-Studio reranking is not supported yet."""

    _FACTORY_NAME = "LM-Studio"

    def __init__(self, key, model_name, base_url, **kwargs):
        # Nothing to configure until the backend is implemented.
        pass

    def similarity(self, query: str, texts: list):
        raise NotImplementedError("The LmStudioRerank has not been implement")
class OpenAI_APIRerank(Base):
    """Generic OpenAI-compatible /rerank endpoint client.

    Scores are min-max normalized to [0, 1]; identical scores collapse
    to all-zeros.
    """

    _FACTORY_NAME = "OpenAI-API-Compatible"

    def __init__(self, key, model_name, base_url):
        if "/rerank" in base_url:
            self.base_url = base_url
        else:
            self.base_url = urljoin(base_url, "/rerank")
        self.headers = {"Content-Type": "application/json", "Authorization": f"Bearer {key}"}
        self.model_name = model_name.split("___")[0]

    def similarity(self, query: str, texts: list):
        # noway to config Ragflow , use fix setting
        docs = [truncate(t, 500) for t in texts]
        token_count = sum(num_tokens_from_string(d) for d in docs)
        body = {
            "model": self.model_name,
            "query": query,
            "documents": docs,
            "top_n": len(docs),
        }
        res = requests.post(self.base_url, headers=self.headers, json=body).json()
        rank = np.zeros(len(docs), dtype=float)
        try:
            for item in res["results"]:
                rank[item["index"]] = item["relevance_score"]
        except Exception as _e:
            log_exception(_e, res)
        # Normalize the rank values to the range 0 to 1
        lo = np.min(rank)
        hi = np.max(rank)
        # Avoid division by zero if all ranks are identical
        if np.isclose(lo, hi, atol=1e-3):
            return np.zeros_like(rank), token_count
        return (rank - lo) / (hi - lo), token_count
class CoHereRerank(Base):
    """Cohere rerank client, also used for VLLM's Cohere-compatible API."""

    _FACTORY_NAME = ["Cohere", "VLLM"]

    def __init__(self, key, model_name, base_url=None):
        from cohere import Client

        # Only pass base_url if it's a non-empty string, otherwise use default Cohere API endpoint
        client_kwargs = {"api_key": key}
        if base_url and base_url.strip():
            client_kwargs["base_url"] = base_url
        self.client = Client(**client_kwargs)
        # Strip the "___"-suffixed factory tag from the model name.
        self.model_name = model_name.split("___")[0]

    def similarity(self, query: str, texts: list):
        """Return (relevance scores aligned with `texts`, locally estimated token count)."""
        # Token usage is estimated locally; the API response is not consulted.
        token_count = num_tokens_from_string(query) + sum([num_tokens_from_string(t) for t in texts])
        res = self.client.rerank(
            model=self.model_name,
            query=query,
            documents=texts,
            top_n=len(texts),
            return_documents=False,
        )
        rank = np.zeros(len(texts), dtype=float)
        try:
            for d in res.results:
                rank[d.index] = d.relevance_score
        except Exception as _e:
            log_exception(_e, res)
        return rank, token_count
class TogetherAIRerank(Base):
    """Placeholder: TogetherAI reranking is not supported yet."""

    _FACTORY_NAME = "TogetherAI"

    def __init__(self, key, model_name, base_url, **kwargs):
        # Nothing to configure until the backend is implemented.
        pass

    def similarity(self, query: str, texts: list):
        raise NotImplementedError("The api has not been implement")
class SILICONFLOWRerank(Base):
    """SiliconFlow rerank API client."""

    _FACTORY_NAME = "SILICONFLOW"

    def __init__(self, key, model_name, base_url="https://api.siliconflow.cn/v1/rerank"):
        self.model_name = model_name
        self.base_url = base_url or "https://api.siliconflow.cn/v1/rerank"
        self.headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {key}",
        }

    def similarity(self, query: str, texts: list):
        """Return (relevance scores aligned with `texts`, API-reported token usage)."""
        payload = {
            "model": self.model_name,
            "query": query,
            "documents": texts,
            "top_n": len(texts),
            "return_documents": False,
            "max_chunks_per_doc": 1024,
            "overlap_tokens": 80,
        }
        response = requests.post(self.base_url, json=payload, headers=self.headers).json()
        scores = np.zeros(len(texts), dtype=float)
        try:
            for item in response["results"]:
                scores[item["index"]] = item["relevance_score"]
        except Exception as _e:
            log_exception(_e, response)
        return scores, total_token_count_from_response(response)
class BaiduYiyanRerank(Base):
    """Baidu Qianfan (Yiyan) rerank client.

    `key` is a JSON string carrying {"yiyan_ak": ..., "yiyan_sk": ...}.
    """

    _FACTORY_NAME = "BaiduYiyan"

    def __init__(self, key, model_name, base_url=None):
        from qianfan.resources import Reranker

        key = json.loads(key)
        ak = key.get("yiyan_ak", "")
        sk = key.get("yiyan_sk", "")
        self.client = Reranker(ak=ak, sk=sk)
        self.model_name = model_name

    def similarity(self, query: str, texts: list):
        """Return (relevance scores aligned with `texts`, API-reported token usage)."""
        res = self.client.do(
            model=self.model_name,
            query=query,
            documents=texts,
            top_n=len(texts),
        ).body
        rank = np.zeros(len(texts), dtype=float)
        try:
            for d in res["results"]:
                rank[d["index"]] = d["relevance_score"]
        except Exception as _e:
            log_exception(_e, res)
        return rank, total_token_count_from_response(res)
class VoyageRerank(Base):
    """Voyage AI rerank client."""

    _FACTORY_NAME = "Voyage AI"

    def __init__(self, key, model_name, base_url=None):
        import voyageai

        # base_url is ignored; the voyageai SDK targets the official endpoint.
        self.client = voyageai.Client(api_key=key)
        self.model_name = model_name

    def similarity(self, query: str, texts: list):
        """Return (relevance scores aligned with `texts`, API-reported total tokens)."""
        if not texts:
            return np.array([]), 0
        rank = np.zeros(len(texts), dtype=float)
        res = self.client.rerank(query=query, documents=texts, model=self.model_name, top_k=len(texts))
        try:
            for r in res.results:
                rank[r.index] = r.relevance_score
        except Exception as _e:
            log_exception(_e, res)
        return rank, res.total_tokens
class QWenRerank(Base):
    """Tongyi-Qianwen (DashScope) gte-rerank client."""

    _FACTORY_NAME = "Tongyi-Qianwen"

    def __init__(self, key, model_name="gte-rerank", base_url=None, **kwargs):
        import dashscope

        self.api_key = key
        # NOTE(review): the default is "gte-rerank", so model_name is None
        # only when explicitly passed; then the SDK's model constant is used.
        self.model_name = dashscope.TextReRank.Models.gte_rerank if model_name is None else model_name

    def similarity(self, query: str, texts: list):
        """Return (relevance scores, token usage); raises ValueError on API failure."""
        from http import HTTPStatus

        import dashscope

        resp = dashscope.TextReRank.call(api_key=self.api_key, model=self.model_name, query=query, documents=texts, top_n=len(texts), return_documents=False)
        rank = np.zeros(len(texts), dtype=float)
        if resp.status_code == HTTPStatus.OK:
            try:
                for r in resp.output.results:
                    rank[r.index] = r.relevance_score
            except Exception as _e:
                log_exception(_e, resp)
            return rank, total_token_count_from_response(resp)
        else:
            raise ValueError(f"Error calling QWenRerank model {self.model_name}: {resp.status_code} - {resp.text}")
class HuggingfaceRerank(Base):
    """Rerank via a text-embeddings-inference style /rerank HTTP service."""

    _FACTORY_NAME = "HuggingFace"

    @staticmethod
    def post(query: str, texts: list, url="127.0.0.1"):
        # Score in batches of 8; remember the last failure and surface it
        # only after every batch has been attempted.
        scores = [0 for _ in range(len(texts))]
        last_exc = None
        step = 8
        for start in range(0, len(texts), step):
            try:
                res = requests.post(
                    f"http://{url}/rerank",
                    headers={"Content-Type": "application/json"},
                    json={"query": query, "texts": texts[start : start + step], "raw_scores": False, "truncate": True},
                )
                for item in res.json():
                    scores[item["index"] + start] = item["score"]
            except Exception as e:
                last_exc = e
        if last_exc:
            raise last_exc
        return np.array(scores)

    def __init__(self, key, model_name="BAAI/bge-reranker-v2-m3", base_url="http://127.0.0.1"):
        self.model_name = model_name.split("___")[0]
        self.base_url = base_url

    def similarity(self, query: str, texts: list) -> tuple[np.ndarray, int]:
        """Return (scores aligned with `texts`, locally estimated token count)."""
        if not texts:
            return np.array([]), 0
        total = 0
        for t in texts:
            total += num_tokens_from_string(t)
        return HuggingfaceRerank.post(query, texts, self.base_url), total
class GPUStackRerank(Base):
    """GPUStack rerank API client.

    Fix: `requests.Response.raise_for_status()` raises
    `requests.exceptions.HTTPError`, not `httpx.HTTPStatusError`, so the
    old handler could never catch the failure it was written to report.
    """

    _FACTORY_NAME = "GPUStack"

    def __init__(self, key, model_name, base_url):
        if not base_url:
            raise ValueError("url cannot be None")
        self.model_name = model_name
        self.base_url = str(URL(base_url) / "v1" / "rerank")
        self.headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {key}",
        }

    def similarity(self, query: str, texts: list):
        """Return (relevance scores, locally estimated token count).

        Raises ValueError when the server replies with an HTTP error status.
        """
        payload = {
            "model": self.model_name,
            "query": query,
            "documents": texts,
            "top_n": len(texts),
        }
        try:
            response = requests.post(self.base_url, json=payload, headers=self.headers)
            response.raise_for_status()
            response_json = response.json()
            rank = np.zeros(len(texts), dtype=float)
            token_count = 0
            for t in texts:
                token_count += num_tokens_from_string(t)
            try:
                for result in response_json["results"]:
                    rank[result["index"]] = result["relevance_score"]
            except Exception as _e:
                log_exception(_e, response)
            return (
                rank,
                token_count,
            )
        except requests.exceptions.HTTPError as e:
            raise ValueError(f"Error calling GPUStackRerank model {self.model_name}: {e.response.status_code} - {e.response.text}")
class NovitaRerank(JinaRerank):
    """NovitaAI rerank: Jina-compatible API on a Novita endpoint."""

    _FACTORY_NAME = "NovitaAI"

    def __init__(self, key, model_name, base_url="https://api.novita.ai/v3/openai/rerank"):
        super().__init__(key, model_name, base_url or "https://api.novita.ai/v3/openai/rerank")
class GiteeRerank(JinaRerank):
    """GiteeAI rerank: Jina-compatible API on a Gitee endpoint."""

    _FACTORY_NAME = "GiteeAI"

    def __init__(self, key, model_name, base_url="https://ai.gitee.com/v1/rerank"):
        super().__init__(key, model_name, base_url or "https://ai.gitee.com/v1/rerank")
class Ai302Rerank(JinaRerank):
    """302.AI rerank: Jina-compatible API on a 302.AI endpoint.

    Fix: this class inherited `Base`, whose __init__ does not accept a
    third positional argument, so `super().__init__(key, model_name,
    base_url)` always raised TypeError and no `similarity` was inherited.
    It now derives from JinaRerank like the other Jina-compatible providers.
    """

    _FACTORY_NAME = "302.AI"

    def __init__(self, key, model_name, base_url="https://api.302.ai/v1/rerank"):
        if not base_url:
            base_url = "https://api.302.ai/v1/rerank"
        super().__init__(key, model_name, base_url)
class JiekouAIRerank(JinaRerank):
    """Jiekou.AI rerank: Jina-compatible API on a Jiekou endpoint."""

    _FACTORY_NAME = "Jiekou.AI"

    def __init__(self, key, model_name, base_url="https://api.jiekou.ai/openai/v1/rerank"):
        super().__init__(key, model_name, base_url or "https://api.jiekou.ai/openai/v1/rerank")
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/rag/prompts/template.py | rag/prompts/template.py | import os
PROMPT_DIR = os.path.dirname(__file__)
_loaded_prompts = {}
def load_prompt(name: str) -> str:
    """Load prompts/<name>.md (whitespace-stripped), memoizing the result.

    Raises:
        FileNotFoundError: if the prompt file does not exist.
    """
    cached = _loaded_prompts.get(name)
    if cached is not None:
        return cached
    path = os.path.join(PROMPT_DIR, f"{name}.md")
    if not os.path.isfile(path):
        raise FileNotFoundError(f"Prompt file '{name}.md' not found in prompts/ directory.")
    with open(path, "r", encoding="utf-8") as f:
        text = f.read().strip()
    _loaded_prompts[name] = text
    return text
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/rag/prompts/generator.py | rag/prompts/generator.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import datetime
import json
import logging
import re
from copy import deepcopy
from typing import Tuple
import jinja2
import json_repair
from common.misc_utils import hash_str2int
from rag.nlp import rag_tokenizer
from rag.prompts.template import load_prompt
from common.constants import TAG_FLD
from common.token_utils import encoder, num_tokens_from_string
STOP_TOKEN = "<|STOP|>"
COMPLETE_TASK = "complete_task"
INPUT_UTILIZATION = 0.5
def get_value(d, k1, k2):
    """Return d[k1] if the key exists (even if its value is None),
    otherwise d[k2], otherwise None — legacy/new field-name fallback."""
    if k1 in d:
        return d[k1]
    return d.get(k2)
def chunks_format(reference):
    """Normalize retrieved chunks into the public API schema, accepting
    either legacy or current field names for each attribute."""

    def _fmt(chunk):
        return {
            "id": get_value(chunk, "chunk_id", "id"),
            "content": get_value(chunk, "content", "content_with_weight"),
            "document_id": get_value(chunk, "doc_id", "document_id"),
            "document_name": get_value(chunk, "docnm_kwd", "document_name"),
            "dataset_id": get_value(chunk, "kb_id", "dataset_id"),
            "image_id": get_value(chunk, "image_id", "img_id"),
            "positions": get_value(chunk, "positions", "position_int"),
            "url": chunk.get("url"),
            "similarity": chunk.get("similarity"),
            "vector_similarity": chunk.get("vector_similarity"),
            "term_similarity": chunk.get("term_similarity"),
            "doc_type": get_value(chunk, "doc_type_kwd", "doc_type"),
        }

    return [_fmt(chunk) for chunk in reference.get("chunks", [])]
def message_fit_in(msg, max_length=4000):
    """Trim a chat message list so its total token count fits max_length.

    Strategy: keep everything if it fits; otherwise keep only the system
    message(s) plus the last message, then truncate whichever of those
    dominates the budget. Returns (token_count, messages).
    """

    def count():
        nonlocal msg
        tks_cnts = []
        for m in msg:
            tks_cnts.append({"role": m["role"], "count": num_tokens_from_string(m["content"])})
        total = 0
        for m in tks_cnts:
            total += m["count"]
        return total

    c = count()
    if c < max_length:
        return c, msg

    # Drop the middle of the conversation: system prompt(s) + last message.
    msg_ = [m for m in msg if m["role"] == "system"]
    if len(msg) > 1:
        msg_.append(msg[-1])
    msg = msg_
    c = count()
    if c < max_length:
        return c, msg

    ll = num_tokens_from_string(msg_[0]["content"])
    ll2 = num_tokens_from_string(msg_[-1]["content"])
    if ll / (ll + ll2) > 0.8:
        # System prompt dominates (>80% of tokens): truncate it, leaving
        # room for the last message.
        m = msg_[0]["content"]
        m = encoder.decode(encoder.encode(m)[: max_length - ll2])
        msg[0]["content"] = m
        return max_length, msg

    # Otherwise truncate the last message.
    # NOTE(review): the budget here is max_length - ll2 (the message's own
    # length) rather than max_length - ll (the system prompt's) — confirm
    # this is intended.
    m = msg_[-1]["content"]
    m = encoder.decode(encoder.encode(m)[: max_length - ll2])
    msg[-1]["content"] = m
    return max_length, msg
def kb_prompt(kbinfos, max_tokens, hash_id=False):
    """Format retrieved chunks into knowledge-base prompt sections.

    Chunks are kept in order until ~97% of max_tokens is consumed. Each
    kept chunk is rendered as an ID line plus a small tree of metadata
    (title, URL, document meta fields) followed by its content.

    Args:
        kbinfos: retrieval result dict holding "chunks".
        max_tokens: token budget for the knowledge section.
        hash_id: when True, emit a hashed chunk id (bucketed to 500)
            instead of the positional index.

    Returns:
        List of formatted section strings, one per kept chunk.
    """
    from api.db.services.document_service import DocumentService

    knowledges = [get_value(ck, "content", "content_with_weight") for ck in kbinfos["chunks"]]
    kwlg_len = len(knowledges)
    used_token_count = 0
    chunks_num = 0
    for i, c in enumerate(knowledges):
        if not c:
            continue
        used_token_count += num_tokens_from_string(c)
        chunks_num += 1
        if max_tokens * 0.97 < used_token_count:
            knowledges = knowledges[:i]
            logging.warning(f"Not all the retrieval into prompt: {len(knowledges)}/{kwlg_len}")
            break

    # Fetch document meta fields only for the chunks that made the cut.
    docs = DocumentService.get_by_ids([get_value(ck, "doc_id", "document_id") for ck in kbinfos["chunks"][:chunks_num]])
    docs = {d.id: d.meta_fields for d in docs}

    def draw_node(k, line):
        # Render one "├── key: value" tree line; embedded newlines collapsed.
        if line is not None and not isinstance(line, str):
            line = str(line)
        if not line:
            return ""
        return f"\n├── {k}: " + re.sub(r"\n+", " ", line, flags=re.DOTALL)

    knowledges = []
    for i, ck in enumerate(kbinfos["chunks"][:chunks_num]):
        cnt = "\nID: {}".format(i if not hash_id else hash_str2int(get_value(ck, "id", "chunk_id"), 500))
        cnt += draw_node("Title", get_value(ck, "docnm_kwd", "document_name"))
        cnt += draw_node("URL", ck['url']) if "url" in ck else ""
        for k, v in docs.get(get_value(ck, "doc_id", "document_id"), {}).items():
            cnt += draw_node(k, v)
        cnt += "\n└── Content:\n"
        cnt += get_value(ck, "content", "content_with_weight")
        knowledges.append(cnt)
    return knowledges
def memory_prompt(message_list, max_tokens):
    """Collect message contents, in order, until ~97% of max_tokens
    would be exceeded; returns the kept content strings."""
    budget = max_tokens * 0.97
    used = 0
    contents = []
    for message in message_list:
        tokens = num_tokens_from_string(message["content"])
        if used + tokens > budget:
            logging.warning(f"Not all the retrieval into prompt: {len(contents)}/{len(message_list)}")
            break
        contents.append(message["content"])
        used += tokens
    return contents
# Prompt templates, loaded once at import time from rag/prompts/<name>.md.
CITATION_PROMPT_TEMPLATE = load_prompt("citation_prompt")
CITATION_PLUS_TEMPLATE = load_prompt("citation_plus")
CONTENT_TAGGING_PROMPT_TEMPLATE = load_prompt("content_tagging_prompt")
CROSS_LANGUAGES_SYS_PROMPT_TEMPLATE = load_prompt("cross_languages_sys_prompt")
CROSS_LANGUAGES_USER_PROMPT_TEMPLATE = load_prompt("cross_languages_user_prompt")
FULL_QUESTION_PROMPT_TEMPLATE = load_prompt("full_question_prompt")
KEYWORD_PROMPT_TEMPLATE = load_prompt("keyword_prompt")
QUESTION_PROMPT_TEMPLATE = load_prompt("question_prompt")
VISION_LLM_DESCRIBE_PROMPT = load_prompt("vision_llm_describe_prompt")
VISION_LLM_FIGURE_DESCRIBE_PROMPT = load_prompt("vision_llm_figure_describe_prompt")
STRUCTURED_OUTPUT_PROMPT = load_prompt("structured_output_prompt")
ANALYZE_TASK_SYSTEM = load_prompt("analyze_task_system")
ANALYZE_TASK_USER = load_prompt("analyze_task_user")
NEXT_STEP = load_prompt("next_step")
REFLECT = load_prompt("reflect")
SUMMARY4MEMORY = load_prompt("summary4memory")
RANK_MEMORY = load_prompt("rank_memory")
META_FILTER = load_prompt("meta_filter")
ASK_SUMMARY = load_prompt("ask_summary")
# Shared Jinja2 environment: prompts are plain text (no autoescape);
# trim_blocks/lstrip_blocks keep template control tags from leaving
# stray blank lines or indentation in rendered prompts.
PROMPT_JINJA_ENV = jinja2.Environment(autoescape=False, trim_blocks=True, lstrip_blocks=True)
def citation_prompt(user_defined_prompts: dict = {}) -> str:
    """Render the citation guidelines, preferring a user-defined override."""
    source = user_defined_prompts.get("citation_guidelines", CITATION_PROMPT_TEMPLATE)
    return PROMPT_JINJA_ENV.from_string(source).render()
def citation_plus(sources: str) -> str:
    """Render the extended citation prompt, embedding the default
    citation guidelines as the worked example."""
    return PROMPT_JINJA_ENV.from_string(CITATION_PLUS_TEMPLATE).render(example=citation_prompt(), sources=sources)
async def keyword_extraction(chat_mdl, content, topn=3):
    """Ask the chat model for the top-N keywords of `content`.

    Returns the raw model reply, or "" on model error.
    """
    prompt = PROMPT_JINJA_ENV.from_string(KEYWORD_PROMPT_TEMPLATE).render(content=content, topn=topn)
    msg = [{"role": "system", "content": prompt}, {"role": "user", "content": "Output: "}]
    _, msg = message_fit_in(msg, chat_mdl.max_length)
    reply = await chat_mdl.async_chat(prompt, msg[1:], {"temperature": 0.2})
    if isinstance(reply, tuple):
        reply = reply[0]
    # Strip any reasoning-model <think> preamble.
    reply = re.sub(r"^.*</think>", "", reply, flags=re.DOTALL)
    return "" if reply.find("**ERROR**") >= 0 else reply
async def question_proposal(chat_mdl, content, topn=3):
    """Ask the chat model to propose top-N questions answerable by `content`.

    Returns the raw model reply, or "" on model error.
    """
    prompt = PROMPT_JINJA_ENV.from_string(QUESTION_PROMPT_TEMPLATE).render(content=content, topn=topn)
    msg = [{"role": "system", "content": prompt}, {"role": "user", "content": "Output: "}]
    _, msg = message_fit_in(msg, chat_mdl.max_length)
    reply = await chat_mdl.async_chat(prompt, msg[1:], {"temperature": 0.2})
    if isinstance(reply, tuple):
        reply = reply[0]
    # Strip any reasoning-model <think> preamble.
    reply = re.sub(r"^.*</think>", "", reply, flags=re.DOTALL)
    return "" if reply.find("**ERROR**") >= 0 else reply
async def full_question(tenant_id=None, llm_id=None, messages=[], language=None, chat_mdl=None):
    """Rewrite the latest user question into a standalone, fully-specified one.

    Uses the conversation history plus today's/adjacent ISO dates (so the
    model can resolve relative dates). Falls back to the raw last message
    when the model errors.
    """
    from common.constants import LLMType
    from api.db.services.llm_service import LLMBundle
    from api.db.services.tenant_llm_service import TenantLLMService

    if not chat_mdl:
        # Use a vision-capable bundle when the configured LLM is image2text.
        if TenantLLMService.llm_id2llm_type(llm_id) == "image2text":
            chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
        else:
            chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    conv = []
    for m in messages:
        if m["role"] not in ["user", "assistant"]:
            continue
        conv.append("{}: {}".format(m["role"].upper(), m["content"]))
    conversation = "\n".join(conv)
    today = datetime.date.today().isoformat()
    yesterday = (datetime.date.today() - datetime.timedelta(days=1)).isoformat()
    tomorrow = (datetime.date.today() + datetime.timedelta(days=1)).isoformat()
    template = PROMPT_JINJA_ENV.from_string(FULL_QUESTION_PROMPT_TEMPLATE)
    rendered_prompt = template.render(
        today=today,
        yesterday=yesterday,
        tomorrow=tomorrow,
        conversation=conversation,
        language=language,
    )
    ans = await chat_mdl.async_chat(rendered_prompt, [{"role": "user", "content": "Output: "}])
    # Strip any reasoning-model <think> preamble.
    ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
    return ans if ans.find("**ERROR**") < 0 else messages[-1]["content"]
async def cross_languages(tenant_id, llm_id, query, languages=[]):
    """Translate `query` into each of `languages`, newline-joined.

    The model separates translations with "===". Falls back to the
    original query on model error.
    """
    from common.constants import LLMType
    from api.db.services.llm_service import LLMBundle
    from api.db.services.tenant_llm_service import TenantLLMService

    # Use a vision-capable bundle when the configured LLM is image2text.
    if llm_id and TenantLLMService.llm_id2llm_type(llm_id) == "image2text":
        chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
    else:
        chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    rendered_sys_prompt = PROMPT_JINJA_ENV.from_string(CROSS_LANGUAGES_SYS_PROMPT_TEMPLATE).render()
    rendered_user_prompt = PROMPT_JINJA_ENV.from_string(CROSS_LANGUAGES_USER_PROMPT_TEMPLATE).render(query=query,
                                                                                                     languages=languages)
    ans = await chat_mdl.async_chat(rendered_sys_prompt, [{"role": "user", "content": rendered_user_prompt}],
                                    {"temperature": 0.2})
    # Strip any reasoning-model <think> preamble.
    ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
    if ans.find("**ERROR**") >= 0:
        return query
    # Drop the "Output:" echo and blank lines, then split on "===".
    return "\n".join([a for a in re.sub(r"(^Output:|\n+)", "", ans, flags=re.DOTALL).split("===") if a.strip()])
async def content_tagging(chat_mdl, content, all_tags, examples, topn=3):
    """Tag `content` with up to `topn` tags from `all_tags` using few-shot examples.

    Note: mutates each example in place, adding a pre-rendered "tags_json"
    field. Returns {tag: positive-int relevance}. Raises on model error or
    unrecoverable JSON in the reply.
    """
    template = PROMPT_JINJA_ENV.from_string(CONTENT_TAGGING_PROMPT_TEMPLATE)
    for ex in examples:
        ex["tags_json"] = json.dumps(ex[TAG_FLD], indent=2, ensure_ascii=False)
    rendered_prompt = template.render(
        topn=topn,
        all_tags=all_tags,
        examples=examples,
        content=content,
    )
    msg = [{"role": "system", "content": rendered_prompt}, {"role": "user", "content": "Output: "}]
    _, msg = message_fit_in(msg, chat_mdl.max_length)
    kwd = await chat_mdl.async_chat(rendered_prompt, msg[1:], {"temperature": 0.5})
    if isinstance(kwd, tuple):
        kwd = kwd[0]
    # Strip any reasoning-model <think> preamble.
    kwd = re.sub(r"^.*</think>", "", kwd, flags=re.DOTALL)
    if kwd.find("**ERROR**") >= 0:
        raise Exception(kwd)
    try:
        obj = json_repair.loads(kwd)
    except json_repair.JSONDecodeError:
        try:
            # Fallback: strip echoed prompt/role markers and extract the
            # first {...} object from the raw reply.
            result = kwd.replace(rendered_prompt[:-1], "").replace("user", "").replace("model", "").strip()
            result = "{" + result.split("{")[1].split("}")[0] + "}"
            obj = json_repair.loads(result)
        except Exception as e:
            logging.exception(f"JSON parsing error: {result} -> {e}")
            raise e
    res = {}
    # Keep only tags whose value parses as a positive integer.
    for k, v in obj.items():
        try:
            if int(v) > 0:
                res[str(k)] = int(v)
        except Exception:
            pass
    return res
def vision_llm_describe_prompt(page=None) -> str:
    """Render the vision-LLM page-description prompt for an optional page number."""
    return PROMPT_JINJA_ENV.from_string(VISION_LLM_DESCRIBE_PROMPT).render(page=page)
def vision_llm_figure_describe_prompt() -> str:
    """Render the vision-LLM figure-description prompt."""
    return PROMPT_JINJA_ENV.from_string(VISION_LLM_FIGURE_DESCRIBE_PROMPT).render()
def tool_schema(tools_description: list[dict], complete_task=False):
    """Render tool JSON schemas as numbered markdown sections.

    Args:
        tools_description: OpenAI-style tool descriptors
            ({"function": {"name": ...}, ...}).
        complete_task: when True, prepend the built-in `complete_task` tool.

    Returns:
        "" when no tools are given, otherwise one "## i. name" section
        per tool (insertion order preserved, later duplicates win).
    """
    if not tools_description:
        return ""
    desc = {}
    if complete_task:
        desc[COMPLETE_TASK] = {
            "type": "function",
            "function": {
                "name": COMPLETE_TASK,
                "description": "When you have the final answer and are ready to complete the task, call this function with your answer",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "answer": {"type": "string", "description": "The final answer to the user's question"}},
                    "required": ["answer"]
                }
            }
        }
    # Key by function name (the old enumerate index was unused).
    for tool in tools_description:
        desc[tool["function"]["name"]] = tool
    return "\n\n".join(
        f"## {i + 1}. {fnm}\n{json.dumps(des, ensure_ascii=False, indent=4)}"
        for i, (fnm, des) in enumerate(desc.items())
    )
def form_history(history, limit=-6):
    """Format the trailing non-system turns as "USER:"/"AGENT:" lines,
    truncating each turn's content to 2048 characters."""
    lines = []
    for turn in history[limit:]:
        if turn["role"] == "system":
            continue
        speaker = "USER" if turn["role"].upper() == "USER" else "AGENT"
        text = turn["content"][:2048] + ("..." if len(turn["content"]) > 2048 else "")
        lines.append(f"\n{speaker}: {text}")
    return "".join(lines)
async def analyze_task_async(chat_mdl, prompt, task_name, tools_description: list[dict],
                             user_defined_prompts: dict = {}):
    """Ask the model to analyze `task_name` given the agent prompt and tools.

    The analysis prompt can be overridden via
    user_defined_prompts["task_analysis"]. Returns the analysis text,
    or "" on model error.
    """
    tools_desc = tool_schema(tools_description)
    context = ""
    if user_defined_prompts.get("task_analysis"):
        template = PROMPT_JINJA_ENV.from_string(user_defined_prompts["task_analysis"])
    else:
        template = PROMPT_JINJA_ENV.from_string(ANALYZE_TASK_SYSTEM + "\n\n" + ANALYZE_TASK_USER)
    context = template.render(task=task_name, context=context, agent_prompt=prompt, tools_desc=tools_desc)
    kwd = await chat_mdl.async_chat(context, [{"role": "user", "content": "Please analyze it."}])
    if isinstance(kwd, tuple):
        kwd = kwd[0]
    # Strip any reasoning-model <think> preamble.
    kwd = re.sub(r"^.*</think>", "", kwd, flags=re.DOTALL)
    if kwd.find("**ERROR**") >= 0:
        return ""
    return kwd
async def next_step_async(chat_mdl, history: list, tools_description: list[dict], task_desc,
                          user_defined_prompts: dict = {}):
    """Ask the model which tool to call next.

    Returns (reply, token_count); ("", 0) when no tools are available.
    The planning prompt can be overridden via
    user_defined_prompts["plan_generation"].
    """
    if not tools_description:
        return "", 0
    desc = tool_schema(tools_description)
    template = PROMPT_JINJA_ENV.from_string(user_defined_prompts.get("plan_generation", NEXT_STEP))
    user_prompt = "\nWhat's the next tool to call? If ready OR IMPOSSIBLE TO BE READY, then call `complete_task`."
    hist = deepcopy(history)
    # Append the steering question to the trailing user turn (or add one).
    if hist[-1]["role"] == "user":
        hist[-1]["content"] += user_prompt
    else:
        hist.append({"role": "user", "content": user_prompt})
    json_str = await chat_mdl.async_chat(
        template.render(task_analysis=task_desc, desc=desc, today=datetime.datetime.now().strftime("%Y-%m-%d")),
        hist[1:],
        stop=["<|stop|>"],
    )
    # Token count is measured before stripping the <think> preamble.
    tk_cnt = num_tokens_from_string(json_str)
    json_str = re.sub(r"^.*</think>", "", json_str, flags=re.DOTALL)
    return json_str, tk_cnt
async def reflect_async(chat_mdl, history: list[dict], tool_call_res: list[Tuple], user_defined_prompts: dict | None = None):
    """Reflect on a round of tool calls against the user's goal.

    Args:
        chat_mdl: Chat model exposing ``async_chat`` and ``max_length``.
        history: Full chat history; ``history[1]["content"]`` is used as the goal.
        tool_call_res: (tool_name, result) pairs from the last round.
        user_defined_prompts: Optional overrides; key "reflection" replaces the
            built-in REFLECT template.

    Returns:
        A markdown block containing the raw tool-call observations and the
        model's reflection (with any ``</think>`` preamble stripped).
    """
    user_defined_prompts = user_defined_prompts or {}  # was a mutable default argument
    tool_calls = [{"name": p[0], "result": p[1]} for p in tool_call_res]
    goal = history[1]["content"]
    template = PROMPT_JINJA_ENV.from_string(user_defined_prompts.get("reflection", REFLECT))
    user_prompt = template.render(goal=goal, tool_calls=tool_calls)
    hist = deepcopy(history)
    if hist[-1]["role"] == "user":
        hist[-1]["content"] += user_prompt
    else:
        hist.append({"role": "user", "content": user_prompt})
    _, msg = message_fit_in(hist, chat_mdl.max_length)
    ans = await chat_mdl.async_chat(msg[0]["content"], msg[1:])
    ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
    return """
**Observation**
{}
**Reflection**
{}
""".format(json.dumps(tool_calls, ensure_ascii=False, indent=2), ans)
def form_message(system_prompt, user_prompt):
    """Build a minimal two-turn message list for a chat-model call."""
    system_msg = {"role": "system", "content": system_prompt}
    user_msg = {"role": "user", "content": user_prompt}
    return [system_msg, user_msg]
def structured_output_prompt(schema=None) -> str:
    """Render the structured-output instruction prompt for *schema*."""
    return PROMPT_JINJA_ENV.from_string(STRUCTURED_OUTPUT_PROMPT).render(schema=schema)
async def tool_call_summary(chat_mdl, name: str, params: dict, result: str, user_defined_prompts: dict | None = None) -> str:
    """Summarize one tool call (name/params/result) for the agent's memory.

    Args:
        chat_mdl: Chat model exposing ``async_chat`` and ``max_length``.
        name: Tool name.
        params: Parameters the tool was called with (JSON-dumped into the prompt).
        result: The tool's raw result text.
        user_defined_prompts: Accepted for signature parity with the other
            prompt helpers; currently unused. (Default was a mutable dict.)

    Returns:
        The model's summary with any ``</think>`` preamble stripped.
    """
    template = PROMPT_JINJA_ENV.from_string(SUMMARY4MEMORY)
    system_prompt = template.render(name=name,
                                    params=json.dumps(params, ensure_ascii=False, indent=2),
                                    result=result)
    user_prompt = "→ Summary: "
    _, msg = message_fit_in(form_message(system_prompt, user_prompt), chat_mdl.max_length)
    ans = await chat_mdl.async_chat(msg[0]["content"], msg[1:])
    return re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
async def rank_memories_async(chat_mdl, goal: str, sub_goal: str, tool_call_summaries: list[str],
                              user_defined_prompts: dict | None = None):
    """Ask the model to rank tool-call summaries by usefulness for *sub_goal*.

    Args:
        chat_mdl: Chat model exposing ``async_chat`` and ``max_length``.
        goal: The overall task goal.
        sub_goal: The current sub-goal the ranking should serve.
        tool_call_summaries: Candidate memory snippets, indexed 0..n-1 in the prompt.
        user_defined_prompts: Accepted for signature parity with the other
            prompt helpers; currently unused. (Default was a mutable dict.)

    Returns:
        The raw ranking reply with any ``</think>`` preamble stripped.
    """
    template = PROMPT_JINJA_ENV.from_string(RANK_MEMORY)
    system_prompt = template.render(goal=goal, sub_goal=sub_goal,
                                    results=[{"i": i, "content": s} for i, s in enumerate(tool_call_summaries)])
    user_prompt = " → rank: "
    _, msg = message_fit_in(form_message(system_prompt, user_prompt), chat_mdl.max_length)
    ans = await chat_mdl.async_chat(msg[0]["content"], msg[1:], stop="<|stop|>")
    return re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
async def gen_meta_filter(chat_mdl, meta_data: dict, query: str) -> dict:
    """Derive metadata filter conditions for *query* from the available metadata.

    Builds a compact metadata-structure summary (dict values are reduced to
    their key lists), asks the model to generate filters, and validates the
    reply. Falls back to ``{"conditions": []}`` when the reply cannot be
    parsed into the expected shape.
    """
    meta_data_structure = {
        key: (list(values.keys()) if isinstance(values, dict) else values)
        for key, values in meta_data.items()
    }
    sys_prompt = PROMPT_JINJA_ENV.from_string(META_FILTER).render(
        current_date=datetime.datetime.today().strftime('%Y-%m-%d'),
        metadata_keys=json.dumps(meta_data_structure),
        user_question=query
    )
    ans = await chat_mdl.async_chat(sys_prompt, [{"role": "user", "content": "Generate filters:"}])
    # Strip chain-of-thought and markdown code fences before parsing.
    ans = re.sub(r"(^.*</think>|```json\n|```\n*$)", "", ans, flags=re.DOTALL)
    try:
        ans = json_repair.loads(ans)
        assert isinstance(ans, dict), ans
        assert "conditions" in ans and isinstance(ans["conditions"], list), ans
        return ans
    except Exception:
        logging.exception(f"Loading json failure: {ans}")
        return {"conditions": []}
async def gen_json(system_prompt: str, user_prompt: str, chat_mdl, gen_conf=None):
    """Chat the model and parse its reply as JSON, with an LLM-level cache.

    Args:
        system_prompt: System prompt for the model.
        user_prompt: User prompt for the model.
        chat_mdl: Chat model exposing ``async_chat``, ``llm_name`` and ``max_length``.
        gen_conf: Optional generation config forwarded to the model and the cache key.

    Returns:
        The parsed JSON value (dict/list/...), or ``None`` when the reply
        cannot be repaired into valid JSON — callers must tolerate None.
    """
    from graphrag.utils import get_llm_cache, set_llm_cache
    cached = get_llm_cache(chat_mdl.llm_name, system_prompt, user_prompt, gen_conf)
    if cached:
        return json_repair.loads(cached)
    _, msg = message_fit_in(form_message(system_prompt, user_prompt), chat_mdl.max_length)
    ans = await chat_mdl.async_chat(msg[0]["content"], msg[1:], gen_conf=gen_conf)
    # Strip chain-of-thought and markdown code fences before parsing.
    ans = re.sub(r"(^.*</think>|```json\n|```\n*$)", "", ans, flags=re.DOTALL)
    try:
        res = json_repair.loads(ans)
        set_llm_cache(chat_mdl.llm_name, system_prompt, ans, user_prompt, gen_conf)
        return res
    except Exception:
        logging.exception(f"Loading json failure: {ans}")
        return None  # was an implicit None; made explicit so the contract is visible
# Prompt template used to decide whether a page contains a table of contents.
TOC_DETECTION = load_prompt("toc_detection")
async def detect_table_of_contents(page_1024: list[str], chat_mdl):
    """Collect the run of leading pages that contains the table of contents.

    Inspects at most the first 22 sections; once at least one section has been
    collected, stops at the first page the model says has no TOC.
    NOTE(review): sections *before* the first positive detection are appended
    too — presumably intentional to keep the run contiguous from page one;
    confirm.
    """
    toc_secs = []
    for sec in page_1024[:22]:
        ans = await gen_json(PROMPT_JINJA_ENV.from_string(TOC_DETECTION).render(page_txt=sec), "Only JSON please.",
                             chat_mdl)
        # gen_json may return None (or a non-dict) on parse failure; treat
        # that as "no TOC on this page" instead of crashing on ans["exists"].
        exists = bool(ans.get("exists")) if isinstance(ans, dict) else False
        if toc_secs and not exists:
            break
        toc_secs.append(sec)
    return toc_secs
# Prompt templates for extracting a structured TOC, and for continuing a
# partially-extracted one.
TOC_EXTRACTION = load_prompt("toc_extraction")
TOC_EXTRACTION_CONTINUE = load_prompt("toc_extraction_continue")
async def extract_table_of_contents(toc_pages, chat_mdl):
    """Extract a structured table of contents from raw TOC page texts."""
    if not toc_pages:
        return []
    rendered = PROMPT_JINJA_ENV.from_string(TOC_EXTRACTION).render(toc_page="\n".join(toc_pages))
    return await gen_json(rendered, "Only JSON please.", chat_mdl)
async def toc_index_extractor(toc: list[dict], content: str, chat_mdl):
    """Ask the model to attach <physical_index_X> page markers to TOC entries.

    *content* must contain the tagged document pages; only entries whose
    titles appear in those pages get a physical_index.
    """
    instructions = """
You are given a table of contents in a json format and several pages of a document, your job is to add the physical_index to the table of contents in the json format.
The provided pages contains tags like <physical_index_X> and <physical_index_X> to indicate the physical location of the page X.
The structure variable is the numeric system which represents the index of the hierarchy section in the table of contents. For example, the first section has structure index 1, the first subsection has structure index 1.1, the second subsection has structure index 1.2, etc.
The response should be in the following JSON format:
[
{
"structure": <structure index, "x.x.x" or None> (string),
"title": <title of the section>,
"physical_index": "<physical_index_X>" (keep the format)
},
...
]
Only add the physical_index to the sections that are in the provided pages.
If the title of the section are not in the provided pages, do not add the physical_index to it.
Directly return the final JSON structure. Do not output anything else."""
    prompt = "".join([
        instructions,
        '\nTable of contents:\n',
        json.dumps(toc, ensure_ascii=False, indent=2),
        '\nDocument pages:\n',
        content,
    ])
    return await gen_json(prompt, "Only JSON please.", chat_mdl)
# Prompt template asking whether a specific TOC entry appears in a given section.
TOC_INDEX = load_prompt("toc_index")
async def table_of_contents_index(toc_arr: list[dict], sections: list[str], chat_mdl):
    """Map each TOC entry to the index of the section where it occurs.

    Strategy: (1) exact, whitespace-insensitive title matches; (2) pick the
    longest order-consistent assignment among those matches via DFS; (3) for
    still-unmatched entries, ask the LLM to locate them within the section
    range narrowed down by their matched neighbors.

    Mutates *toc_arr* in place, setting an "indices" list on each entry, and
    returns it.
    """
    toc_map = {}
    for i, it in enumerate(toc_arr):
        # Two lookup keys per entry: "structure+title" and the bare title.
        k1 = (it["structure"] + it["title"]).replace(" ", "")
        k2 = it["title"].strip()
        if k1 not in toc_map:
            toc_map[k1] = []
        if k2 not in toc_map:
            toc_map[k2] = []
        toc_map[k1].append(i)
        toc_map[k2].append(i)
    for it in toc_arr:
        it["indices"] = []
    for i, sec in enumerate(sections):
        sec = sec.strip()
        if sec.replace(" ", "") in toc_map:
            for j in toc_map[sec.replace(" ", "")]:
                toc_arr[j]["indices"].append(i)

    all_pathes = []

    def dfs(start, path):
        # Enumerate assignments (section_idx, toc_idx) with non-decreasing
        # section order; entries without candidates are skipped.
        nonlocal all_pathes
        if start >= len(toc_arr):
            if path:
                all_pathes.append(path)
            return
        if not toc_arr[start]["indices"]:
            dfs(start + 1, path)
            return
        added = False
        for j in toc_arr[start]["indices"]:
            if path and j < path[-1][0]:
                continue
            _path = deepcopy(path)
            _path.append((j, start))
            added = True
            dfs(start + 1, _path)
        if not added and path:
            all_pathes.append(path)

    dfs(0, [])
    # When no title matched any section, all_pathes is empty and max() would
    # raise ValueError — fall back to "nothing matched".
    path = max(all_pathes, key=lambda x: len(x)) if all_pathes else []
    for it in toc_arr:
        it["indices"] = []
    for j, i in path:
        toc_arr[i]["indices"] = [j]
    logging.debug(json.dumps(toc_arr, ensure_ascii=False, indent=2))  # was a stray print()

    i = 0
    while i < len(toc_arr):
        it = toc_arr[i]
        if it["indices"]:
            i += 1
            continue
        # Search window: from the previous matched entry's section ...
        if i > 0 and toc_arr[i - 1]["indices"]:
            st_i = toc_arr[i - 1]["indices"][-1]
        else:
            st_i = 0
        # ... to the next matched entry's section (or the end of the document).
        e = i + 1
        while e < len(toc_arr) and not toc_arr[e]["indices"]:
            e += 1
        if e >= len(toc_arr):
            e = len(sections)
        else:
            e = toc_arr[e]["indices"][0]
        for j in range(st_i, min(e + 1, len(sections))):
            ans = await gen_json(PROMPT_JINJA_ENV.from_string(TOC_INDEX).render(
                structure=it["structure"],
                title=it["title"],
                text=sections[j]), "Only JSON please.", chat_mdl)
            # gen_json may return None/non-dict on parse failure; skip such pages.
            if isinstance(ans, dict) and ans.get("exist") == "yes":
                it["indices"].append(j)
                break
        i += 1
    return toc_arr
async def check_if_toc_transformation_is_complete(content, toc, chat_mdl):
    """Ask the model whether the cleaned TOC fully covers the raw TOC text.

    Returns the model-reported "yes"/"no", or None when the reply could not
    be parsed — callers compare against "yes", so None behaves like "no".
    """
    prompt = """
You are given a raw table of contents and a table of contents.
Your job is to check if the table of contents is complete.
Reply format:
{{
"thinking": <why do you think the cleaned table of contents is complete or not>
"completed": "yes" or "no"
}}
Directly return the final JSON structure. Do not output anything else."""
    prompt = prompt + '\n Raw Table of contents:\n' + content + '\n Cleaned Table of contents:\n' + toc
    response = await gen_json(prompt, "Only JSON please.", chat_mdl)
    # gen_json may return None (or a dict missing "completed") on parse
    # failure; previously this raised TypeError/KeyError.
    return response.get('completed') if isinstance(response, dict) else None
async def toc_transformer(toc_pages, chat_mdl):
    """Transform raw TOC page text into a structured [{structure, title}, ...] list.

    Repeatedly asks the model to continue the JSON until a separate
    completeness check answers "yes", the model returns nothing new, or the
    new fragment is already contained in what we have (guard against the
    model repeating itself). Dotted leader runs are stripped from titles.
    """
    init_prompt = """
You are given a table of contents, You job is to transform the whole table of content into a JSON format included table_of_contents.
The `structure` is the numeric system which represents the index of the hierarchy section in the table of contents. For example, the first section has structure index 1, the first subsection has structure index 1.1, the second subsection has structure index 1.2, etc.
The `title` is a short phrase or a several-words term.
The response should be in the following JSON format:
[
{
"structure": <structure index, "x.x.x" or None> (string),
"title": <title of the section>
},
...
],
You should transform the full table of contents in one go.
Directly return the final JSON structure, do not output anything else. """
    toc_content = "\n".join(toc_pages)
    prompt = init_prompt + '\n Given table of contents\n:' + toc_content

    def clean_toc(arr):
        # Strip dotted-leader runs (e.g. "Intro ......... 5") left over from layout.
        for a in arr:
            a["title"] = re.sub(r"[.·….]{2,}", "", a["title"])

    last_complete = await gen_json(prompt, "Only JSON please.", chat_mdl)
    if_complete = await check_if_toc_transformation_is_complete(toc_content,
                                                               json.dumps(last_complete, ensure_ascii=False, indent=2),
                                                               chat_mdl)
    clean_toc(last_complete)
    if if_complete == "yes":
        return last_complete

    while not (if_complete == "yes"):
        # Show only the tail of what we have so far to keep the prompt short.
        prompt = f"""
Your task is to continue the table of contents json structure, directly output the remaining part of the json structure.
The response should be in the following JSON format:
The raw table of contents json structure is:
{toc_content}
The incomplete transformed table of contents json structure is:
{json.dumps(last_complete[-24:], ensure_ascii=False, indent=2)}
Please continue the json structure, directly output the remaining part of the json structure."""
        new_complete = await gen_json(prompt, "Only JSON please.", chat_mdl)
        # Stop when the model adds nothing, or repeats content we already have.
        if not new_complete or str(last_complete).find(str(new_complete)) >= 0:
            break
        clean_toc(new_complete)
        last_complete.extend(new_complete)
        if_complete = await check_if_toc_transformation_is_complete(toc_content,
                                                                   json.dumps(last_complete, ensure_ascii=False,
                                                                              indent=2), chat_mdl)
    return last_complete
# Prompt template for assigning hierarchy levels to TOC titles.
TOC_LEVELS = load_prompt("assign_toc_levels")
async def assign_toc_levels(toc_secs, chat_mdl, gen_conf=None):
    """Ask the LLM to assign hierarchy levels to TOC titles.

    Args:
        toc_secs: Raw TOC titles; stringified and sent as the user prompt.
        chat_mdl: Chat model.
        gen_conf: Optional generation config; defaults to
            ``{"temperature": 0.2}``. (Was a mutable default argument.)

    Returns:
        The parsed JSON reply, or [] when *toc_secs* is empty.
    """
    if gen_conf is None:
        gen_conf = {"temperature": 0.2}
    if not toc_secs:
        return []
    return await gen_json(
        PROMPT_JINJA_ENV.from_string(TOC_LEVELS).render(),
        str(toc_secs),
        chat_mdl,
        gen_conf
    )
# Prompt templates for generating a TOC directly from text chunks with text LLMs.
TOC_FROM_TEXT_SYSTEM = load_prompt("toc_from_text_system")
TOC_FROM_TEXT_USER = load_prompt("toc_from_text_user")
async def gen_toc_from_text(txt_info: dict, chat_mdl, callback=None):
    """Generate TOC candidates for one batch of chunks, in place.

    Reads txt_info["chunks"] and stores the model's list under
    txt_info["toc"]. Any failure is logged and leaves the key unset.
    """
    if callback:
        callback(msg="")
    try:
        system_prompt = PROMPT_JINJA_ENV.from_string(TOC_FROM_TEXT_SYSTEM).render()
        chunk_lines = "\n".join(json.dumps(d, ensure_ascii=False) for d in txt_info["chunks"])
        user_prompt = PROMPT_JINJA_ENV.from_string(TOC_FROM_TEXT_USER).render(text=chunk_lines)
        ans = await gen_json(
            system_prompt,
            user_prompt,
            chat_mdl,
            gen_conf={"temperature": 0.0, "top_p": 0.9}
        )
        # Only keep a real (non-string, non-empty) JSON structure.
        txt_info["toc"] = ans if ans and not isinstance(ans, str) else []
    except Exception as e:
        logging.exception(e)
def split_chunks(chunks, max_length: int):
    """Pack chunks into token-bounded batches.

    Returns a list of batches, where each batch is a list of single-entry
    dicts mapping the chunk's original index to its text, e.g.
    ``[[{0: "..."}, {1: "..."}], [{2: "..."}]]``. (The previous docstring
    described a ``{"id": ..., "text": ...}`` shape the code never produced.)
    A single chunk is never split, even when it alone exceeds *max_length*.
    """
    result = []
    batch, batch_tokens = [], 0
    for idx, chunk in enumerate(chunks):
        t = num_tokens_from_string(chunk)
        # Flush only a non-empty batch; previously an oversized first chunk
        # caused an empty batch to be emitted.
        if batch and batch_tokens + t > max_length:
            result.append(batch)
            batch, batch_tokens = [], 0
        batch.append({idx: chunk})
        batch_tokens += t
    if batch:
        result.append(batch)
    return result
async def run_toc_from_text(chunks, chat_mdl, callback=None):
    """Build a leveled table of contents from raw text chunks.

    Pipeline: split chunks into token-budgeted batches, generate TOC
    candidates per batch concurrently, filter implausible titles, ask the LLM
    to assign hierarchy levels, then merge levels with the source entries.

    Returns:
        A list of ``{"level", "title", "chunk_id"}`` dicts (possibly empty).

    Raises:
        Whatever the first failing generation task raised (siblings are
        cancelled first).
    """
    input_budget = int(chat_mdl.max_length * INPUT_UTILIZATION) - num_tokens_from_string(
        TOC_FROM_TEXT_USER + TOC_FROM_TEXT_SYSTEM
    )
    # NOTE(review): this CAPS the per-batch budget at 1024 tokens; if a lower
    # bound was intended the comparison is inverted — confirm.
    input_budget = 1024 if input_budget > 1024 else input_budget
    chunk_sections = split_chunks(chunks, input_budget)
    titles = []
    chunks_res = []
    tasks = []
    # One concurrent generation task per non-empty batch; each task writes
    # its result into its own dict in chunks_res.
    for i, chunk in enumerate(chunk_sections):
        if not chunk:
            continue
        chunks_res.append({"chunks": chunk})
        tasks.append(asyncio.create_task(gen_toc_from_text(chunks_res[-1], chat_mdl, callback)))
    try:
        await asyncio.gather(*tasks, return_exceptions=False)
    except Exception as e:
        # Cancel in-flight siblings before propagating the first failure.
        logging.error(f"Error generating TOC: {e}")
        for t in tasks:
            t.cancel()
        await asyncio.gather(*tasks, return_exceptions=True)
        raise
    for chunk in chunks_res:
        titles.extend(chunk.get("toc", []))

    # Filter out entries with title == -1
    prune = len(titles) > 512
    max_len = 12 if prune else 22
    filtered = []
    for x in titles:
        if not isinstance(x, dict) or not x.get("title") or x["title"] == "-1":
            continue
        if len(rag_tokenizer.tokenize(x["title"]).split(" ")) > max_len:
            continue
        # Drop titles made only of digits/punctuation (page numbers, dates...).
        if re.match(r"[0-9,.()/ -]+$", x["title"]):
            continue
        filtered.append(x)
    logging.info(f"\n\nFiltered TOC sections:\n{filtered}")
    if not filtered:
        return []

    # Generate initial level (level/title)
    raw_structure = [x.get("title", "") for x in filtered]
    # Assign hierarchy levels using LLM
    toc_with_levels = await assign_toc_levels(raw_structure, chat_mdl, {"temperature": 0.0, "top_p": 0.9})
    if not toc_with_levels:
        return []

    # Merge structure and content (by index)
    prune = len(toc_with_levels) > 512
    max_lvl = "0"
    # NOTE(review): levels are compared as strings, so ordering is
    # lexicographic ("10" < "2") — confirm levels are single digits.
    sorted_list = sorted([t.get("level", "0") for t in toc_with_levels if isinstance(t, dict)])
    if sorted_list:
        max_lvl = sorted_list[-1]
    merged = []
    for _, (toc_item, src_item) in enumerate(zip(toc_with_levels, filtered)):
        # When pruning, drop the deepest level to keep the TOC compact.
        if prune and toc_item.get("level", "0") >= max_lvl:
            continue
        merged.append({
            "level": toc_item.get("level", "0"),
            "title": toc_item.get("title", ""),
            "chunk_id": src_item.get("chunk_id", ""),
        })
    return merged
# Prompt templates for ranking TOC entries by relevance to a user query.
TOC_RELEVANCE_SYSTEM = load_prompt("toc_relevance_system")
TOC_RELEVANCE_USER = load_prompt("toc_relevance_user")
async def relevant_chunks_with_toc(query: str, toc: list[dict], chat_mdl, topn: int = 6):
import numpy as np
try:
ans = await gen_json(
PROMPT_JINJA_ENV.from_string(TOC_RELEVANCE_SYSTEM).render(),
PROMPT_JINJA_ENV.from_string(TOC_RELEVANCE_USER).render(query=query, toc_json="[\n%s\n]\n" % "\n".join(
[json.dumps({"level": d["level"], "title": d["title"]}, ensure_ascii=False) for d in toc])),
chat_mdl,
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | true |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/rag/prompts/__init__.py | rag/prompts/__init__.py | from . import generator
# Re-export every public name from rag.prompts.generator so callers can do
# `from rag.prompts import X` regardless of which submodule defines X.
# NOTE(review): dir(generator) also picks up modules generator itself imports
# (json, re, ...), so those get re-exported too — confirm this is intended.
__all__ = [name for name in dir(generator)
           if not name.startswith('_')]
globals().update({name: getattr(generator, name) for name in __all__})
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/rag/nlp/search.py | rag/nlp/search.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import json
import logging
import re
import math
from collections import OrderedDict, defaultdict
from dataclasses import dataclass
from rag.prompts.generator import relevant_chunks_with_toc
from rag.nlp import rag_tokenizer, query
import numpy as np
from common.doc_store.doc_store_base import MatchDenseExpr, FusionExpr, OrderByExpr, DocStoreConnection
from common.string_utils import remove_redundant_spaces
from common.float_utils import get_float
from common.constants import PAGERANK_FLD, TAG_FLD
from common import settings
def index_name(uid):
    """Return the per-tenant search index name (``ragflow_<uid>``)."""
    return "ragflow_{}".format(uid)
class Dealer:
    def __init__(self, dataStore: DocStoreConnection):
        """Bind the dealer to a doc-store backend and a fulltext query builder."""
        self.qryr = query.FulltextQueryer()
        self.dataStore = dataStore
    @dataclass
    class SearchResult:
        """Container for the outcome of one doc-store search round-trip."""
        total: int  # total hit count reported by the engine
        ids: list[str]  # chunk ids in rank order
        query_vector: list[float] | None = None  # question embedding, when dense search was used
        field: dict | None = None  # chunk_id -> stored field dict
        highlight: dict | None = None  # chunk_id -> highlighted snippet
        aggregation: list | dict | None = None  # aggregation buckets (e.g. per document)
        keywords: list[str] | None = None  # tokenized query keywords
        group_docs: list[list] | None = None  # optional per-document grouping
def get_vector(self, txt, emb_mdl, topk=10, similarity=0.1):
qv, _ = emb_mdl.encode_queries(txt)
shape = np.array(qv).shape
if len(shape) > 1:
raise Exception(
f"Dealer.get_vector returned array's shape {shape} doesn't match expectation(exact one dimension).")
embedding_data = [get_float(v) for v in qv]
vector_column_name = f"q_{len(embedding_data)}_vec"
return MatchDenseExpr(vector_column_name, embedding_data, 'float', 'cosine', topk, {"similarity": similarity})
def get_filters(self, req):
condition = dict()
for key, field in {"kb_ids": "kb_id", "doc_ids": "doc_id"}.items():
if key in req and req[key] is not None:
condition[field] = req[key]
# TODO(yzc): `available_int` is nullable however infinity doesn't support nullable columns.
for key in ["knowledge_graph_kwd", "available_int", "entity_kwd", "from_entity_kwd", "to_entity_kwd",
"removed_kwd"]:
if key in req and req[key] is not None:
condition[key] = req[key]
return condition
    def search(self, req, idx_names: str | list[str],
               kb_ids: list[str],
               emb_mdl=None,
               highlight: bool | list | None = None,
               rank_feature: dict | None = None
               ):
        """Run a (possibly hybrid) search against the doc store.

        Args:
            req: Request dict; honors "page", "topk", "size", "fields",
                "question", "sort", "similarity" plus the filter keys handled
                by get_filters().
            idx_names: Index name(s) to query.
            kb_ids: Knowledge bases to restrict the search to.
            emb_mdl: Optional embedding model; when given, a dense match is
                fused with the fulltext match (weights 0.05 / 0.95).
            highlight: False/None for none, True for the default fields, or an
                explicit list of fields to highlight.
            rank_feature: Optional ranking-feature weights passed to the store.

        Returns:
            A SearchResult; query_vector is [] when no embedding model was used.
        """
        if highlight is None:
            highlight = False
        filters = self.get_filters(req)
        orderBy = OrderByExpr()
        pg = int(req.get("page", 1)) - 1
        topk = int(req.get("topk", 1024))
        ps = int(req.get("size", topk))
        offset, limit = pg * ps, ps
        src = req.get("fields",
                      ["docnm_kwd", "content_ltks", "kb_id", "img_id", "title_tks", "important_kwd", "position_int",
                       "doc_id", "page_num_int", "top_int", "create_timestamp_flt", "knowledge_graph_kwd",
                       "question_kwd", "question_tks", "doc_type_kwd",
                       "available_int", "content_with_weight", "mom_id", PAGERANK_FLD, TAG_FLD])
        kwds = set([])

        qst = req.get("question", "")
        q_vec = []
        if not qst:
            # No question: plain filtered listing, optionally in layout order.
            if req.get("sort"):
                orderBy.asc("page_num_int")
                orderBy.asc("top_int")
                orderBy.desc("create_timestamp_flt")
            res = self.dataStore.search(src, [], filters, [], orderBy, offset, limit, idx_names, kb_ids)
            total = self.dataStore.get_total(res)
            logging.debug("Dealer.search TOTAL: {}".format(total))
        else:
            highlightFields = ["content_ltks", "title_tks"]
            if not highlight:
                highlightFields = []
            elif isinstance(highlight, list):
                highlightFields = highlight
            matchText, keywords = self.qryr.question(qst, min_match=0.3)
            if emb_mdl is None:
                # Fulltext-only search.
                matchExprs = [matchText]
                res = self.dataStore.search(src, highlightFields, filters, matchExprs, orderBy, offset, limit,
                                            idx_names, kb_ids, rank_feature=rank_feature)
                total = self.dataStore.get_total(res)
                logging.debug("Dealer.search TOTAL: {}".format(total))
            else:
                # Hybrid search: fulltext + dense vector, fused 0.05/0.95.
                matchDense = self.get_vector(qst, emb_mdl, topk, req.get("similarity", 0.1))
                q_vec = matchDense.embedding_data
                if not settings.DOC_ENGINE_INFINITY:
                    src.append(f"q_{len(q_vec)}_vec")

                fusionExpr = FusionExpr("weighted_sum", topk, {"weights": "0.05,0.95"})
                matchExprs = [matchText, matchDense, fusionExpr]

                res = self.dataStore.search(src, highlightFields, filters, matchExprs, orderBy, offset, limit,
                                            idx_names, kb_ids, rank_feature=rank_feature)
                total = self.dataStore.get_total(res)
                logging.debug("Dealer.search TOTAL: {}".format(total))

            # If result is empty, try again with lower min_match
            if total == 0:
                if filters.get("doc_id"):
                    res = self.dataStore.search(src, [], filters, [], orderBy, offset, limit, idx_names, kb_ids)
                    total = self.dataStore.get_total(res)
                else:
                    # NOTE(review): this retry references matchDense/fusionExpr,
                    # which are only defined when emb_mdl is not None — confirm
                    # the fulltext-only zero-hit path cannot reach here.
                    matchText, _ = self.qryr.question(qst, min_match=0.1)
                    matchDense.extra_options["similarity"] = 0.17
                    res = self.dataStore.search(src, highlightFields, filters, [matchText, matchDense, fusionExpr],
                                                orderBy, offset, limit, idx_names, kb_ids,
                                                rank_feature=rank_feature)
                    total = self.dataStore.get_total(res)
                logging.debug("Dealer.search 2 TOTAL: {}".format(total))

            # Expand keywords with fine-grained sub-tokens (length >= 2).
            for k in keywords:
                kwds.add(k)
                for kk in rag_tokenizer.fine_grained_tokenize(k).split():
                    if len(kk) < 2:
                        continue
                    if kk in kwds:
                        continue
                    kwds.add(kk)

        logging.debug(f"TOTAL: {total}")
        ids = self.dataStore.get_doc_ids(res)
        keywords = list(kwds)
        highlight = self.dataStore.get_highlight(res, keywords, "content_with_weight")
        aggs = self.dataStore.get_aggregation(res, "docnm_kwd")
        return self.SearchResult(
            total=total,
            ids=ids,
            query_vector=q_vec,
            aggregation=aggs,
            highlight=highlight,
            field=self.dataStore.get_fields(res, src + ["_score"]),
            keywords=keywords
        )
@staticmethod
def trans2floats(txt):
return [get_float(t) for t in txt.split("\t")]
    def insert_citations(self, answer, chunks, chunk_v,
                         embd_mdl, tkweight=0.1, vtweight=0.9):
        """Append ``[ID:n]`` citation markers to the sentences of *answer*.

        Splits the answer into sentences (keeping fenced ``` code blocks
        intact), embeds each piece, and cites the chunks whose hybrid
        similarity is within 1% of the piece's best match (at most 4 per
        piece). The acceptance threshold starts at 0.63 and is relaxed (x0.8)
        until a citation is found or it drops to 0.3.

        Args:
            answer: The generated answer text.
            chunks: Chunk texts; must align 1:1 with *chunk_v*.
            chunk_v: Chunk embedding vectors.
            embd_mdl: Embedding model used to encode the answer pieces.
            tkweight: Token-similarity weight in the hybrid score.
            vtweight: Vector-similarity weight in the hybrid score.

        Returns:
            (answer_with_citations, set_of_cited_chunk_indices_as_strings).
        """
        assert len(chunks) == len(chunk_v)
        if not chunks:
            return answer, set([])
        pieces = re.split(r"(```)", answer)
        if len(pieces) >= 3:
            # Re-join fenced code blocks so each is treated as a single piece.
            i = 0
            pieces_ = []
            while i < len(pieces):
                if pieces[i] == "```":
                    st = i
                    i += 1
                    while i < len(pieces) and pieces[i] != "```":
                        i += 1
                    if i < len(pieces):
                        i += 1
                    pieces_.append("".join(pieces[st: i]) + "\n")
                else:
                    pieces_.extend(
                        re.split(
                            r"([^\|][;。?!!\n]|[a-z][.?;!][ \n])",
                            pieces[i]))
                    i += 1
            pieces = pieces_
        else:
            pieces = re.split(r"([^\|][;。?!!\n]|[a-z][.?;!][ \n])", answer)
        # The split keeps terminator groups; glue each terminator's first char
        # back onto the preceding sentence.
        for i in range(1, len(pieces)):
            if re.match(r"([^\|][;。?!!\n]|[a-z][.?;!][ \n])", pieces[i]):
                pieces[i - 1] += pieces[i][0]
                pieces[i] = pieces[i][1:]
        # Keep only pieces long enough to be worth citing (>= 5 chars).
        idx = []
        pieces_ = []
        for i, t in enumerate(pieces):
            if len(t) < 5:
                continue
            idx.append(i)
            pieces_.append(t)
        logging.debug("{} => {}".format(answer, pieces_))
        if not pieces_:
            return answer, set([])

        ans_v, _ = embd_mdl.encode(pieces_)
        for i in range(len(chunk_v)):
            if len(ans_v[0]) != len(chunk_v[i]):
                # Dimension mismatch: neutralize the chunk instead of crashing.
                chunk_v[i] = [0.0] * len(ans_v[0])
                logging.warning(
                    "The dimension of query and chunk do not match: {} vs. {}".format(len(ans_v[0]), len(chunk_v[i])))

        assert len(ans_v[0]) == len(chunk_v[0]), "The dimension of query and chunk do not match: {} vs. {}".format(
            len(ans_v[0]), len(chunk_v[0]))

        chunks_tks = [rag_tokenizer.tokenize(self.qryr.rmWWW(ck)).split()
                      for ck in chunks]
        cites = {}
        thr = 0.63
        # Relax the threshold until at least one citation is produced.
        while thr > 0.3 and len(cites.keys()) == 0 and pieces_ and chunks_tks:
            for i, a in enumerate(pieces_):
                sim, tksim, vtsim = self.qryr.hybrid_similarity(ans_v[i],
                                                                chunk_v,
                                                                rag_tokenizer.tokenize(
                                                                    self.qryr.rmWWW(pieces_[i])).split(),
                                                                chunks_tks,
                                                                tkweight, vtweight)
                mx = np.max(sim) * 0.99
                logging.debug("{} SIM: {}".format(pieces_[i], mx))
                if mx < thr:
                    continue
                cites[idx[i]] = list(
                    set([str(ii) for ii in range(len(chunk_v)) if sim[ii] > mx]))[:4]
            thr *= 0.8

        # Reassemble the answer, emitting each citation id at most once.
        res = ""
        seted = set([])
        for i, p in enumerate(pieces):
            res += p
            if i not in idx:
                continue
            if i not in cites:
                continue
            for c in cites[i]:
                assert int(c) < len(chunk_v)
            for c in cites[i]:
                if c in seted:
                    continue
                res += f" [ID:{c}]"
                seted.add(c)

        return res, seted
    def _rank_feature_scores(self, query_rfea, search_res):
        """Compute per-chunk rank-feature scores: tag similarity * 10 + pagerank.

        Args:
            query_rfea: Query-side tag weights (may include PAGERANK_FLD,
                which is excluded from the tag norm); falsy means pagerank only.
            search_res: SearchResult whose fields hold PAGERANK_FLD / TAG_FLD.

        Returns:
            A numpy array aligned with search_res.ids.
        """
        ## For rank feature(tag_fea) scores.
        rank_fea = []
        pageranks = []
        for chunk_id in search_res.ids:
            pageranks.append(search_res.field[chunk_id].get(PAGERANK_FLD, 0))
        pageranks = np.array(pageranks, dtype=float)

        if not query_rfea:
            return np.array([0 for _ in range(len(search_res.ids))]) + pageranks

        q_denor = np.sqrt(np.sum([s * s for t, s in query_rfea.items() if t != PAGERANK_FLD]))
        for i in search_res.ids:
            nor, denor = 0, 0
            if not search_res.field[i].get(TAG_FLD):
                rank_fea.append(0)
                continue
            # SECURITY(review): eval() on a stored field. TAG_FLD is written by
            # tag_content() as a dict, but if this field can ever contain
            # attacker-influenced text this is arbitrary code execution —
            # consider storing/parsing JSON instead.
            for t, sc in eval(search_res.field[i].get(TAG_FLD, "{}")).items():
                if t in query_rfea:
                    nor += query_rfea[t] * sc
                    denor += sc * sc
            if denor == 0:
                rank_fea.append(0)
            else:
                # Cosine-style normalization against the query feature vector.
                rank_fea.append(nor / np.sqrt(denor) / q_denor)
        return np.array(rank_fea) * 10. + pageranks
    def rerank(self, sres, query, tkweight=0.3,
               vtweight=0.7, cfield="content_ltks",
               rank_feature: dict | None = None
               ):
        """Re-score search hits with a hybrid token/vector similarity.

        Token weighting: content x1, title x2, important keywords x5,
        questions x6. Rank-feature scores (tags + pagerank) are added on top.

        Returns:
            (similarity + rank_feature_scores, token_sim, vector_sim), or
            ([], [], []) when there are no hits.
        """
        _, keywords = self.qryr.question(query)

        vector_size = len(sres.query_vector)
        vector_column = f"q_{vector_size}_vec"
        zero_vector = [0.0] * vector_size
        ins_embd = []
        for chunk_id in sres.ids:
            vector = sres.field[chunk_id].get(vector_column, zero_vector)
            if isinstance(vector, str):
                # Vectors may come back from the store as tab-separated text.
                vector = [get_float(v) for v in vector.split("\t")]
            ins_embd.append(vector)
        if not ins_embd:
            return [], [], []

        # Normalize important_kwd to a list (the store may return a bare string).
        for i in sres.ids:
            if isinstance(sres.field[i].get("important_kwd", []), str):
                sres.field[i]["important_kwd"] = [sres.field[i]["important_kwd"]]
        ins_tw = []
        for i in sres.ids:
            content_ltks = list(OrderedDict.fromkeys(sres.field[i][cfield].split()))
            title_tks = [t for t in sres.field[i].get("title_tks", "").split() if t]
            question_tks = [t for t in sres.field[i].get("question_tks", "").split() if t]
            important_kwd = sres.field[i].get("important_kwd", [])
            tks = content_ltks + title_tks * 2 + important_kwd * 5 + question_tks * 6
            ins_tw.append(tks)

        ## For rank feature(tag_fea) scores.
        rank_fea = self._rank_feature_scores(rank_feature, sres)

        sim, tksim, vtsim = self.qryr.hybrid_similarity(sres.query_vector,
                                                        ins_embd,
                                                        keywords,
                                                        ins_tw, tkweight, vtweight)

        return sim + rank_fea, tksim, vtsim
    def rerank_by_model(self, rerank_mdl, sres, query, tkweight=0.3,
                        vtweight=0.7, cfield="content_ltks",
                        rank_feature: dict | None = None):
        """Re-score search hits using an external rerank model.

        Token similarity is computed locally; the "vector" similarity comes
        from *rerank_mdl* scoring the query against each chunk's token text.

        Returns:
            (tkweight*token_sim + vtweight*model_sim + rank_feature_scores,
             token_sim, model_sim).
        """
        _, keywords = self.qryr.question(query)

        # Normalize important_kwd to a list (the store may return a bare string).
        for i in sres.ids:
            if isinstance(sres.field[i].get("important_kwd", []), str):
                sres.field[i]["important_kwd"] = [sres.field[i]["important_kwd"]]
        ins_tw = []
        for i in sres.ids:
            content_ltks = sres.field[i][cfield].split()
            title_tks = [t for t in sres.field[i].get("title_tks", "").split() if t]
            important_kwd = sres.field[i].get("important_kwd", [])
            tks = content_ltks + title_tks + important_kwd
            ins_tw.append(tks)

        tksim = self.qryr.token_similarity(keywords, ins_tw)
        vtsim, _ = rerank_mdl.similarity(query, [remove_redundant_spaces(" ".join(tks)) for tks in ins_tw])
        ## For rank feature(tag_fea) scores.
        rank_fea = self._rank_feature_scores(rank_feature, sres)

        return tkweight * np.array(tksim) + vtweight * vtsim + rank_fea, tksim, vtsim
def hybrid_similarity(self, ans_embd, ins_embd, ans, inst):
return self.qryr.hybrid_similarity(ans_embd,
ins_embd,
rag_tokenizer.tokenize(ans).split(),
rag_tokenizer.tokenize(inst).split())
    def retrieval(
            self,
            question,
            embd_mdl,
            tenant_ids,
            kb_ids,
            page,
            page_size,
            similarity_threshold=0.2,
            vector_similarity_weight=0.3,
            top=1024,
            doc_ids=None,
            aggs=True,
            rerank_mdl=None,
            highlight=False,
            # NOTE(review): mutable default dict — it is only read, never
            # mutated, so it is benign, but a None default resolved inside
            # would be safer.
            rank_feature: dict | None = {PAGERANK_FLD: 10},
    ):
        """Full retrieval pipeline: search, rerank, threshold, paginate, aggregate.

        Args:
            question: User question; empty returns an empty result immediately.
            embd_mdl: Embedding model for the dense leg of the hybrid search.
            tenant_ids: Tenant id(s); a comma-separated string is accepted.
            kb_ids: Knowledge bases to search.
            page/page_size: 1-based pagination over the reranked window.
            similarity_threshold: Minimum hybrid score to keep a chunk.
            vector_similarity_weight: Vector weight (token weight is 1 - this).
            top: topk forwarded to the dense match.
            doc_ids: Optional document filter.
            aggs: When True, fill "doc_aggs" with per-document hit counts.
            rerank_mdl: Optional external rerank model.
            highlight: Forwarded to search().
            rank_feature: Ranking-feature weights (defaults to pagerank boost).

        Returns:
            {"total": int, "chunks": [...], "doc_aggs": [...]}.
        """
        ranks = {"total": 0, "chunks": [], "doc_aggs": {}}
        if not question:
            return ranks

        # Ensure RERANK_LIMIT is multiple of page_size
        RERANK_LIMIT = math.ceil(64 / page_size) * page_size if page_size > 1 else 1
        req = {
            "kb_ids": kb_ids,
            "doc_ids": doc_ids,
            "page": math.ceil(page_size * page / RERANK_LIMIT),
            "size": RERANK_LIMIT,
            "question": question,
            "vector": True,
            "topk": top,
            "similarity": similarity_threshold,
            "available_int": 1,
        }

        if isinstance(tenant_ids, str):
            tenant_ids = tenant_ids.split(",")

        sres = self.search(req, [index_name(tid) for tid in tenant_ids], kb_ids, embd_mdl, highlight,
                           rank_feature=rank_feature)

        if rerank_mdl and sres.total > 0:
            sim, tsim, vsim = self.rerank_by_model(
                rerank_mdl,
                sres,
                question,
                1 - vector_similarity_weight,
                vector_similarity_weight,
                rank_feature=rank_feature,
            )
        else:
            if settings.DOC_ENGINE_INFINITY:
                # Don't need rerank here since Infinity normalizes each way score before fusion.
                sim = [sres.field[id].get("_score", 0.0) for id in sres.ids]
                sim = [s if s is not None else 0.0 for s in sim]
                tsim = sim
                vsim = sim
            else:
                # ElasticSearch doesn't normalize each way score before fusion.
                sim, tsim, vsim = self.rerank(
                    sres,
                    question,
                    1 - vector_similarity_weight,
                    vector_similarity_weight,
                    rank_feature=rank_feature,
                )
        sim_np = np.array(sim, dtype=np.float64)
        if sim_np.size == 0:
            ranks["doc_aggs"] = []
            return ranks
        # Sort descending, then keep only hits above the similarity threshold.
        sorted_idx = np.argsort(sim_np * -1)
        valid_idx = [int(i) for i in sorted_idx if sim_np[i] >= similarity_threshold]
        filtered_count = len(valid_idx)
        ranks["total"] = int(filtered_count)
        if filtered_count == 0:
            ranks["doc_aggs"] = []
            return ranks

        # Page within the reranked window (page wraps modulo max_pages).
        max_pages = max(RERANK_LIMIT // max(page_size, 1), 1)
        page_index = (page - 1) % max_pages
        begin = page_index * page_size
        end = begin + page_size
        page_idx = valid_idx[begin:end]

        dim = len(sres.query_vector)
        vector_column = f"q_{dim}_vec"
        zero_vector = [0.0] * dim

        for i in page_idx:
            id = sres.ids[i]
            chunk = sres.field[id]
            dnm = chunk.get("docnm_kwd", "")
            did = chunk.get("doc_id", "")
            position_int = chunk.get("position_int", [])
            d = {
                "chunk_id": id,
                "content_ltks": chunk["content_ltks"],
                "content_with_weight": chunk["content_with_weight"],
                "doc_id": did,
                "docnm_kwd": dnm,
                "kb_id": chunk["kb_id"],
                "important_kwd": chunk.get("important_kwd", []),
                "image_id": chunk.get("img_id", ""),
                "similarity": float(sim_np[i]),
                "vector_similarity": float(vsim[i]),
                "term_similarity": float(tsim[i]),
                "vector": chunk.get(vector_column, zero_vector),
                "positions": position_int,
                "doc_type_kwd": chunk.get("doc_type_kwd", ""),
                "mom_id": chunk.get("mom_id", ""),
            }
            if highlight and sres.highlight:
                if id in sres.highlight:
                    d["highlight"] = remove_redundant_spaces(sres.highlight[id])
                else:
                    d["highlight"] = d["content_with_weight"]
            ranks["chunks"].append(d)

        if aggs:
            # Aggregate hit counts per document over ALL valid hits, not just
            # the current page, then sort by count descending.
            for i in valid_idx:
                id = sres.ids[i]
                chunk = sres.field[id]
                dnm = chunk.get("docnm_kwd", "")
                did = chunk.get("doc_id", "")
                if dnm not in ranks["doc_aggs"]:
                    ranks["doc_aggs"][dnm] = {"doc_id": did, "count": 0}
                ranks["doc_aggs"][dnm]["count"] += 1
            ranks["doc_aggs"] = [
                {
                    "doc_name": k,
                    "doc_id": v["doc_id"],
                    "count": v["count"],
                }
                for k, v in sorted(
                    ranks["doc_aggs"].items(),
                    key=lambda x: x[1]["count"] * -1,
                )
            ]
        else:
            ranks["doc_aggs"] = []

        return ranks
def sql_retrieval(self, sql, fetch_size=128, format="json"):
tbl = self.dataStore.sql(sql, fetch_size, format)
return tbl
def chunk_list(self, doc_id: str, tenant_id: str,
kb_ids: list[str], max_count=1024,
offset=0,
fields=["docnm_kwd", "content_with_weight", "img_id"],
sort_by_position: bool = False):
condition = {"doc_id": doc_id}
fields_set = set(fields or [])
if sort_by_position:
for need in ("page_num_int", "position_int", "top_int"):
if need not in fields_set:
fields_set.add(need)
fields = list(fields_set)
orderBy = OrderByExpr()
if sort_by_position:
orderBy.asc("page_num_int")
orderBy.asc("position_int")
orderBy.asc("top_int")
res = []
bs = 128
for p in range(offset, max_count, bs):
es_res = self.dataStore.search(fields, [], condition, [], orderBy, p, bs, index_name(tenant_id),
kb_ids)
dict_chunks = self.dataStore.get_fields(es_res, fields)
for id, doc in dict_chunks.items():
doc["id"] = id
if dict_chunks:
res.extend(dict_chunks.values())
# FIX: Solo terminar si no hay chunks, no si hay menos de bs
if len(dict_chunks.values()) == 0:
break
return res
def all_tags(self, tenant_id: str, kb_ids: list[str], S=1000):
if not self.dataStore.index_exist(index_name(tenant_id), kb_ids[0]):
return []
res = self.dataStore.search([], [], {}, [], OrderByExpr(), 0, 0, index_name(tenant_id), kb_ids, ["tag_kwd"])
return self.dataStore.get_aggregation(res, "tag_kwd")
def all_tags_in_portion(self, tenant_id: str, kb_ids: list[str], S=1000):
res = self.dataStore.search([], [], {}, [], OrderByExpr(), 0, 0, index_name(tenant_id), kb_ids, ["tag_kwd"])
res = self.dataStore.get_aggregation(res, "tag_kwd")
total = np.sum([c for _, c in res])
return {t: (c + 1) / (total + S) for t, c in res}
    def tag_content(self, tenant_id: str, kb_ids: list[str], doc, all_tags, topn_tags=3, keywords_topn=30, S=1000):
        """Derive the most distinctive tags for ``doc`` and store them under ``doc[TAG_FLD]``.

        Builds a full-text query from the doc's title/content tokens, aggregates
        the ``tag_kwd`` values of matching chunks, and scores each tag by its
        smoothed local frequency relative to the corpus-wide distribution
        ``all_tags``. Returns False when no aggregation is available, else True.
        """
        idx_nm = index_name(tenant_id)
        match_txt = self.qryr.paragraph(doc["title_tks"] + " " + doc["content_ltks"], doc.get("important_kwd", []),
                                        keywords_topn)
        res = self.dataStore.search([], [], {}, [match_txt], OrderByExpr(), 0, 0, idx_nm, kb_ids, ["tag_kwd"])
        aggs = self.dataStore.get_aggregation(res, "tag_kwd")
        if not aggs:
            return False
        cnt = np.sum([c for _, c in aggs])
        # Smoothed local frequency divided by global frequency: favors tags
        # over-represented in this doc's retrieval neighborhood.
        tag_fea = sorted([(a, round(0.1 * (c + 1) / (cnt + S) / max(1e-6, all_tags.get(a, 0.0001)))) for a, c in aggs],
                         key=lambda x: x[1] * -1)[:topn_tags]
        # Dots are replaced in field keys; only strictly positive scores are kept.
        doc[TAG_FLD] = {a.replace(".", "_"): c for a, c in tag_fea if c > 0}
        return True
    def tag_query(self, question: str, tenant_ids: str | list[str], kb_ids: list[str], all_tags, topn_tags=3, S=1000):
        """Infer likely tags for a free-text question.

        Mirrors ``tag_content`` but scores the user question instead of an
        indexed document. Returns {tag: weight} (dots replaced by underscores,
        weights clamped to >= 1), or {} when no aggregation is available.
        """
        if isinstance(tenant_ids, str):
            idx_nms = index_name(tenant_ids)
        else:
            idx_nms = [index_name(tid) for tid in tenant_ids]
        match_txt, _ = self.qryr.question(question, min_match=0.0)
        res = self.dataStore.search([], [], {}, [match_txt], OrderByExpr(), 0, 0, idx_nms, kb_ids, ["tag_kwd"])
        aggs = self.dataStore.get_aggregation(res, "tag_kwd")
        if not aggs:
            return {}
        cnt = np.sum([c for _, c in aggs])
        tag_fea = sorted([(a, round(0.1 * (c + 1) / (cnt + S) / max(1e-6, all_tags.get(a, 0.0001)))) for a, c in aggs],
                         key=lambda x: x[1] * -1)[:topn_tags]
        # Clamp to >= 1 so every selected tag still contributes to the query.
        return {a.replace(".", "_"): max(1, c) for a, c in tag_fea}
    def retrieval_by_toc(self, query: str, chunks: list[dict], tenant_ids: list[str], chat_mdl, topn: int = 6):
        """Boost/extend retrieved chunks using the table of contents of the best document.

        The document with the highest summed similarity is selected, its stored
        TOC (chunks flagged ``toc_kwd == "toc"``, JSON-encoded) is loaded, and an
        LLM picks TOC-relevant chunk ids. Matching chunks get their similarity
        boosted; unseen chunks are fetched from the store and appended. Returns
        the top-``topn`` chunks by similarity (or the input unchanged when no
        TOC / no LLM picks are available).
        """
        if not chunks:
            return []
        idx_nms = [index_name(tid) for tid in tenant_ids]
        ranks, doc_id2kb_id = {}, {}
        # Sum similarities per document to find the dominant one.
        for ck in chunks:
            if ck["doc_id"] not in ranks:
                ranks[ck["doc_id"]] = 0
            ranks[ck["doc_id"]] += ck["similarity"]
            doc_id2kb_id[ck["doc_id"]] = ck["kb_id"]
        doc_id = sorted(ranks.items(), key=lambda x: x[1] * -1.)[0][0]
        kb_ids = [doc_id2kb_id[doc_id]]
        es_res = self.dataStore.search(["content_with_weight"], [], {"doc_id": doc_id, "toc_kwd": "toc"}, [],
                                       OrderByExpr(), 0, 128, idx_nms,
                                       kb_ids)
        toc = []
        dict_chunks = self.dataStore.get_fields(es_res, ["content_with_weight"])
        for _, doc in dict_chunks.items():
            try:
                toc.extend(json.loads(doc["content_with_weight"]))
            except Exception as e:
                # Malformed TOC entries are skipped, not fatal.
                logging.exception(e)
        if not toc:
            return chunks
        ids = asyncio.run(relevant_chunks_with_toc(query, toc, chat_mdl, topn * 2))
        if not ids:
            return chunks
        vector_size = 1024
        id2idx = {ck["chunk_id"]: i for i, ck in enumerate(chunks)}
        for cid, sim in ids:
            if cid in id2idx:
                # Already retrieved: just boost its similarity.
                chunks[id2idx[cid]]["similarity"] += sim
                continue
            chunk = self.dataStore.get(cid, idx_nms, kb_ids)
            if not chunk:
                continue
            d = {
                "chunk_id": cid,
                "content_ltks": chunk["content_ltks"],
                "content_with_weight": chunk["content_with_weight"],
                "doc_id": doc_id,
                "docnm_kwd": chunk.get("docnm_kwd", ""),
                "kb_id": chunk["kb_id"],
                "important_kwd": chunk.get("important_kwd", []),
                "image_id": chunk.get("img_id", ""),
                "similarity": sim,
                "vector_similarity": sim,
                "term_similarity": sim,
                "vector": [0.0] * vector_size,
                "positions": chunk.get("position_int", []),
                "doc_type_kwd": chunk.get("doc_type_kwd", "")
            }
            # Use the stored embedding (field name ends with "_vec") when present.
            for k in chunk.keys():
                if k[-4:] == "_vec":
                    d["vector"] = chunk[k]
                    vector_size = len(chunk[k])
                    break
            chunks.append(d)
        return sorted(chunks, key=lambda x: x["similarity"] * -1)[:topn]
    def retrieval_by_children(self, chunks: list[dict], tenant_ids: list[str]):
        """Collapse child chunks into their parent ("mom") chunks.

        Chunks carrying a non-empty ``mom_id`` are removed from ``chunks`` and
        grouped by parent; each parent is fetched from the store and appended
        back as one merged chunk whose similarity is the mean of its children's.
        Chunks without a parent pass through unchanged. Result is sorted by
        descending similarity.
        """
        if not chunks:
            return []
        idx_nms = [index_name(tid) for tid in tenant_ids]
        mom_chunks = defaultdict(list)
        i = 0
        # In-place scan: pop children out of `chunks` while walking it, so the
        # index only advances when the current element is kept.
        while i < len(chunks):
            ck = chunks[i]
            mom_id = ck.get("mom_id")
            if not isinstance(mom_id, str) or not mom_id.strip():
                i += 1
                continue
            mom_chunks[ck["mom_id"]].append(chunks.pop(i))
        if not mom_chunks:
            return chunks
        if not chunks:
            chunks = []
        vector_size = 1024
        for id, cks in mom_chunks.items():
            chunk = self.dataStore.get(id, idx_nms, [ck["kb_id"] for ck in cks])
            d = {
                "chunk_id": id,
                "content_ltks": " ".join([ck["content_ltks"] for ck in cks]),
                "content_with_weight": chunk["content_with_weight"],
                "doc_id": chunk["doc_id"],
                "docnm_kwd": chunk.get("docnm_kwd", ""),
                "kb_id": chunk["kb_id"],
                "important_kwd": [kwd for ck in cks for kwd in ck.get("important_kwd", [])],
                "image_id": chunk.get("img_id", ""),
                "similarity": np.mean([ck["similarity"] for ck in cks]),
                "vector_similarity": np.mean([ck["similarity"] for ck in cks]),
                "term_similarity": np.mean([ck["similarity"] for ck in cks]),
                "vector": [0.0] * vector_size,
                "positions": chunk.get("position_int", []),
                "doc_type_kwd": chunk.get("doc_type_kwd", "")
            }
            # First child's embedding (field name ends with "_vec") stands in
            # for the parent's vector when available.
            for k in cks[0].keys():
                if k[-4:] == "_vec":
                    d["vector"] = cks[0][k]
                    vector_size = len(cks[0][k])
                    break
            chunks.append(d)
        return sorted(chunks, key=lambda x: x["similarity"] * -1)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/rag/nlp/surname.py | rag/nlp/surname.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Known Chinese surnames (single-character entries first, largely following the
# traditional "Hundred Family Surnames" ordering, then two-character compound
# surnames). Used by isit() below for surname membership tests.
m = set(["赵", "钱", "孙", "李",
         "周", "吴", "郑", "王",
         "冯", "陈", "褚", "卫",
         "蒋", "沈", "韩", "杨",
         "朱", "秦", "尤", "许",
         "何", "吕", "施", "张",
         "孔", "曹", "严", "华",
         "金", "魏", "陶", "姜",
         "戚", "谢", "邹", "喻",
         "柏", "水", "窦", "章",
         "云", "苏", "潘", "葛",
         "奚", "范", "彭", "郎",
         "鲁", "韦", "昌", "马",
         "苗", "凤", "花", "方",
         "俞", "任", "袁", "柳",
         "酆", "鲍", "史", "唐",
         "费", "廉", "岑", "薛",
         "雷", "贺", "倪", "汤",
         "滕", "殷", "罗", "毕",
         "郝", "邬", "安", "常",
         "乐", "于", "时", "傅",
         "皮", "卞", "齐", "康",
         "伍", "余", "元", "卜",
         "顾", "孟", "平", "黄",
         "和", "穆", "萧", "尹",
         "姚", "邵", "湛", "汪",
         "祁", "毛", "禹", "狄",
         "米", "贝", "明", "臧",
         "计", "伏", "成", "戴",
         "谈", "宋", "茅", "庞",
         "熊", "纪", "舒", "屈",
         "项", "祝", "董", "梁",
         "杜", "阮", "蓝", "闵",
         "席", "季", "麻", "强",
         "贾", "路", "娄", "危",
         "江", "童", "颜", "郭",
         "梅", "盛", "林", "刁",
         "钟", "徐", "邱", "骆",
         "高", "夏", "蔡", "田",
         "樊", "胡", "凌", "霍",
         "虞", "万", "支", "柯",
         "昝", "管", "卢", "莫",
         "经", "房", "裘", "缪",
         "干", "解", "应", "宗",
         "丁", "宣", "贲", "邓",
         "郁", "单", "杭", "洪",
         "包", "诸", "左", "石",
         "崔", "吉", "钮", "龚",
         "程", "嵇", "邢", "滑",
         "裴", "陆", "荣", "翁",
         "荀", "羊", "於", "惠",
         "甄", "曲", "家", "封",
         "芮", "羿", "储", "靳",
         "汲", "邴", "糜", "松",
         "井", "段", "富", "巫",
         "乌", "焦", "巴", "弓",
         "牧", "隗", "山", "谷",
         "车", "侯", "宓", "蓬",
         "全", "郗", "班", "仰",
         "秋", "仲", "伊", "宫",
         "宁", "仇", "栾", "暴",
         "甘", "钭", "厉", "戎",
         "祖", "武", "符", "刘",
         "景", "詹", "束", "龙",
         "叶", "幸", "司", "韶",
         "郜", "黎", "蓟", "薄",
         "印", "宿", "白", "怀",
         "蒲", "邰", "从", "鄂",
         "索", "咸", "籍", "赖",
         "卓", "蔺", "屠", "蒙",
         "池", "乔", "阴", "鬱",
         "胥", "能", "苍", "双",
         "闻", "莘", "党", "翟",
         "谭", "贡", "劳", "逄",
         "姬", "申", "扶", "堵",
         "冉", "宰", "郦", "雍",
         "郤", "璩", "桑", "桂",
         "濮", "牛", "寿", "通",
         "边", "扈", "燕", "冀",
         "郏", "浦", "尚", "农",
         "温", "别", "庄", "晏",
         "柴", "瞿", "阎", "充",
         "慕", "连", "茹", "习",
         "宦", "艾", "鱼", "容",
         "向", "古", "易", "慎",
         "戈", "廖", "庾", "终",
         "暨", "居", "衡", "步",
         "都", "耿", "满", "弘",
         "匡", "国", "文", "寇",
         "广", "禄", "阙", "东",
         "欧", "殳", "沃", "利",
         "蔚", "越", "夔", "隆",
         "师", "巩", "厍", "聂",
         "晁", "勾", "敖", "融",
         "冷", "訾", "辛", "阚",
         "那", "简", "饶", "空",
         "曾", "母", "沙", "乜",
         "养", "鞠", "须", "丰",
         "巢", "关", "蒯", "相",
         "查", "后", "荆", "红",
         "游", "竺", "权", "逯",
         "盖", "益", "桓", "公",
         # Additional/less common single-character surnames and variants.
         "兰", "原", "乞", "西", "阿", "肖", "丑", "位", "曽", "巨", "德", "代", "圆", "尉", "仵", "纳", "仝", "脱",
         "丘", "但", "展", "迪", "付", "覃", "晗", "特", "隋", "苑", "奥", "漆", "谌", "郄", "练", "扎", "邝", "渠",
         "信", "门", "陳", "化", "原", "密", "泮", "鹿", "赫",
         # Two-character compound surnames.
         "万俟", "司马", "上官", "欧阳",
         "夏侯", "诸葛", "闻人", "东方",
         "赫连", "皇甫", "尉迟", "公羊",
         "澹台", "公冶", "宗政", "濮阳",
         "淳于", "单于", "太叔", "申屠",
         "公孙", "仲孙", "轩辕", "令狐",
         "钟离", "宇文", "长孙", "慕容",
         "鲜于", "闾丘", "司徒", "司空",
         "亓官", "司寇", "仉督", "子车",
         "颛孙", "端木", "巫马", "公西",
         "漆雕", "乐正", "壤驷", "公良",
         "拓跋", "夹谷", "宰父", "榖梁",
         "晋", "楚", "闫", "法", "汝", "鄢", "涂", "钦",
         "段干", "百里", "东郭", "南门",
         "呼延", "归", "海", "羊舌", "微", "生",
         "岳", "帅", "缑", "亢", "况", "后", "有", "琴",
         "梁丘", "左丘", "东门", "西门",
         "商", "牟", "佘", "佴", "伯", "赏", "南宫",
         "墨", "哈", "谯", "笪", "年", "爱", "阳", "佟",
         "第五", "言", "福"])
def isit(n):
    """Return True if the stripped input string is a recognized Chinese surname."""
    return n.strip() in m
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/rag/nlp/rag_tokenizer.py | rag/nlp/rag_tokenizer.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import infinity.rag_tokenizer
from common import settings
class RagTokenizer(infinity.rag_tokenizer.RagTokenizer):
    """Tokenizer wrapper that passes text through unchanged when the
    DOC_ENGINE_INFINITY setting is enabled, and otherwise delegates to the
    base infinity tokenizer."""

    def tokenize(self, line: str) -> str:
        if not settings.DOC_ENGINE_INFINITY:
            return super().tokenize(line)
        # Infinity doc engine: return the raw line untouched.
        return line

    def fine_grained_tokenize(self, tks: str) -> str:
        if not settings.DOC_ENGINE_INFINITY:
            return super().fine_grained_tokenize(tks)
        return tks
def is_chinese(s):
    # Delegate to the infinity tokenizer's character classifier.
    return infinity.rag_tokenizer.is_chinese(s)
def is_number(s):
    # Delegate to the infinity tokenizer's number detector.
    return infinity.rag_tokenizer.is_number(s)
def is_alphabet(s):
    # Delegate to the infinity tokenizer's alphabet detector.
    return infinity.rag_tokenizer.is_alphabet(s)
def naive_qie(txt):
    # Delegate to the infinity tokenizer's naive segmentation.
    return infinity.rag_tokenizer.naive_qie(txt)
# Shared module-level tokenizer instance and convenience aliases bound to it.
tokenizer = RagTokenizer()
tokenize = tokenizer.tokenize
fine_grained_tokenize = tokenizer.fine_grained_tokenize
tag = tokenizer.tag
freq = tokenizer.freq
# Re-export the base class's private helpers under public names.
tradi2simp = tokenizer._tradi2simp
strQ2B = tokenizer._strQ2B
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/rag/nlp/query.py | rag/nlp/query.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
import re
from collections import defaultdict
from common.query_base import QueryBase
from common.doc_store.doc_store_base import MatchTextExpr
from rag.nlp import rag_tokenizer, term_weight, synonym
class FulltextQueryer(QueryBase):
    """Builds weighted full-text query expressions (MatchTextExpr) from user
    questions, combining term weights, synonyms and fine-grained tokens."""

    def __init__(self):
        self.tw = term_weight.Dealer()
        self.syn = synonym.Dealer()
        # Fields searched by every query, with per-field boost factors.
        self.query_fields = [
            "title_tks^10",
            "title_sm_tks^5",
            "important_kwd^30",
            "important_tks^20",
            "question_tks^20",
            "content_ltks^2",
            "content_sm_ltks",
        ]

    def question(self, txt, tbl="qa", min_match: float = 0.6):
        """Turn a user question into (MatchTextExpr, keywords).

        Normalizes the text (full-width -> half-width, traditional -> simplified,
        punctuation stripped), then builds a boosted boolean query. Non-Chinese
        input takes an early-return path; Chinese input goes through term
        splitting, synonym expansion and fine-grained tokenization. May return
        (None, keywords) when no query terms survive.
        """
        original_query = txt
        txt = self.add_space_between_eng_zh(txt)
        txt = re.sub(
            r"[ :|\r\n\t,,。??/`!!&^%%()\[\]{}<>]+",
            " ",
            rag_tokenizer.tradi2simp(rag_tokenizer.strQ2B(txt.lower())),
        ).strip()
        otxt = txt
        txt = self.rmWWW(txt)
        if not self.is_chinese(txt):
            # Non-Chinese path: weight tokens, expand synonyms, add adjacent-pair
            # phrase boosts, and return immediately.
            txt = self.rmWWW(txt)
            tks = rag_tokenizer.tokenize(txt).split()
            keywords = [t for t in tks if t]
            tks_w = self.tw.weights(tks, preprocess=False)
            # Strip characters that would break the query syntax.
            tks_w = [(re.sub(r"[ \\\"'^]", "", tk), w) for tk, w in tks_w]
            tks_w = [(re.sub(r"^[a-z0-9]$", "", tk), w) for tk, w in tks_w if tk]
            tks_w = [(re.sub(r"^[\+-]", "", tk), w) for tk, w in tks_w if tk]
            tks_w = [(tk.strip(), w) for tk, w in tks_w if tk.strip()]
            syns = []
            for tk, w in tks_w[:256]:
                syn = self.syn.lookup(tk)
                syn = rag_tokenizer.tokenize(" ".join(syn)).split()
                keywords.extend(syn)
                # Synonyms get a quarter of the original term's weight.
                syn = ["\"{}\"^{:.4f}".format(s, w / 4.) for s in syn if s.strip()]
                syns.append(" ".join(syn))
            q = ["({}^{:.4f}".format(tk, w) + " {})".format(syn) for (tk, w), syn in zip(tks_w, syns) if
                 tk and not re.match(r"[.^+\(\)-]", tk)]
            # Boost adjacent token pairs as quoted phrases.
            for i in range(1, len(tks_w)):
                left, right = tks_w[i - 1][0].strip(), tks_w[i][0].strip()
                if not left or not right:
                    continue
                q.append(
                    '"%s %s"^%.4f'
                    % (
                        tks_w[i - 1][0],
                        tks_w[i][0],
                        max(tks_w[i - 1][1], tks_w[i][1]) * 2,
                    )
                )
            if not q:
                q.append(txt)
            query = " ".join(q)
            return MatchTextExpr(
                self.query_fields, query, 100, {"original_query": original_query}
            ), keywords

        def need_fine_grained_tokenize(tk):
            # Skip fine-grained tokenization for short tokens and plain
            # ASCII identifiers/numbers.
            if len(tk) < 3:
                return False
            if re.match(r"[0-9a-z\.\+#_\*-]+$", tk):
                return False
            return True

        txt = self.rmWWW(txt)
        qs, keywords = [], []
        for tt in self.tw.split(txt)[:256]:  # .split():
            if not tt:
                continue
            keywords.append(tt)
            twts = self.tw.weights([tt])
            syns = self.syn.lookup(tt)
            if syns and len(keywords) < 32:
                keywords.extend(syns)
            logging.debug(json.dumps(twts, ensure_ascii=False))
            tms = []
            for tk, w in sorted(twts, key=lambda x: x[1] * -1):
                sm = (
                    rag_tokenizer.fine_grained_tokenize(tk).split()
                    if need_fine_grained_tokenize(tk)
                    else []
                )
                # Scrub punctuation (ASCII and full-width) out of sub-tokens.
                sm = [
                    re.sub(
                        r"[ ,\./;'\[\]\\`~!@#$%\^&\*\(\)=\+_<>\?:\"\{\}\|,。;‘’【】、!¥……()——《》?:“”-]+",
                        "",
                        m,
                    )
                    for m in sm
                ]
                sm = [self.sub_special_char(m) for m in sm if len(m) > 1]
                sm = [m for m in sm if len(m) > 1]
                if len(keywords) < 32:
                    keywords.append(re.sub(r"[ \\\"']+", "", tk))
                    keywords.extend(sm)
                tk_syns = self.syn.lookup(tk)
                tk_syns = [self.sub_special_char(s) for s in tk_syns]
                if len(keywords) < 32:
                    keywords.extend([s for s in tk_syns if s])
                tk_syns = [rag_tokenizer.fine_grained_tokenize(s) for s in tk_syns if s]
                tk_syns = [f"\"{s}\"" if s.find(" ") > 0 else s for s in tk_syns]
                # Hard cap on collected keywords.
                if len(keywords) >= 32:
                    break
                tk = self.sub_special_char(tk)
                if tk.find(" ") > 0:
                    tk = '"%s"' % tk
                if tk_syns:
                    tk = f"({tk} OR (%s)^0.2)" % " ".join(tk_syns)
                if sm:
                    tk = f'{tk} OR "%s" OR ("%s"~2)^0.5' % (" ".join(sm), " ".join(sm))
                if tk.strip():
                    tms.append((tk, w))
            tms = " ".join([f"({t})^{w}" for t, w in tms])
            if len(twts) > 1:
                # Whole-term proximity phrase boost.
                tms += ' ("%s"~2)^1.5' % rag_tokenizer.tokenize(tt)
            syns = " OR ".join(
                [
                    '"%s"'
                    % rag_tokenizer.tokenize(self.sub_special_char(s))
                    for s in syns
                ]
            )
            if syns and tms:
                tms = f"({tms})^5 OR ({syns})^0.7"
            qs.append(tms)
        if qs:
            query = " OR ".join([f"({t})" for t in qs if t])
            if not query:
                query = otxt
            return MatchTextExpr(
                self.query_fields, query, 100, {"minimum_should_match": min_match, "original_query": original_query}
            ), keywords
        return None, keywords

    def hybrid_similarity(self, avec, bvecs, atks, btkss, tkweight=0.3, vtweight=0.7):
        """Blend cosine (vector) similarity with token-overlap similarity.

        Returns (combined, token_sims, vector_sims). When all vector
        similarities are zero, falls back to pure token similarity.
        """
        from sklearn.metrics.pairwise import cosine_similarity
        import numpy as np
        sims = cosine_similarity([avec], bvecs)
        tksim = self.token_similarity(atks, btkss)
        if np.sum(sims[0]) == 0:
            return np.array(tksim), tksim, sims[0]
        return np.array(sims[0]) * vtweight + np.array(tksim) * tkweight, tksim, sims[0]

    def token_similarity(self, atks, btkss):
        """Token-overlap similarity of query tokens against each candidate's tokens."""
        def to_dict(tks):
            if isinstance(tks, str):
                tks = tks.split()
            d = defaultdict(int)
            wts = self.tw.weights(tks, preprocess=False)
            for i, (t, c) in enumerate(wts):
                d[t] += c
            return d
        atks = to_dict(atks)
        btkss = [to_dict(tks) for tks in btkss]
        return [self.similarity(atks, btks) for btks in btkss]

    def similarity(self, qtwt, dtwt):
        """Fraction of query term weight covered by the document's terms."""
        if isinstance(dtwt, type("")):
            dtwt = {t: w for t, w in self.tw.weights(self.tw.split(dtwt), preprocess=False)}
        if isinstance(qtwt, type("")):
            qtwt = {t: w for t, w in self.tw.weights(self.tw.split(qtwt), preprocess=False)}
        s = 1e-9
        for k, v in qtwt.items():
            if k in dtwt:
                s += v  # * dtwt[k]
        q = 1e-9
        for k, v in qtwt.items():
            q += v  # * v
        return s / q  # math.sqrt(3. * (s / q / math.log10( len(dtwt.keys()) + 512 )))

    def paragraph(self, content_tks: str, keywords: list = [], keywords_topn=30):
        """Build a MatchTextExpr from a paragraph's tokens plus optional keywords.

        NOTE(review): the ``[]`` default is safe here only because ``keywords``
        is rebound (never mutated in place) before any append.
        """
        if isinstance(content_tks, str):
            content_tks = [c.strip() for c in content_tks.strip() if c.strip()]
        tks_w = self.tw.weights(content_tks, preprocess=False)
        origin_keywords = keywords.copy()
        keywords = [f'"{k.strip()}"' for k in keywords]
        # Take the top-N weighted tokens, expanding each with synonyms.
        for tk, w in sorted(tks_w, key=lambda x: x[1] * -1)[:keywords_topn]:
            tk_syns = self.syn.lookup(tk)
            tk_syns = [self.sub_special_char(s) for s in tk_syns]
            tk_syns = [rag_tokenizer.fine_grained_tokenize(s) for s in tk_syns if s]
            tk_syns = [f"\"{s}\"" if s.find(" ") > 0 else s for s in tk_syns]
            tk = self.sub_special_char(tk)
            if tk.find(" ") > 0:
                tk = '"%s"' % tk
            if tk_syns:
                tk = f"({tk} OR (%s)^0.2)" % " ".join(tk_syns)
            if tk:
                keywords.append(f"{tk}^{w}")
        return MatchTextExpr(self.query_fields, " ".join(keywords), 100,
                             {"minimum_should_match": min(3, len(keywords) / 10),
                              "original_query": " ".join(origin_keywords)})
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/rag/nlp/synonym.py | rag/nlp/synonym.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
import os
import time
import re
from nltk.corpus import wordnet
from common.file_utils import get_project_base_directory
class Dealer:
    """Synonym lookup.

    Synonyms come from the bundled ``rag/res/synonym.json`` dictionary and,
    when a redis connection is supplied, are periodically refreshed from the
    ``kevin_synonyms`` redis key. Purely-alphabetic tokens missing from the
    dictionary fall back to WordNet.
    """

    def __init__(self, redis=None):
        # Large initial counter / old timestamp force the first lookup() to
        # attempt a redis refresh.
        self.lookup_num = 100000000
        self.load_tm = time.time() - 1000000
        self.dictionary = None
        path = os.path.join(get_project_base_directory(), "rag/res", "synonym.json")
        try:
            # Context manager so the descriptor is closed deterministically
            # (the previous bare open() leaked the file handle).
            with open(path, 'r', encoding='utf-8') as f:
                raw = json.load(f)
            # Normalize keys to lower case for case-insensitive lookup.
            self.dictionary = {(k.lower() if isinstance(k, str) else k): v for k, v in raw.items()}
        except Exception:
            logging.warning("Missing synonym.json")
            self.dictionary = {}
        if not redis:
            logging.warning(
                "Realtime synonym is disabled, since no redis connection.")
        if not self.dictionary:
            logging.warning("Fail to load synonym")
        self.redis = redis
        self.load()

    def load(self):
        """Refresh the dictionary from redis: at most once per hour, and only
        after at least 100 lookups since the previous refresh."""
        if not self.redis:
            return
        if self.lookup_num < 100:
            return
        tm = time.time()
        if tm - self.load_tm < 3600:
            return
        self.load_tm = time.time()
        self.lookup_num = 0
        d = self.redis.get("kevin_synonyms")
        if not d:
            return
        try:
            self.dictionary = json.loads(d)
        except Exception as e:
            logging.error("Fail to load synonym!" + str(e))

    def lookup(self, tk, topn=8):
        """Return up to ``topn`` synonyms for ``tk`` (or [] when none found).

        The custom dictionary is consulted first; purely alphabetic tokens not
        found there fall back to WordNet synsets.
        """
        if not tk or not isinstance(tk, str):
            return []
        # 1) Check the custom dictionary first (both keys and tk are already lowercase)
        self.lookup_num += 1
        self.load()
        key = re.sub(r"[ \t]+", " ", tk.strip())
        res = self.dictionary.get(key, [])
        if isinstance(res, str):
            res = [res]
        if res:  # Found in dictionary → return directly
            return res[:topn]
        # 2) If not found and tk is purely alphabetical → fallback to WordNet
        if re.fullmatch(r"[a-z]+", tk):
            wn_set = {
                re.sub("_", " ", syn.name().split(".")[0])
                for syn in wordnet.synsets(tk)
            }
            wn_set.discard(tk)  # Remove the original token itself
            wn_res = [t for t in wn_set if t]
            return wn_res[:topn]
        # 3) Nothing found in either source
        return []
if __name__ == '__main__':
    # Ad-hoc smoke test: load and print the bundled synonym dictionary.
    dl = Dealer()
    print(dl.dictionary)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/rag/nlp/term_weight.py | rag/nlp/term_weight.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import math
import json
import re
import os
import numpy as np
from rag.nlp import rag_tokenizer
from common.file_utils import get_project_base_directory
class Dealer:
    """Term weighting for query construction.

    Combines two IDF-style estimates (a token-frequency table and a
    document-frequency table shipped under ``rag/res``) with NER-type and
    POS-tag multipliers; weights() returns (token, normalized_weight) pairs.
    """

    def __init__(self):
        # Query stop words (mostly Chinese function/question words).
        self.stop_words = set(["请问",
                               "您",
                               "你",
                               "我",
                               "他",
                               "是",
                               "的",
                               "就",
                               "有",
                               "于",
                               "及",
                               "即",
                               "在",
                               "为",
                               "最",
                               "有",
                               "从",
                               "以",
                               "了",
                               "将",
                               "与",
                               "吗",
                               "吧",
                               "中",
                               "#",
                               "什么",
                               "怎么",
                               "哪个",
                               "哪些",
                               "啥",
                               "相关"])

        def load_dict(fnm):
            # Load a "term<TAB>count" table; if every count is 0/missing,
            # degrade to a plain set of terms.
            res = {}
            f = open(fnm, "r")
            while True:
                line = f.readline()
                if not line:
                    break
                arr = line.replace("\n", "").split("\t")
                if len(arr) < 2:
                    res[arr[0]] = 0
                else:
                    res[arr[0]] = int(arr[1])
            c = 0
            for _, v in res.items():
                c += v
            if c == 0:
                return set(res.keys())
            return res

        fnm = os.path.join(get_project_base_directory(), "rag/res")
        self.ne, self.df = {}, {}
        try:
            # Named-entity type table: term -> type tag (e.g. "corp", "func").
            self.ne = json.load(open(os.path.join(fnm, "ner.json"), "r"))
        except Exception:
            logging.warning("Load ner.json FAIL!")
        try:
            # Document-frequency table used by the df() estimator below.
            self.df = load_dict(os.path.join(fnm, "term.freq"))
        except Exception:
            logging.warning("Load term.freq FAIL!")

    def pretoken(self, txt, num=False, stpwd=True):
        """Tokenize ``txt``, dropping stop words, bare digits (unless ``num``)
        and punctuation-only tokens."""
        patt = [
            r"[~—\t @#%!<>,\.\?\":;'\{\}\[\]_=\(\)\|,。?》•●○↓《;‘’:“”【¥ 】…¥!、·()×`&\\/「」\\]"
        ]
        rewt = [
        ]
        for p, r in rewt:
            txt = re.sub(p, r, txt)
        res = []
        for t in rag_tokenizer.tokenize(txt).split():
            tk = t
            if (stpwd and tk in self.stop_words) or (
                    re.match(r"[0-9]$", tk) and not num):
                continue
            for p in patt:
                if re.match(p, t):
                    # Punctuation token: mark and skip below.
                    tk = "#"
                    break
            # tk = re.sub(r"([\+\\-])", r"\\\1", tk)
            if tk != "#" and tk:
                res.append(tk)
        return res

    def token_merge(self, tks):
        """Merge runs of very short tokens (single chars / 1-2 ASCII chars)
        into multi-token phrases."""
        def one_term(t):
            return len(t) == 1 or re.match(r"[0-9a-z]{1,2}$", t)

        res, i = [], 0
        while i < len(tks):
            j = i
            # Special case: a leading short token followed by a longer
            # non-ASCII token forms a phrase (e.g. 多 工位).
            if i == 0 and one_term(tks[i]) and len(
                    tks) > 1 and (len(tks[i + 1]) > 1 and not re.match(r"[0-9a-zA-Z]", tks[i + 1])):  # 多 工位
                res.append(" ".join(tks[0:2]))
                i = 2
                continue
            while j < len(
                    tks) and tks[j] and tks[j] not in self.stop_words and one_term(tks[j]):
                j += 1
            if j - i > 1:
                if j - i < 5:
                    res.append(" ".join(tks[i:j]))
                    i = j
                else:
                    # Long runs are merged only pairwise.
                    res.append(" ".join(tks[i:i + 2]))
                    i = i + 2
            else:
                if len(tks[i]) > 0:
                    res.append(tks[i])
                i += 1
        return [t for t in res if t]

    def ner(self, t):
        # Returns the NE type tag for t, "" when no table is loaded.
        # NOTE(review): falls through to an implicit None when t is unknown.
        if not self.ne:
            return ""
        res = self.ne.get(t, "")
        if res:
            return res

    def split(self, txt):
        """Split on whitespace, re-joining adjacent alphabetic tokens unless
        either is tagged as a function word ("func")."""
        tks = []
        for t in re.sub(r"[ \t]+", " ", txt).split():
            if tks and re.match(r".*[a-zA-Z]$", tks[-1]) and \
                    re.match(r".*[a-zA-Z]$", t) and tks and \
                    self.ne.get(t, "") != "func" and self.ne.get(tks[-1], "") != "func":
                tks[-1] = tks[-1] + " " + t
            else:
                tks.append(t)
        return tks

    def weights(self, tks, preprocess=True):
        """Return [(token, weight)] with weights normalized to sum to 1.

        Weight = blended IDF (0.3 * token-frequency IDF + 0.7 * doc-frequency
        IDF) scaled by NER-type and POS-tag multipliers. With ``preprocess``,
        tokens are first run through pretoken() + token_merge().
        """
        num_pattern = re.compile(r"[0-9,.]{2,}$")
        short_letter_pattern = re.compile(r"[a-z]{1,2}$")
        num_space_pattern = re.compile(r"[0-9. -]{2,}$")
        letter_pattern = re.compile(r"[a-z. -]+$")

        def ner(t):
            # NER-type multiplier; numbers boosted, short ASCII heavily damped.
            if num_pattern.match(t):
                return 2
            if short_letter_pattern.match(t):
                return 0.01
            if not self.ne or t not in self.ne:
                return 1
            m = {"toxic": 2, "func": 1, "corp": 3, "loca": 3, "sch": 3, "stock": 3,
                 "firstnm": 1}
            return m[self.ne[t]]

        def postag(t):
            # POS-tag multiplier: pronouns/conjunctions/adverbs damped,
            # place/org names and nouns boosted.
            t = rag_tokenizer.tag(t)
            if t in set(["r", "c", "d"]):
                return 0.3
            if t in set(["ns", "nt"]):
                return 3
            if t in set(["n"]):
                return 2
            if re.match(r"[0-9-]+", t):
                return 2
            return 1

        def freq(t):
            # Token-frequency estimate; unknown long terms are approximated
            # recursively via their fine-grained sub-tokens.
            if num_space_pattern.match(t):
                return 3
            s = rag_tokenizer.freq(t)
            if not s and letter_pattern.match(t):
                return 300
            if not s:
                s = 0
            if not s and len(t) >= 4:
                s = [tt for tt in rag_tokenizer.fine_grained_tokenize(t).split() if len(tt) > 1]
                if len(s) > 1:
                    s = np.min([freq(tt) for tt in s]) / 6.
                else:
                    s = 0
            return max(s, 10)

        def df(t):
            # Document-frequency estimate with the same recursive fallback.
            if num_space_pattern.match(t):
                return 5
            if t in self.df:
                return self.df[t] + 3
            elif letter_pattern.match(t):
                return 300
            elif len(t) >= 4:
                s = [tt for tt in rag_tokenizer.fine_grained_tokenize(t).split() if len(tt) > 1]
                if len(s) > 1:
                    return max(3, np.min([df(tt) for tt in s]) / 6.)
            return 3

        def idf(s, N):
            # Smoothed IDF over a nominal corpus of size N.
            return math.log10(10 + ((N - s + 0.5) / (s + 0.5)))

        tw = []
        if not preprocess:
            idf1 = np.array([idf(freq(t), 10000000) for t in tks])
            idf2 = np.array([idf(df(t), 1000000000) for t in tks])
            wts = (0.3 * idf1 + 0.7 * idf2) * \
                np.array([ner(t) * postag(t) for t in tks])
            wts = [s for s in wts]
            tw = list(zip(tks, wts))
        else:
            for tk in tks:
                tt = self.token_merge(self.pretoken(tk, True))
                idf1 = np.array([idf(freq(t), 10000000) for t in tt])
                idf2 = np.array([idf(df(t), 1000000000) for t in tt])
                wts = (0.3 * idf1 + 0.7 * idf2) * \
                    np.array([ner(t) * postag(t) for t in tt])
                wts = [s for s in wts]
                tw.extend(zip(tt, wts))
        S = np.sum([s for _, s in tw])
        # Normalize so the weights sum to 1.
        return [(t, s / S) for t, s in tw]
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/rag/nlp/__init__.py | rag/nlp/__init__.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import random
from collections import Counter, defaultdict
from common.token_utils import num_tokens_from_string
import re
import copy
import roman_numbers as r
from word2number import w2n
from cn2an import cn2an
from PIL import Image
import chardet
# Public re-export list for this package.
__all__ = ['rag_tokenizer']
# Candidate text codecs tried (in order) by find_codec() below.
all_codecs = [
    'utf-8', 'gb2312', 'gbk', 'utf_16', 'ascii', 'big5', 'big5hkscs',
    'cp037', 'cp273', 'cp424', 'cp437',
    'cp500', 'cp720', 'cp737', 'cp775', 'cp850', 'cp852', 'cp855', 'cp856', 'cp857',
    'cp858', 'cp860', 'cp861', 'cp862', 'cp863', 'cp864', 'cp865', 'cp866', 'cp869',
    'cp874', 'cp875', 'cp932', 'cp949', 'cp950', 'cp1006', 'cp1026', 'cp1125',
    'cp1140', 'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255', 'cp1256',
    'cp1257', 'cp1258', 'euc_jp', 'euc_jis_2004', 'euc_jisx0213', 'euc_kr',
    'gb18030', 'hz', 'iso2022_jp', 'iso2022_jp_1', 'iso2022_jp_2',
    'iso2022_jp_2004', 'iso2022_jp_3', 'iso2022_jp_ext', 'iso2022_kr', 'latin_1',
    'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6', 'iso8859_7',
    'iso8859_8', 'iso8859_9', 'iso8859_10', 'iso8859_11', 'iso8859_13',
    'iso8859_14', 'iso8859_15', 'iso8859_16', 'johab', 'koi8_r', 'koi8_t', 'koi8_u',
    'kz1048', 'mac_cyrillic', 'mac_greek', 'mac_iceland', 'mac_latin2', 'mac_roman',
    'mac_turkish', 'ptcp154', 'shift_jis', 'shift_jis_2004', 'shift_jisx0213',
    'utf_32', 'utf_32_be', 'utf_32_le', 'utf_16_be', 'utf_16_le', 'utf_7', 'windows-1250', 'windows-1251',
    'windows-1252', 'windows-1253', 'windows-1254', 'windows-1255', 'windows-1256',
    'windows-1257', 'windows-1258', 'latin-2'
]
def find_codec(blob):
    """Guess a usable text codec for the given byte blob.

    chardet is consulted first: a confident ASCII guess is upgraded to UTF-8.
    Otherwise every codec in ``all_codecs`` is tried — first on a 1 KiB
    prefix, then on the whole blob — and the first that decodes is returned.
    Falls back to "utf-8".
    """
    guess = chardet.detect(blob[:1024])
    if guess['confidence'] > 0.5 and guess['encoding'] == "ascii":
        return "utf-8"
    for codec in all_codecs:
        try:
            blob[:1024].decode(codec)
            return codec
        except Exception:
            pass
        try:
            blob.decode(codec)
            return codec
        except Exception:
            pass
    return "utf-8"
# Numbering schemes recognized as question bullets (Chinese ordinal forms,
# bare arabic numbers, parenthesized indices, and English "QUESTION ..." in
# word/roman/arabic form). Used by qbullets_category()/has_qbullet().
QUESTION_PATTERN = [
    r"第([零一二三四五六七八九十百0-9]+)问",
    r"第([零一二三四五六七八九十百0-9]+)条",
    r"[\((]([零一二三四五六七八九十百]+)[\))]",
    r"第([0-9]+)问",
    r"第([0-9]+)条",
    r"([0-9]{1,2})[\. 、]",
    r"([零一二三四五六七八九十百]+)[ 、]",
    r"[\((]([0-9]{1,2})[\))]",
    r"QUESTION (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
    r"QUESTION (I+V?|VI*|XI|IX|X)",
    r"QUESTION ([0-9]+)",
]
def has_qbullet(reg, box, last_box, last_index, last_bull, bull_x0_list):
    """Decide whether ``box`` starts a new question bullet.

    ``reg`` is the numbering regex chosen by qbullets_category(). Layout cues
    (x0 indent relative to the previous box and to the running average bullet
    indent, vertical gap) plus the parsed bullet index are used to reject
    false positives. Returns (match, index) when accepted, otherwise
    (None, last_index).
    """
    section, last_section = box['text'], last_box['text']
    # Bullet must be followed by question-like text ending at ?, full-width ?,
    # a newline, or end of string.
    q_reg = r'(\w|\W)*?(?:?|\?|\n|$)+'
    full_reg = reg + q_reg
    has_bull = re.match(full_reg, section)
    index_str = None
    if has_bull:
        if 'x0' not in last_box:
            last_box['x0'] = box['x0']
        if 'top' not in last_box:
            last_box['top'] = box['top']
        # Indented further than the previous bullet: treat as continuation.
        if last_bull and box['x0'] - last_box['x0'] > 10:
            return None, last_index
        if not last_bull and box['x0'] >= last_box['x0'] and box['top'] - last_box['top'] < 20:
            return None, last_index
        avg_bull_x0 = 0
        if bull_x0_list:
            avg_bull_x0 = sum(bull_x0_list) / len(bull_x0_list)
        else:
            avg_bull_x0 = box['x0']
        # Far right of the typical bullet column: reject.
        if box['x0'] - avg_bull_x0 > 10:
            return None, last_index
        index_str = has_bull.group(1)
        index = index_int(index_str)
        # Previous line ends with a colon (ASCII or full-width): this "bullet"
        # is likely a list item under it, not a question.
        if last_section[-1] == ':' or last_section[-1] == ':':
            return None, last_index
        if not last_index or index >= last_index:
            bull_x0_list.append(box['x0'])
            return has_bull, index
        # Out-of-order index: accept only with extra evidence (question mark,
        # title layout, or interrogative opening word).
        if section[-1] == '?' or section[-1] == '?':
            bull_x0_list.append(box['x0'])
            return has_bull, index
        if box['layout_type'] == 'title':
            bull_x0_list.append(box['x0'])
            return has_bull, index
        pure_section = section.lstrip(re.match(reg, section).group()).lower()
        ask_reg = r'(what|when|where|how|why|which|who|whose|为什么|为啥|哪)'
        if re.match(ask_reg, pure_section):
            bull_x0_list.append(box['x0'])
            return has_bull, index
    return None, last_index
def index_int(index_str):
    """Parse a bullet index written as an arabic number, an English number
    word, a Chinese numeral, or a roman numeral; return -1 when unparseable.

    The converters are tried lazily in order so that the cheap int() path
    never touches the optional word/Chinese/roman parsers.
    """
    try:
        return int(index_str)
    except ValueError:
        pass
    try:
        return w2n.word_to_num(index_str)
    except ValueError:
        pass
    try:
        return cn2an(index_str)
    except ValueError:
        pass
    try:
        return r.number(index_str)
    except ValueError:
        return -1
def qbullets_category(sections):
    """Choose a question-numbering pattern for the given sections.

    Each pattern scores at most one hit (the scan stops at its first matching
    section), so effectively the first listed pattern with any match wins.
    Returns (index, pattern); with no match at all the index is -1, which
    selects the last pattern from the end of the list.
    """
    hits = [0] * len(QUESTION_PATTERN)
    for i, patt in enumerate(QUESTION_PATTERN):
        for sec in sections:
            if re.match(patt, sec) and not not_bullet(sec):
                hits[i] += 1
                break  # one hit per pattern
    best, top = -1, 0
    for i, h in enumerate(hits):
        if h > top:
            best, top = i, h
    return best, QUESTION_PATTERN[best]
# Section-numbering scheme groups used by bullets_category(); each inner list
# is one style, ordered from coarse to fine heading levels.
BULLET_PATTERN = [[
    # Chinese statute/book style with CJK-or-arabic numerals.
    r"第[零一二三四五六七八九十百0-9]+(分?编|部分)",
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"第[零一二三四五六七八九十百0-9]+条",
    r"[\((][零一二三四五六七八九十百]+[\))]",
], [
    # Arabic-numeral outline style (1., 1.1, 1.1.1, ...).
    r"第[0-9]+章",
    r"第[0-9]+节",
    r"[0-9]{,2}[\. 、]",
    r"[0-9]{,2}\.[0-9]{,2}[^a-zA-Z/%~-]",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
], [
    # Chinese-numeral style.
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"[零一二三四五六七八九十百]+[ 、]",
    r"[\((][零一二三四五六七八九十百]+[\))]",
    r"[\((][0-9]{,2}[\))]",
], [
    # English part/chapter/section style.
    r"PART (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
    r"Chapter (I+V?|VI*|XI|IX|X)",
    r"Section [0-9]+",
    r"Article [0-9]+"
], [
    # Markdown heading levels (# through ######).
    r"^#[^#]",
    r"^##[^#]",
    r"^###.*",
    r"^####.*",
    r"^#####.*",
    r"^######.*",
]
]
def random_choices(arr, k):
    """Pick k items from arr (with replacement), capping k at len(arr)."""
    return random.choices(arr, k=min(k, len(arr)))
def not_bullet(line):
    """True for numeric-looking lines that are NOT real bullets: a bare "0"
    prefix, a number followed by spaced digits/units, or trailing dot runs
    (e.g. TOC leader dots)."""
    patterns = (r"0", r"[0-9]+ +[0-9~个只-]", r"[0-9]+\.{2,}")
    return any(re.match(p, line) for p in patterns)
def bullets_category(sections):
    """Return the index of the BULLET_PATTERN group matching the most
    sections, or -1 when nothing matches at all."""
    hits = [0] * len(BULLET_PATTERN)
    for i, group in enumerate(BULLET_PATTERN):
        for sec in sections:
            stripped = sec.strip()
            # A section counts once per group: first matching pattern wins.
            if any(re.match(p, stripped) and not not_bullet(stripped) for p in group):
                hits[i] += 1
    best, top = -1, 0
    for i, h in enumerate(hits):
        if h > top:
            best, top = i, h
    return best
def is_english(texts):
    """Heuristically decide whether the input is (mostly) English text.

    Accepts a string (checked character by character) or a list of strings.
    Returns True when more than 80% of the items consist entirely of ASCII
    letters/digits/whitespace/common punctuation; False for empty or
    non-str/list input.
    """
    if not texts:
        return False
    # One or more characters from the allowed class. The previous pattern had
    # no quantifier, so fullmatch() could never accept a multi-character
    # string and lists of real words always scored zero.
    pattern = re.compile(r"[`a-zA-Z0-9\s.,':;/\"?<>!\(\)\-]+")
    if isinstance(texts, str):
        texts = list(texts)
    elif isinstance(texts, list):
        texts = [t for t in texts if isinstance(t, str) and t.strip()]
    else:
        return False
    if not texts:
        return False
    eng = sum(1 for t in texts if pattern.fullmatch(t.strip()))
    return (eng / len(texts)) > 0.8
def is_chinese(text):
    """True when more than 20% of the characters fall in the CJK Unified
    Ideographs range (U+4E00..U+9FFF); False for empty input."""
    if not text:
        return False
    cjk = sum(1 for ch in text if '\u4e00' <= ch <= '\u9fff')
    return cjk / len(text) > 0.2
def tokenize(d, txt, eng):
    """Fill ``d`` with the raw text plus coarse and fine-grained token fields.

    HTML table tags are stripped before tokenization so markup does not leak
    into the token streams.
    """
    from . import rag_tokenizer
    d["content_with_weight"] = txt
    plain = re.sub(r"</?(table|td|caption|tr|th)( [^<>]{0,12})?>", " ", txt)
    d["content_ltks"] = rag_tokenizer.tokenize(plain)
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
def split_with_pattern(d, pattern: str, content: str, eng) -> list:
    """Split *content* on *pattern* (delimiters captured), keep each delimiter
    attached to the piece that precedes it, and tokenize every non-empty piece
    into a deep copy of template document *d*."""
    pieces = re.split(r"(%s)" % pattern, content, flags=re.DOTALL)
    docs = []
    # Even indices are text, odd indices are the captured delimiters.
    for idx in range(0, len(pieces), 2):
        segment = pieces[idx]
        if not segment:
            continue
        if idx + 1 < len(pieces):
            segment += pieces[idx + 1]
        doc_copy = copy.deepcopy(d)
        tokenize(doc_copy, segment, eng)
        docs.append(doc_copy)
    return docs
def tokenize_chunks(chunks, doc, eng, pdf_parser=None, child_delimiters_pattern=None):
    """Wrap text chunks as search-engine documents.

    With a *pdf_parser*, each chunk is cropped for its image/positions and
    stripped of position tags; otherwise a synthetic position is attached.
    When *child_delimiters_pattern* is given, each chunk is further split
    into child documents that keep the parent text in ``mom_with_weight``.
    """
    docs = []
    for pos, chunk in enumerate(chunks):
        if not chunk.strip():
            continue
        logging.debug("-- {}".format(chunk))
        entry = copy.deepcopy(doc)
        if pdf_parser:
            try:
                entry["image"], coords = pdf_parser.crop(chunk, need_position=True)
                add_positions(entry, coords)
                chunk = pdf_parser.remove_tag(chunk)
            except NotImplementedError:
                # Parser cannot crop — keep the chunk untouched, no positions.
                pass
        else:
            add_positions(entry, [[pos] * 5])
        if child_delimiters_pattern:
            entry["mom_with_weight"] = chunk
            docs.extend(split_with_pattern(entry, child_delimiters_pattern, chunk, eng))
            continue
        tokenize(entry, chunk, eng)
        docs.append(entry)
    return docs
def tokenize_chunks_with_images(chunks, doc, eng, images, child_delimiters_pattern=None):
    """Wrap (chunk, image) pairs as search-engine documents.

    Mirrors :func:`tokenize_chunks` but takes pre-extracted images, pairing
    chunks and images positionally via ``zip``.
    """
    docs = []
    for pos, (chunk, img) in enumerate(zip(chunks, images)):
        if not chunk.strip():
            continue
        logging.debug("-- {}".format(chunk))
        entry = copy.deepcopy(doc)
        entry["image"] = img
        add_positions(entry, [[pos] * 5])
        if child_delimiters_pattern:
            entry["mom_with_weight"] = chunk
            docs.extend(split_with_pattern(entry, child_delimiters_pattern, chunk, eng))
            continue
        tokenize(entry, chunk, eng)
        docs.append(entry)
    return docs
def tokenize_table(tbls, doc, eng, batch_size=10):
    """Wrap extracted tables (and table/figure images) as ES documents.

    Args:
        tbls: iterable of ((image, rows), positions) where *rows* is either a
            ready-made string (e.g. HTML) or a list of row strings.
        doc: template document deep-copied for every emitted chunk.
        eng: True when the document language is English (selects the row
            delimiter used when joining row batches).
        batch_size: number of rows merged into one chunk when *rows* is a list.

    Returns:
        List of chunk dicts with ``doc_type_kwd`` set to "table" (or "image"
        when an image is present but the content has no ``<tr>`` markup).
    """
    res = []
    # add tables
    for (img, rows), poss in tbls:
        if not rows:
            continue
        if isinstance(rows, str):
            # Pre-rendered content: emit exactly one chunk for the table.
            d = copy.deepcopy(doc)
            tokenize(d, rows, eng)
            d["content_with_weight"] = rows
            d["doc_type_kwd"] = "table"
            if img:
                d["image"] = img
                # No <tr> tag means this is really a figure, not a table.
                if d["content_with_weight"].find("<tr>") < 0:
                    d["doc_type_kwd"] = "image"
            if poss:
                add_positions(d, poss)
            res.append(d)
            continue
        de = "; " if eng else "; "
        # Row list: merge *batch_size* rows per emitted chunk.
        for i in range(0, len(rows), batch_size):
            d = copy.deepcopy(doc)
            r = de.join(rows[i:i + batch_size])
            tokenize(d, r, eng)
            d["doc_type_kwd"] = "table"
            if img:
                d["image"] = img
                if d["content_with_weight"].find("<tr>") < 0:
                    d["doc_type_kwd"] = "image"
            add_positions(d, poss)
            res.append(d)
    return res
def attach_media_context(chunks, table_context_size=0, image_context_size=0):
    """
    Attach surrounding text chunk content to media chunks (table/image).
    Best-effort ordering: if positional info exists on any chunk, use it to
    order chunks before collecting context; otherwise keep original order.

    Args:
        chunks: list of chunk dicts; mutated in place (and reordered when
            positional metadata is available on any chunk).
        table_context_size: per-side token budget of context for table chunks
            (0 disables table context).
        image_context_size: per-side token budget of context for image chunks.

    Returns:
        The same list object, possibly reordered, with media chunk text
        fields rewritten to include surrounding context.
    """
    from . import rag_tokenizer
    if not chunks or (table_context_size <= 0 and image_context_size <= 0):
        return chunks

    def is_image_chunk(ck):
        # Explicitly flagged as image, or has an image payload with no text.
        if ck.get("doc_type_kwd") == "image":
            return True
        text_val = ck.get("content_with_weight") if isinstance(ck.get("content_with_weight"), str) else ck.get("text")
        has_text = isinstance(text_val, str) and text_val.strip()
        return bool(ck.get("image")) and not has_text

    def is_table_chunk(ck):
        return ck.get("doc_type_kwd") == "table"

    def is_text_chunk(ck):
        # Anything that is neither an image nor a table chunk.
        return not is_image_chunk(ck) and not is_table_chunk(ck)

    def get_text(ck):
        # Prefer the indexed text field, then the raw "text" field.
        if isinstance(ck.get("content_with_weight"), str):
            return ck["content_with_weight"]
        if isinstance(ck.get("text"), str):
            return ck["text"]
        return ""

    def split_sentences(text):
        # Split on CJK/ASCII sentence punctuation, keeping each delimiter
        # attached to the sentence it terminates.
        pattern = r"([.。!?!?;;::\n])"
        parts = re.split(pattern, text)
        sentences = []
        buf = ""
        for p in parts:
            if not p:
                continue
            if re.fullmatch(pattern, p):
                buf += p
                sentences.append(buf)
                buf = ""
            else:
                buf += p
        if buf:
            sentences.append(buf)
        return sentences

    def get_bounds_by_page(ck):
        # Return {page_number: (top, bottom)} vertical extents for the chunk,
        # merged across all recorded positions; {} when nothing usable.
        bounds = {}
        try:
            if ck.get("position_int"):
                for pos in ck["position_int"]:
                    if not pos or len(pos) < 5:
                        continue
                    pn, _, _, top, bottom = pos
                    if pn is None or top is None:
                        continue
                    top_val = float(top)
                    bottom_val = float(bottom) if bottom is not None else top_val
                    if bottom_val < top_val:
                        top_val, bottom_val = bottom_val, top_val
                    pn = int(pn)
                    if pn in bounds:
                        bounds[pn] = (min(bounds[pn][0], top_val), max(bounds[pn][1], bottom_val))
                    else:
                        bounds[pn] = (top_val, bottom_val)
            else:
                # Fall back to scalar page/top/bottom fields.
                pn = None
                if ck.get("page_num_int"):
                    pn = ck["page_num_int"][0]
                elif ck.get("page_number") is not None:
                    pn = ck.get("page_number")
                if pn is None:
                    return bounds
                top = None
                if ck.get("top_int"):
                    top = ck["top_int"][0]
                elif ck.get("top") is not None:
                    top = ck.get("top")
                if top is None:
                    return bounds
                bottom = ck.get("bottom")
                pn = int(pn)
                top_val = float(top)
                bottom_val = float(bottom) if bottom is not None else top_val
                if bottom_val < top_val:
                    top_val, bottom_val = bottom_val, top_val
                bounds[pn] = (top_val, bottom_val)
        except Exception:
            # Malformed position metadata: treat as positionless.
            return {}
        return bounds

    def trim_to_tokens(text, token_budget, from_tail=False):
        # Collect sentences (from the head, or tail when from_tail=True)
        # until the token budget is reached; the sentence that crosses the
        # budget is still included.
        if token_budget <= 0 or not text:
            return ""
        sentences = split_sentences(text)
        if not sentences:
            return ""
        collected = []
        remaining = token_budget
        seq = reversed(sentences) if from_tail else sentences
        for s in seq:
            tks = num_tokens_from_string(s)
            if tks <= 0:
                continue
            if tks > remaining:
                collected.append(s)
                break
            collected.append(s)
            remaining -= tks
        if from_tail:
            collected = list(reversed(collected))
        return "".join(collected)

    def find_mid_sentence_index(sentences):
        # Index of the sentence closest to the token-count midpoint.
        if not sentences:
            return 0
        total = sum(max(0, num_tokens_from_string(s)) for s in sentences)
        if total <= 0:
            return max(0, len(sentences) // 2)
        target = total / 2.0
        best_idx = 0
        best_diff = None
        cum = 0
        for i, s in enumerate(sentences):
            cum += max(0, num_tokens_from_string(s))
            diff = abs(cum - target)
            if best_diff is None or diff < best_diff:
                best_diff = diff
                best_idx = i
        return best_idx

    def collect_context_from_sentences(sentences, boundary_idx, token_budget):
        # Gather up to *token_budget* tokens of context on each side of the
        # boundary sentence (which itself belongs to the "previous" side).
        prev_ctx = []
        remaining_prev = token_budget
        for s in reversed(sentences[:boundary_idx + 1]):
            if remaining_prev <= 0:
                break
            tks = num_tokens_from_string(s)
            if tks <= 0:
                continue
            if tks > remaining_prev:
                s = trim_to_tokens(s, remaining_prev, from_tail=True)
                tks = num_tokens_from_string(s)
            prev_ctx.append(s)
            remaining_prev -= tks
        prev_ctx.reverse()
        next_ctx = []
        remaining_next = token_budget
        for s in sentences[boundary_idx + 1:]:
            if remaining_next <= 0:
                break
            tks = num_tokens_from_string(s)
            if tks <= 0:
                continue
            if tks > remaining_next:
                s = trim_to_tokens(s, remaining_next, from_tail=False)
                tks = num_tokens_from_string(s)
            next_ctx.append(s)
            remaining_next -= tks
        return prev_ctx, next_ctx

    def extract_position(ck):
        # Best-effort (page, top, left) for ordering; None fields when absent.
        pn = None
        top = None
        left = None
        try:
            if ck.get("page_num_int"):
                pn = ck["page_num_int"][0]
            elif ck.get("page_number") is not None:
                pn = ck.get("page_number")
            if ck.get("top_int"):
                top = ck["top_int"][0]
            elif ck.get("top") is not None:
                top = ck.get("top")
            if ck.get("position_int"):
                left = ck["position_int"][0][1]
            elif ck.get("x0") is not None:
                left = ck.get("x0")
        except Exception:
            pn = top = left = None
        return pn, top, left

    # Partition chunks into positioned (sortable) and unpositioned ones.
    indexed = list(enumerate(chunks))
    positioned_indices = []
    unpositioned_indices = []
    for idx, ck in indexed:
        pn, top, left = extract_position(ck)
        if pn is not None and top is not None:
            positioned_indices.append((idx, pn, top, left if left is not None else 0))
        else:
            unpositioned_indices.append(idx)
    if positioned_indices:
        # Reading order: page, then top, then left; original index as tiebreak.
        positioned_indices.sort(key=lambda x: (int(x[1]), int(x[2]), int(x[3]), x[0]))
        ordered_indices = [i for i, _, _, _ in positioned_indices] + unpositioned_indices
    else:
        ordered_indices = [idx for idx, _ in indexed]
    # Pre-compute per-page vertical bounds for every text chunk.
    text_bounds = []
    for idx, ck in indexed:
        if not is_text_chunk(ck):
            continue
        bounds = get_bounds_by_page(ck)
        if bounds:
            text_bounds.append((idx, bounds))
    for sorted_pos, idx in enumerate(ordered_indices):
        ck = chunks[idx]
        token_budget = image_context_size if is_image_chunk(ck) else table_context_size if is_table_chunk(ck) else 0
        if token_budget <= 0:
            continue
        prev_ctx = []
        next_ctx = []
        media_bounds = get_bounds_by_page(ck)
        best_idx = None
        best_dist = None
        candidate_count = 0
        # Pick the text chunk that vertically overlaps the media chunk on the
        # same page and whose center is closest to the media center.
        if media_bounds and text_bounds:
            for text_idx, bounds in text_bounds:
                for pn, (t_top, t_bottom) in bounds.items():
                    if pn not in media_bounds:
                        continue
                    m_top, m_bottom = media_bounds[pn]
                    if m_bottom < t_top or m_top > t_bottom:
                        continue
                    candidate_count += 1
                    m_mid = (m_top + m_bottom) / 2.0
                    t_mid = (t_top + t_bottom) / 2.0
                    dist = abs(m_mid - t_mid)
                    if best_dist is None or dist < best_dist:
                        best_dist = dist
                        best_idx = text_idx
        # Fallback: when the media chunk is the first/last item on its page,
        # borrow the nearest text neighbor on that page.
        if best_idx is None and media_bounds:
            media_page = min(media_bounds.keys())
            page_order = []
            for ordered_idx in ordered_indices:
                pn, _, _ = extract_position(chunks[ordered_idx])
                if pn == media_page:
                    page_order.append(ordered_idx)
            if page_order and idx in page_order:
                pos_in_page = page_order.index(idx)
                if pos_in_page == 0:
                    for neighbor in page_order[pos_in_page + 1:]:
                        if is_text_chunk(chunks[neighbor]):
                            best_idx = neighbor
                            break
                elif pos_in_page == len(page_order) - 1:
                    for neighbor in reversed(page_order[:pos_in_page]):
                        if is_text_chunk(chunks[neighbor]):
                            best_idx = neighbor
                            break
        if best_idx is not None:
            base_text = get_text(chunks[best_idx])
            sentences = split_sentences(base_text)
            if sentences:
                boundary_idx = find_mid_sentence_index(sentences)
                prev_ctx, next_ctx = collect_context_from_sentences(sentences, boundary_idx, token_budget)
        if not prev_ctx and not next_ctx:
            continue
        # Rebuild the media chunk's text as: prev context, own text, next context.
        self_text = get_text(ck)
        pieces = [*prev_ctx]
        if self_text:
            pieces.append(self_text)
        pieces.extend(next_ctx)
        combined = "\n".join(pieces)
        original = ck.get("content_with_weight")
        if "content_with_weight" in ck:
            ck["content_with_weight"] = combined
        elif "text" in ck:
            original = ck.get("text")
            ck["text"] = combined
        if combined != original:
            # Keep derived token fields in sync with the rewritten text.
            if "content_ltks" in ck:
                ck["content_ltks"] = rag_tokenizer.tokenize(combined)
            if "content_sm_ltks" in ck:
                ck["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(
                    ck.get("content_ltks", rag_tokenizer.tokenize(combined)))
    if positioned_indices:
        # Persist the reading order back into the caller's list.
        chunks[:] = [chunks[i] for i in ordered_indices]
    return chunks
def append_context2table_image4pdf(sections: list, tabls: list, table_context_size=0):
    """Prepend/append surrounding page text to each PDF table's content.

    Args:
        sections: list of (text, position-tag) pairs from the PDF parser.
        tabls: list of ((image, table_content), positions) table entries.
        table_context_size: per-side token budget of context (0 disables).

    Returns:
        New list of ((image, table_with_context), positions) entries.
    """
    from deepdoc.parser import PdfParser
    if table_context_size <=0:
        return tabls
    # Bucket text blocks by page, keeping their bounding boxes.
    page_bucket = defaultdict(list)
    for i, (txt, poss) in enumerate(sections):
        poss = PdfParser.extract_positions(poss)
        for page, left, right, top, bottom in poss:
            page = page[0]
            page_bucket[page].append(((left, top, right, bottom), txt))

    def upper_context(page, i):
        # Collect sentences walking upward from block *i* on *page* (crossing
        # page boundaries backwards) until the token budget is exceeded.
        txt = ""
        if page not in page_bucket:
            i = -1
        while num_tokens_from_string(txt) < table_context_size:
            if i < 0:
                page -= 1
                if page < 0 or page not in page_bucket:
                    break
                i = len(page_bucket[page]) -1
            blks = page_bucket[page]
            (_, _, _, _), cnt = blks[i]
            # Sentences in reverse order so the nearest text is added first.
            txts = re.split(r"([。!??;!\n]|\. )", cnt, flags=re.DOTALL)[::-1]
            for j in range(0, len(txts), 2):
                txt = (txts[j+1] if j+1<len(txts) else "") + txts[j] + txt
                if num_tokens_from_string(txt) > table_context_size:
                    break
            i -= 1
        return txt

    def lower_context(page, i):
        # Collect sentences walking downward from block *i* on *page*
        # (crossing page boundaries forward) until the budget is exceeded.
        txt = ""
        if page not in page_bucket:
            return txt
        while num_tokens_from_string(txt) < table_context_size:
            if i >= len(page_bucket[page]):
                page += 1
                if page not in page_bucket:
                    break
                i = 0
            blks = page_bucket[page]
            (_, _, _, _), cnt = blks[i]
            txts = re.split(r"([。!??;!\n]|\. )", cnt, flags=re.DOTALL)
            for j in range(0, len(txts), 2):
                txt += txts[j] + (txts[j+1] if j+1<len(txts) else "")
                if num_tokens_from_string(txt) > table_context_size:
                    break
            i += 1
        return txt

    res = []
    for (img, tb), poss in tabls:
        # First and last recorded positions bound the table's extent.
        page, left, top, right, bott = poss[0]
        _page, _left, _top, _right, _bott = poss[-1]
        if isinstance(tb, list):
            tb = "\n".join(tb)
        i = 0
        blks = page_bucket.get(page, [])
        _tb = tb
        # Scan the page's text blocks for the pair that sandwiches the table.
        while i < len(blks):
            if i + 1 >= len(blks):
                if _page > page:
                    # Table spans onto the next page; continue the scan there.
                    page += 1
                    i = 0
                    blks = page_bucket.get(page, [])
                    continue
                tb = upper_context(page, i) + tb + lower_context(page+1, 0)
                break
            (_, t, r, b), txt = blks[i]
            if b > top:
                break
            (_, _t, _r, _b), _txt = blks[i+1]
            if _t < _bott:
                i += 1
                continue
            tb = upper_context(page, i) + tb + lower_context(page, i)
            break
        if _tb == tb:
            # Nothing was attached in the scan: fall back to page-boundary context.
            tb = upper_context(page, -1) + tb + lower_context(page+1, 0)
        res.append(((img, tb), poss))
    return res
def add_positions(d, poss):
    """Attach page/position metadata to document dict *d*.

    *poss* is an iterable of (page, left, right, top, bottom) rows; page
    numbers are stored 1-based. No-op when *poss* is empty.
    """
    if not poss:
        return
    pages, positions, tops = [], [], []
    for pn, left, right, top, bottom in poss:
        page_no = int(pn + 1)
        pages.append(page_no)
        tops.append(int(top))
        positions.append((page_no, int(left), int(right), int(top), int(bottom)))
    d["page_num_int"] = pages
    d["position_int"] = positions
    d["top_int"] = tops
def remove_contents_table(sections, eng=False):
    """Remove a table-of-contents (or acknowledgements) block from *sections*.

    *sections* items are plain strings or (text, ...) tuples; the list is
    mutated in place. After a TOC heading is found, entries are dropped until
    the body repeats the first TOC entry's prefix (searched within the next
    128 sections).
    """
    i = 0
    while i < len(sections):
        def get(i):
            # Text of section *i*, whether it is a bare string or a tuple.
            nonlocal sections
            return (sections[i] if isinstance(sections[i],
                                              type("")) else sections[i][0]).strip()
        if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$",
                        re.sub(r"( | |\u3000)+", "", get(i).split("@@")[0], flags=re.IGNORECASE)):
            i += 1
            continue
        # Drop the TOC heading itself.
        sections.pop(i)
        if i >= len(sections):
            break
        # Prefix of the first TOC entry: first 3 chars (CJK) or 2 words (English).
        prefix = get(i)[:3] if not eng else " ".join(get(i).split()[:2])
        while not prefix:
            sections.pop(i)
            if i >= len(sections):
                break
            prefix = get(i)[:3] if not eng else " ".join(get(i).split()[:2])
        sections.pop(i)
        if i >= len(sections) or not prefix:
            break
        # Drop TOC entries until the body text repeats the prefix.
        for j in range(i, min(i + 128, len(sections))):
            if not re.match(prefix, get(j)):
                continue
            for _ in range(i, j):
                sections.pop(i)
            break
def make_colon_as_title(sections):
    """Promote the lead-in sentence of a colon-terminated paragraph to a title.

    For (text, layout) sections ending in a colon, the last sentence before
    the colon is inserted right before the section as a synthetic
    ("...", "title") entry. Mutates *sections* in place; returns early only
    for trivial inputs ([] or a plain-string list).
    """
    if not sections:
        return []
    if isinstance(sections[0], type("")):
        # Plain-string sections carry no layout info; nothing to do.
        return sections
    i = 0
    while i < len(sections):
        txt, layout = sections[i]
        i += 1
        txt = txt.split("@")[0].strip()
        if not txt:
            continue
        if txt[-1] not in "::":
            continue
        # Reverse so splitting finds the *last* sentence before the colon.
        txt = txt[::-1]
        arr = re.split(r"([。?!!?;;]| \.)", txt)
        # NOTE(review): arr[1] is the captured delimiter (1-2 chars), so
        # `len(arr[1]) < 32` appears to always be true, making the insert
        # below unreachable — possibly meant `len(arr[0])`; verify intent.
        if len(arr) < 2 or len(arr[1]) < 32:
            continue
        sections.insert(i - 1, (arr[0][::-1], "title"))
        # Skip past the entry just processed (list shifted by the insert).
        i += 1
def title_frequency(bull, sections):
    """Assign a bullet level to every section and find the dominant title level.

    Args:
        bull: index into BULLET_PATTERN (negative disables matching).
        sections: list of (text, layout) pairs.

    Returns:
        (most_level, levels): the most frequent level that is a real bullet
        level, and the per-section level list. Unmatched sections get
        ``len(BULLET_PATTERN[bull]) + 1``; layout-detected titles get
        ``len(BULLET_PATTERN[bull])``.
    """
    n_patterns = len(BULLET_PATTERN[bull])
    levels = [n_patterns + 1] * len(sections)
    if not sections or bull < 0:
        return n_patterns + 1, levels
    for idx, (txt, layout) in enumerate(sections):
        for rank, pattern in enumerate(BULLET_PATTERN[bull]):
            if re.match(pattern, txt.strip()) and not not_bullet(txt):
                levels[idx] = rank
                break
        else:
            if re.search(r"(title|head)", layout) and not not_title(txt.split("@")[0]):
                levels[idx] = n_patterns
    most_level = n_patterns + 1
    # Most frequent level that corresponds to an actual bullet/title level.
    for level, _count in Counter(levels).most_common():
        if level <= n_patterns:
            most_level = level
            break
    return most_level, levels
def not_title(txt):
    """Truthy when *txt* does not look like a section title.

    Returns False for Chinese article headings ("第N条"), True for overlong
    lines, and otherwise the re.search Match (or None) for mid-sentence
    punctuation — callers rely only on truthiness.
    """
    if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt):
        return False
    too_long = len(txt.split()) > 12 or (txt.find(" ") < 0 and len(txt) >= 32)
    if too_long:
        return True
    return re.search(r"[,;,。;!!]", txt)
def tree_merge(bull, sections, depth):
    """Merge sections into a hierarchy and flatten it at the requested depth.

    Args:
        bull: index into BULLET_PATTERN (negative returns input unchanged).
        sections: list of strings or (text, layout) pairs.
        depth: 1-based hierarchy depth to merge at.

    Returns:
        List of merged text elements produced by the Node tree (Node is a
        project class defined elsewhere in this module).
    """
    if not sections or bull < 0:
        return sections
    if isinstance(sections[0], type("")):
        sections = [(s, "") for s in sections]
    # filter out position information in pdf sections
    sections = [(t, o) for t, o in sections if
                t and len(t.split("@")[0].strip()) > 1 and not re.match(r"[0-9]+$", t.split("@")[0].strip())]

    def get_level(bull, section):
        # Bullet rank + 1 when a pattern matches; otherwise patterns+1 for a
        # layout-detected title, patterns+2 for plain body text.
        text, layout = section
        text = re.sub(r"\u3000", " ", text).strip()
        for i, title in enumerate(BULLET_PATTERN[bull]):
            if re.match(title, text.strip()):
                return i + 1, text
        else:
            if re.search(r"(title|head)", layout) and not not_title(text):
                return len(BULLET_PATTERN[bull]) + 1, text
            else:
                return len(BULLET_PATTERN[bull]) + 2, text

    level_set = set()
    lines = []
    for section in sections:
        level, text = get_level(bull, section)
        if not text.strip("\n"):
            continue
        lines.append((level, text))
        level_set.add(level)
    sorted_levels = sorted(list(level_set))
    if depth <= len(sorted_levels):
        target_level = sorted_levels[depth - 1]
    else:
        target_level = sorted_levels[-1]
        # Avoid targeting the plain-text level; back off to a title level.
        if target_level == len(BULLET_PATTERN[bull]) + 2:
            target_level = sorted_levels[-2] if len(sorted_levels) > 1 else sorted_levels[0]
    root = Node(level=0, depth=target_level, texts=[])
    root.build_tree(lines)
    return [element for element in root.get_tree() if element]
def hierarchical_merge(bull, sections, depth):
if not sections or bull < 0:
return []
if isinstance(sections[0], type("")):
sections = [(s, "") for s in sections]
sections = [(t, o) for t, o in sections if
t and len(t.split("@")[0].strip()) > 1 and not re.match(r"[0-9]+$", t.split("@")[0].strip())]
bullets_size = len(BULLET_PATTERN[bull])
levels = [[] for _ in range(bullets_size + 2)]
for i, (txt, layout) in enumerate(sections):
for j, p in enumerate(BULLET_PATTERN[bull]):
if re.match(p, txt.strip()):
levels[j].append(i)
break
else:
if re.search(r"(title|head)", layout) and not not_title(txt):
levels[bullets_size].append(i)
else:
levels[bullets_size + 1].append(i)
sections = [t for t, _ in sections]
# for s in sections: print("--", s)
def binary_search(arr, target):
if not arr:
return -1
if target > arr[-1]:
return len(arr) - 1
if target < arr[0]:
return -1
s, e = 0, len(arr)
while e - s > 1:
i = (e + s) // 2
if target > arr[i]:
s = i
continue
elif target < arr[i]:
e = i
continue
else:
assert False
return s
cks = []
readed = [False] * len(sections)
levels = levels[::-1]
for i, arr in enumerate(levels[:depth]):
for j in arr:
if readed[j]:
continue
readed[j] = True
cks.append([j])
if i + 1 == len(levels) - 1:
continue
for ii in range(i + 1, len(levels)):
jj = binary_search(levels[ii], j)
if jj < 0:
continue
if levels[ii][jj] > cks[-1][-1]:
cks[-1].pop(-1)
cks[-1].append(levels[ii][jj])
for ii in cks[-1]:
readed[ii] = True
if not cks:
return cks
for i in range(len(cks)):
cks[i] = [sections[j] for j in cks[i][::-1]]
logging.debug("\n* ".join(cks[i]))
res = [[]]
num = [0]
for ck in cks:
if len(ck) == 1:
n = num_tokens_from_string(re.sub(r"@@[0-9]+.*", "", ck[0]))
if n + num[-1] < 218:
res[-1].append(ck[0])
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | true |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/__init__.py | deepdoc/__init__.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from beartype.claw import beartype_this_package
beartype_this_package()
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/vision/t_recognizer.py | deepdoc/vision/t_recognizer.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import sys
sys.path.insert(
0,
os.path.abspath(
os.path.join(
os.path.dirname(
os.path.abspath(__file__)),
'../../')))
from deepdoc.vision.seeit import draw_box
from deepdoc.vision import LayoutRecognizer, TableStructureRecognizer, OCR, init_in_out
import argparse
import re
import numpy as np
def main(args):
    """CLI entry: run layout detection or table-structure recognition (TSR)
    on the input images/PDFs and save annotated images (plus HTML for TSR).

    Note: argparse restricts --mode to {"layout", "tsr"}, so *layouts* is
    always bound below.
    """
    images, outputs = init_in_out(args)
    if args.mode.lower() == "layout":
        detr = LayoutRecognizer("layout")
        layouts = detr.forward(images, thr=float(args.threshold))
    if args.mode.lower() == "tsr":
        detr = TableStructureRecognizer()
        ocr = OCR()
        layouts = detr(images, thr=float(args.threshold))
    for i, lyt in enumerate(layouts):
        if args.mode.lower() == "tsr":
            #lyt = [t for t in lyt if t["type"] == "table column"]
            # Reconstruct the table as HTML next to the output image.
            html = get_table_html(images[i], lyt, ocr)
            with open(outputs[i] + ".html", "w+", encoding='utf-8') as f:
                f.write(html)
            # Convert TSR components to the box format draw_box expects.
            lyt = [{
                "type": t["label"],
                "bbox": [t["x0"], t["top"], t["x1"], t["bottom"]],
                "score": t["score"]
            } for t in lyt]
        img = draw_box(images[i], lyt, detr.labels, float(args.threshold))
        img.save(outputs[i], quality=95)
        logging.info("save result to: " + outputs[i])
def get_table_html(img, tb_cpns, ocr):
    """OCR a table image and merge the text boxes with TSR components into a
    standalone HTML document.

    Args:
        img: PIL image of the table region.
        tb_cpns: table-structure components with label/x0/x1/top/bottom/score.
        ocr: OCR engine instance (callable on a numpy image).

    Returns:
        Full HTML page string with the reconstructed table.
    """
    boxes = ocr(np.array(img))
    # Keep only well-formed boxes and sort them top-to-bottom (fuzziness is a
    # third of the mean box height).
    boxes = LayoutRecognizer.sort_Y_firstly(
        [{"x0": b[0][0], "x1": b[1][0],
          "top": b[0][1], "text": t[0],
          "bottom": b[-1][1],
          "layout_type": "table",
          "page_number": 0} for b, t in boxes if b[0][0] <= b[1][0] and b[0][1] <= b[-1][1]],
        np.mean([b[-1][1] - b[0][1] for b, _ in boxes]) / 3
    )

    def gather(kwd, fzy=10, ption=0.6):
        # Components whose label matches *kwd*, cleaned against the OCR
        # boxes, sorted top-to-bottom.
        nonlocal boxes
        eles = LayoutRecognizer.sort_Y_firstly(
            [r for r in tb_cpns if re.match(kwd, r["label"])], fzy)
        eles = LayoutRecognizer.layouts_cleanup(boxes, eles, 5, ption)
        return LayoutRecognizer.sort_Y_firstly(eles, 0)

    headers = gather(r".*header$")
    rows = gather(r".* (row|header)")
    spans = gather(r".*spanning")
    clmns = sorted([r for r in tb_cpns if re.match(
        r"table column$", r["label"])], key=lambda x: x["x0"])
    clmns = LayoutRecognizer.layouts_cleanup(boxes, clmns, 5, 0.5)
    # Tag each OCR box with its row (R), header (H), column (C) and span (SP).
    for b in boxes:
        ii = LayoutRecognizer.find_overlapped_with_threshold(b, rows, thr=0.3)
        if ii is not None:
            b["R"] = ii
            b["R_top"] = rows[ii]["top"]
            b["R_bott"] = rows[ii]["bottom"]
        ii = LayoutRecognizer.find_overlapped_with_threshold(b, headers, thr=0.3)
        if ii is not None:
            b["H_top"] = headers[ii]["top"]
            b["H_bott"] = headers[ii]["bottom"]
            b["H_left"] = headers[ii]["x0"]
            b["H_right"] = headers[ii]["x1"]
            b["H"] = ii
        ii = LayoutRecognizer.find_horizontally_tightest_fit(b, clmns)
        if ii is not None:
            b["C"] = ii
            b["C_left"] = clmns[ii]["x0"]
            b["C_right"] = clmns[ii]["x1"]
        # Spanning cells reuse the header-extent keys.
        ii = LayoutRecognizer.find_overlapped_with_threshold(b, spans, thr=0.3)
        if ii is not None:
            b["H_top"] = spans[ii]["top"]
            b["H_bott"] = spans[ii]["bottom"]
            b["H_left"] = spans[ii]["x0"]
            b["H_right"] = spans[ii]["x1"]
            b["SP"] = ii
    # %% escapes a literal '%' for the %-format below.
    html = """
    <html>
    <head>
    <style>
    ._table_1nkzy_11 {
    margin: auto;
    width: 70%%;
    padding: 10px;
    }
    ._table_1nkzy_11 p {
    margin-bottom: 50px;
    border: 1px solid #e1e1e1;
    }
    caption {
    color: #6ac1ca;
    font-size: 20px;
    height: 50px;
    line-height: 50px;
    font-weight: 600;
    margin-bottom: 10px;
    }
    ._table_1nkzy_11 table {
    width: 100%%;
    border-collapse: collapse;
    }
    th {
    color: #fff;
    background-color: #6ac1ca;
    }
    td:hover {
    background: #c1e8e8;
    }
    tr:nth-child(even) {
    background-color: #f2f2f2;
    }
    ._table_1nkzy_11 th,
    ._table_1nkzy_11 td {
    text-align: center;
    border: 1px solid #ddd;
    padding: 8px;
    }
    </style>
    </head>
    <body>
    %s
    </body>
    </html>
    """ % TableStructureRecognizer.construct_table(boxes, html=True)
    return html
if __name__ == "__main__":
    # CLI for the layout / table-structure-recognition demo.
    parser = argparse.ArgumentParser()
    parser.add_argument('--inputs',
                        help="Directory where to store images or PDFs, or a file path to a single image or PDF",
                        required=True)
    parser.add_argument('--output_dir', help="Directory where to store the output images. Default: './layouts_outputs'",
                        default="./layouts_outputs")
    parser.add_argument(
        '--threshold',
        help="A threshold to filter out detections. Default: 0.5",
        default=0.5)
    parser.add_argument('--mode', help="Task mode: layout recognition or table structure recognition", choices=["layout", "tsr"],
                        default="layout")
    args = parser.parse_args()
    main(args)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/vision/postprocess.py | deepdoc/vision/postprocess.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import re
import numpy as np
import cv2
from shapely.geometry import Polygon
import pyclipper
def build_post_process(config, global_config=None):
    """Instantiate a post-process object from a config dict.

    *config* must carry a 'name' key selecting the class; the remaining keys
    (merged with *global_config*) become constructor kwargs. The literal name
    "None" yields None; unknown names raise ValueError.
    """
    support_dict = {'DBPostProcess': DBPostProcess, 'CTCLabelDecode': CTCLabelDecode}
    cfg = copy.deepcopy(config)
    module_name = cfg.pop('name')
    if module_name == "None":
        return None
    if global_config is not None:
        cfg.update(global_config)
    module_class = support_dict.get(module_name)
    if module_class is None:
        raise ValueError(
            'post process only support {}'.format(list(support_dict)))
    return module_class(**cfg)
class DBPostProcess:
    """
    The post process for Differentiable Binarization (DB).

    Converts a DB text-detector probability map into text-region boxes
    ('quad') or polygons ('poly') in destination-image coordinates.
    """

    def __init__(self,
                 thresh=0.3,
                 box_thresh=0.7,
                 max_candidates=1000,
                 unclip_ratio=2.0,
                 use_dilation=False,
                 score_mode="fast",
                 box_type='quad',
                 **kwargs):
        # thresh: binarization threshold for the probability map.
        # box_thresh: minimum mean score for a candidate region to be kept.
        # max_candidates: cap on the number of contours considered.
        # unclip_ratio: expansion factor applied when unclipping regions.
        # score_mode: "fast" scores over the bounding rect, "slow" over the
        #   exact contour polygon.
        # box_type: 'quad' emits 4-point boxes, 'poly' arbitrary polygons.
        self.thresh = thresh
        self.box_thresh = box_thresh
        self.max_candidates = max_candidates
        self.unclip_ratio = unclip_ratio
        self.min_size = 3
        self.score_mode = score_mode
        self.box_type = box_type
        assert score_mode in [
            "slow", "fast"
        ], "Score mode must be in [slow, fast] but got: {}".format(score_mode)
        self.dilation_kernel = None if not use_dilation else np.array(
            [[1, 1], [1, 1]])

    def polygons_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
        """
        _bitmap: single map with shape (1, H, W),
            whose values are binarized as {0, 1}

        Returns (boxes, scores): polygon point lists scaled to the
        destination size, and their mean probability scores.
        """
        bitmap = _bitmap
        height, width = bitmap.shape
        boxes = []
        scores = []
        contours, _ = cv2.findContours((bitmap * 255).astype(np.uint8),
                                       cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        for contour in contours[:self.max_candidates]:
            # Simplify the contour before scoring/unclipping.
            epsilon = 0.002 * cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, epsilon, True)
            points = approx.reshape((-1, 2))
            if points.shape[0] < 4:
                continue
            score = self.box_score_fast(pred, points.reshape(-1, 2))
            if self.box_thresh > score:
                continue
            if points.shape[0] > 2:
                box = self.unclip(points, self.unclip_ratio)
                # Unclipping that splits into multiple paths is discarded.
                if len(box) > 1:
                    continue
            else:
                continue
            box = box.reshape(-1, 2)
            _, sside = self.get_mini_boxes(box.reshape((-1, 1, 2)))
            if sside < self.min_size + 2:
                continue
            box = np.array(box)
            # Rescale from bitmap coordinates to destination coordinates.
            box[:, 0] = np.clip(
                np.round(box[:, 0] / width * dest_width), 0, dest_width)
            box[:, 1] = np.clip(
                np.round(box[:, 1] / height * dest_height), 0, dest_height)
            boxes.append(box.tolist())
            scores.append(score)
        return boxes, scores

    def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
        """
        _bitmap: single map with shape (1, H, W),
            whose values are binarized as {0, 1}

        Returns (boxes, scores): int32 4-point boxes scaled to the
        destination size, and their mean probability scores.
        """
        bitmap = _bitmap
        height, width = bitmap.shape
        outs = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)
        # OpenCV 3 returns (image, contours, hierarchy); OpenCV 4 returns
        # (contours, hierarchy).
        if len(outs) == 3:
            _img, contours, _ = outs[0], outs[1], outs[2]
        elif len(outs) == 2:
            contours, _ = outs[0], outs[1]
        num_contours = min(len(contours), self.max_candidates)
        boxes = []
        scores = []
        for index in range(num_contours):
            contour = contours[index]
            points, sside = self.get_mini_boxes(contour)
            if sside < self.min_size:
                continue
            points = np.array(points)
            if self.score_mode == "fast":
                score = self.box_score_fast(pred, points.reshape(-1, 2))
            else:
                score = self.box_score_slow(pred, contour)
            if self.box_thresh > score:
                continue
            # Expand the region, then re-fit a minimal 4-point box.
            box = self.unclip(points, self.unclip_ratio).reshape(-1, 1, 2)
            box, sside = self.get_mini_boxes(box)
            if sside < self.min_size + 2:
                continue
            box = np.array(box)
            # Rescale from bitmap coordinates to destination coordinates.
            box[:, 0] = np.clip(
                np.round(box[:, 0] / width * dest_width), 0, dest_width)
            box[:, 1] = np.clip(
                np.round(box[:, 1] / height * dest_height), 0, dest_height)
            boxes.append(box.astype("int32"))
            scores.append(score)
        return np.array(boxes, dtype="int32"), scores

    def unclip(self, box, unclip_ratio):
        """Expand *box* outward proportionally to its area/perimeter ratio."""
        poly = Polygon(box)
        distance = poly.area * unclip_ratio / poly.length
        offset = pyclipper.PyclipperOffset()
        offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
        expanded = np.array(offset.Execute(distance))
        return expanded

    def get_mini_boxes(self, contour):
        """Return the minimal-area rectangle of *contour* as 4 corner points
        (ordered top-left, top-right, bottom-right, bottom-left) plus the
        length of its shorter side."""
        bounding_box = cv2.minAreaRect(contour)
        points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])
        # Resolve which of each left/right pair is top vs bottom.
        index_1, index_2, index_3, index_4 = 0, 1, 2, 3
        if points[1][1] > points[0][1]:
            index_1 = 0
            index_4 = 1
        else:
            index_1 = 1
            index_4 = 0
        if points[3][1] > points[2][1]:
            index_2 = 2
            index_3 = 3
        else:
            index_2 = 3
            index_3 = 2
        box = [
            points[index_1], points[index_2], points[index_3], points[index_4]
        ]
        return box, min(bounding_box[1])

    def box_score_fast(self, bitmap, _box):
        """
        box_score_fast: use bbox mean score as the mean score
        """
        h, w = bitmap.shape[:2]
        box = _box.copy()
        xmin = np.clip(np.floor(box[:, 0].min()).astype("int32"), 0, w - 1)
        xmax = np.clip(np.ceil(box[:, 0].max()).astype("int32"), 0, w - 1)
        ymin = np.clip(np.floor(box[:, 1].min()).astype("int32"), 0, h - 1)
        ymax = np.clip(np.ceil(box[:, 1].max()).astype("int32"), 0, h - 1)
        # Mean probability inside the box polygon, within its bounding rect.
        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
        box[:, 0] = box[:, 0] - xmin
        box[:, 1] = box[:, 1] - ymin
        cv2.fillPoly(mask, box.reshape(1, -1, 2).astype("int32"), 1)
        return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]

    def box_score_slow(self, bitmap, contour):
        """
        box_score_slow: use polygon mean score as the mean score
        """
        h, w = bitmap.shape[:2]
        contour = contour.copy()
        contour = np.reshape(contour, (-1, 2))
        xmin = np.clip(np.min(contour[:, 0]), 0, w - 1)
        xmax = np.clip(np.max(contour[:, 0]), 0, w - 1)
        ymin = np.clip(np.min(contour[:, 1]), 0, h - 1)
        ymax = np.clip(np.max(contour[:, 1]), 0, h - 1)
        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
        contour[:, 0] = contour[:, 0] - xmin
        contour[:, 1] = contour[:, 1] - ymin
        cv2.fillPoly(mask, contour.reshape(1, -1, 2).astype("int32"), 1)
        return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]

    def __call__(self, outs_dict, shape_list):
        """Decode a batch: *outs_dict['maps']* holds the probability maps and
        each *shape_list* row is (src_h, src_w, ratio_h, ratio_w). Returns a
        list of {'points': boxes} dicts, one per batch item."""
        pred = outs_dict['maps']
        if not isinstance(pred, np.ndarray):
            pred = pred.numpy()
        pred = pred[:, 0, :, :]
        segmentation = pred > self.thresh
        boxes_batch = []
        for batch_index in range(pred.shape[0]):
            src_h, src_w, ratio_h, ratio_w = shape_list[batch_index]
            if self.dilation_kernel is not None:
                mask = cv2.dilate(
                    np.array(segmentation[batch_index]).astype(np.uint8),
                    self.dilation_kernel)
            else:
                mask = segmentation[batch_index]
            if self.box_type == 'poly':
                boxes, scores = self.polygons_from_bitmap(pred[batch_index],
                                                          mask, src_w, src_h)
            elif self.box_type == 'quad':
                boxes, scores = self.boxes_from_bitmap(pred[batch_index], mask,
                                                       src_w, src_h)
            else:
                raise ValueError(
                    "box_type can only be one of ['quad', 'poly']")
            boxes_batch.append({'points': boxes})
        return boxes_batch
class BaseRecLabelDecode:
    """ Convert between text-label and text-index """

    def __init__(self, character_dict_path=None, use_space_char=False):
        # character_dict_path: optional UTF-8 file with one character per
        #   line; defaults to lowercase alphanumerics when absent.
        # use_space_char: append ' ' to the charset (file-based dict only).
        self.beg_str = "sos"
        self.end_str = "eos"
        # reverse: re-order latin runs for right-to-left (arabic) output.
        self.reverse = False
        self.character_str = []
        if character_dict_path is None:
            self.character_str = "0123456789abcdefghijklmnopqrstuvwxyz"
            dict_character = list(self.character_str)
        else:
            with open(character_dict_path, "rb") as fin:
                lines = fin.readlines()
                for line in lines:
                    line = line.decode('utf-8').strip("\n").strip("\r\n")
                    self.character_str.append(line)
            if use_space_char:
                self.character_str.append(" ")
            dict_character = list(self.character_str)
            if 'arabic' in character_dict_path:
                self.reverse = True
        dict_character = self.add_special_char(dict_character)
        # dict maps character -> index; character maps index -> character.
        self.dict = {}
        for i, char in enumerate(dict_character):
            self.dict[char] = i
        self.character = dict_character

    def pred_reverse(self, pred):
        # Reverse the sequence at the run level: latin/digit runs stay in
        # reading order while the runs themselves are emitted right-to-left.
        pred_re = []
        c_current = ''
        for c in pred:
            if not bool(re.search('[a-zA-Z0-9 :*./%+-]', c)):
                if c_current != '':
                    pred_re.append(c_current)
                pred_re.append(c)
                c_current = ''
            else:
                c_current += c
        if c_current != '':
            pred_re.append(c_current)
        return ''.join(pred_re[::-1])

    def add_special_char(self, dict_character):
        # Hook for subclasses to inject special tokens (e.g. CTC blank).
        return dict_character

    def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
        """ convert text-index into text-label.

        Returns a list of (text, mean_confidence) pairs, one per batch item.
        With is_remove_duplicate, consecutive repeats are collapsed (CTC);
        indices in get_ignored_tokens() are always dropped.
        """
        result_list = []
        ignored_tokens = self.get_ignored_tokens()
        batch_size = len(text_index)
        for batch_idx in range(batch_size):
            selection = np.ones(len(text_index[batch_idx]), dtype=bool)
            if is_remove_duplicate:
                selection[1:] = text_index[batch_idx][1:] != text_index[
                    batch_idx][:-1]
            for ignored_token in ignored_tokens:
                selection &= text_index[batch_idx] != ignored_token
            char_list = [
                self.character[text_id]
                for text_id in text_index[batch_idx][selection]
            ]
            if text_prob is not None:
                conf_list = text_prob[batch_idx][selection]
            else:
                conf_list = [1] * len(selection)
            if len(conf_list) == 0:
                conf_list = [0]
            text = ''.join(char_list)
            if self.reverse:  # for arabic rec
                text = self.pred_reverse(text)
            result_list.append((text, np.mean(conf_list).tolist()))
        return result_list

    def get_ignored_tokens(self):
        return [0]  # for ctc blank
class CTCLabelDecode(BaseRecLabelDecode):
    """CTC decoder: greedy argmax, collapse repeats, drop the blank token."""

    def __init__(self, character_dict_path=None, use_space_char=False,
                 **kwargs):
        super().__init__(character_dict_path, use_space_char)

    def __call__(self, preds, label=None, *args, **kwargs):
        # Some models return multiple heads; the CTC logits are last.
        if isinstance(preds, (tuple, list)):
            preds = preds[-1]
        if not isinstance(preds, np.ndarray):
            preds = preds.numpy()
        preds_idx = preds.argmax(axis=2)
        preds_prob = preds.max(axis=2)
        decoded = self.decode(preds_idx, preds_prob, is_remove_duplicate=True)
        if label is None:
            return decoded
        return decoded, self.decode(label)

    def add_special_char(self, dict_character):
        # Reserve index 0 for the CTC blank symbol.
        return ['blank'] + dict_character
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/vision/seeit.py | deepdoc/vision/seeit.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import PIL
from PIL import ImageDraw
def save_results(image_list, results, labels, output_dir='output/', threshold=0.5):
    """Render detection results onto each image and save them as JPEGs.

    One output file per input image, named by its index in the list.
    """
    os.makedirs(output_dir, exist_ok=True)
    for idx, im in enumerate(image_list):
        annotated = draw_box(im, results[idx], labels, threshold=threshold)
        out_path = os.path.join(output_dir, f"{idx}.jpg")
        annotated.save(out_path, quality=95)
        logging.debug("save result to: " + out_path)
def draw_box(im, result, labels, threshold=0.5):
    """Draw bounding boxes plus '<type> <score>' captions on a PIL image.

    Detections scoring below ``threshold`` are skipped. Returns the same
    image object, mutated in place.
    """
    thickness = min(im.size) // 320
    drawer = ImageDraw.Draw(im)
    palette = get_color_map_list(len(labels))
    # Map lower-cased label name -> RGB triple.
    clsid2color = {name.lower(): palette[i] for i, name in enumerate(labels)}

    kept = [r for r in result if r["score"] >= threshold]
    for dt in kept:
        color = tuple(clsid2color[dt["type"]])
        xmin, ymin, xmax, ymax = dt["bbox"]
        # Box outline drawn as a closed polyline.
        drawer.line(
            [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
             (xmin, ymin)],
            width=thickness,
            fill=color)
        # Caption: filled background strip above the box, white text on top.
        caption = "{} {:.4f}".format(dt["type"], dt["score"])
        tw, th = imagedraw_textsize_c(drawer, caption)
        drawer.rectangle(
            [(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill=color)
        drawer.text((xmin + 1, ymin - th), caption, fill=(255, 255, 255))
    return im
def get_color_map_list(num_classes):
    """
    Generate one distinct RGB color per class id (PASCAL-VOC-style map).

    Args:
        num_classes (int): number of class
    Returns:
        color_map (list): RGB color list, one [r, g, b] triple per class
    """
    flat = [0] * (num_classes * 3)
    for cls_id in range(num_classes):
        value = cls_id
        shift = 0
        # Spread the class id's bits across the three channels, MSB first.
        while value:
            for channel in range(3):
                flat[cls_id * 3 + channel] |= ((value >> channel) & 1) << (7 - shift)
            shift += 1
            value >>= 3
    return [flat[i:i + 3] for i in range(0, len(flat), 3)]
def imagedraw_textsize_c(draw, text):
    """Measure rendered text size (w, h), bridging old and new Pillow APIs.

    Pillow >= 10 removed ``ImageDraw.textsize`` in favor of ``textbbox``.
    """
    if int(PIL.__version__.split('.')[0]) >= 10:
        left, top, right, bottom = draw.textbbox((0, 0), text)
        return right - left, bottom - top
    return draw.textsize(text)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/vision/t_ocr.py | deepdoc/vision/t_ocr.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import logging
import os
import sys
# Make the repository root importable so `deepdoc` resolves when this
# script is executed directly from deepdoc/vision/.
sys.path.insert(
    0,
    os.path.abspath(
        os.path.join(
            os.path.dirname(
                os.path.abspath(__file__)),
            '../../')))
from deepdoc.vision.seeit import draw_box
from deepdoc.vision import OCR, init_in_out
import argparse
import numpy as np
# GPU selection: must be set before torch/onnxruntime initialize CUDA.
# os.environ['CUDA_VISIBLE_DEVICES'] = '0,2' #2 gpus, uncontinuous
os.environ['CUDA_VISIBLE_DEVICES'] = '0' #1 gpu
# os.environ['CUDA_VISIBLE_DEVICES'] = '' #cpu
def main(args):
    """OCR every input image/PDF page, saving annotated images + transcripts.

    Pages are fanned out with asyncio; when more than one CUDA device is
    visible they are round-robined across devices, with a per-device
    semaphore limiting each GPU to one page at a time.
    """
    import torch.cuda
    cuda_devices = torch.cuda.device_count()
    # One binary semaphore per GPU; None means single-device (no limiting).
    limiter = [asyncio.Semaphore(1) for _ in range(cuda_devices)] if cuda_devices > 1 else None
    ocr = OCR()
    images, outputs = init_in_out(args)

    def __ocr(i, id, img):
        # Blocking worker: OCR one page, draw boxes, write image + .txt.
        print("Task {} start".format(i))
        bxs = ocr(np.array(img), id)
        bxs = [(line[0], line[1][0]) for line in bxs]
        # Keep only well-formed boxes (x0 <= x1 and y0 <= y1).
        bxs = [{
            "text": t,
            "bbox": [b[0][0], b[0][1], b[1][0], b[-1][1]],
            "type": "ocr",
            "score": 1} for b, t in bxs if b[0][0] <= b[1][0] and b[0][1] <= b[-1][1]]
        img = draw_box(images[i], bxs, ["ocr"], 1.)
        img.save(outputs[i], quality=95)
        with open(outputs[i] + ".txt", "w+", encoding='utf-8') as f:
            f.write("\n".join([o["text"] for o in bxs]))
        print("Task {} done".format(i))

    async def __ocr_thread(i, id, img, limiter = None):
        # Run the blocking worker in a thread; serialize per-device access
        # via the semaphore when one is provided.
        if limiter:
            async with limiter:
                print(f"Task {i} use device {id}")
                await asyncio.to_thread(__ocr, i, id, img)
        else:
            await asyncio.to_thread(__ocr, i, id, img)

    async def __ocr_launcher():
        tasks = []
        for i, img in enumerate(images):
            # Round-robin pages over the available devices.
            dev_id = i % cuda_devices if cuda_devices > 1 else 0
            semaphore = limiter[dev_id] if limiter else None
            tasks.append(asyncio.create_task(__ocr_thread(i, dev_id, img, semaphore)))
        try:
            await asyncio.gather(*tasks, return_exceptions=False)
        except Exception as e:
            # On first failure: cancel siblings, drain them, then re-raise.
            logging.error("OCR tasks failed: {}".format(e))
            for t in tasks:
                t.cancel()
            await asyncio.gather(*tasks, return_exceptions=True)
            raise

    asyncio.run(__ocr_launcher())
    print("OCR tasks are all done")
if __name__ == "__main__":
    # CLI entry point: --inputs is a file or directory of images/PDFs;
    # --output_dir receives the annotated images and .txt transcripts.
    parser = argparse.ArgumentParser()
    parser.add_argument('--inputs',
                        help="Directory where to store images or PDFs, or a file path to a single image or PDF",
                        required=True)
    parser.add_argument('--output_dir', help="Directory where to store the output images. Default: './ocr_outputs'",
                        default="./ocr_outputs")
    args = parser.parse_args()
    main(args)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/vision/ocr.py | deepdoc/vision/ocr.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gc
import logging
import copy
import time
import os
from huggingface_hub import snapshot_download
from common.file_utils import get_project_base_directory
from common.misc_utils import pip_install_torch
from common import settings
from .operators import * # noqa: F403
from . import operators
import math
import numpy as np
import cv2
import onnxruntime as ort
from .postprocess import build_post_process
loaded_models = {}
def transform(data, ops=None):
    """Apply a pipeline of preprocessing operators to ``data`` in order.

    Short-circuits and returns None as soon as any operator returns None
    (i.e. the sample was filtered out).
    """
    for op in (ops or []):
        data = op(data)
        if data is None:
            return None
    return data
def create_operators(op_param_list, global_config=None):
    """
    create operators based on the config
    Args:
        params(list): a dict list, used to create some operators
    """
    assert isinstance(op_param_list, list), ('operator config should be a list')
    ops = []
    for op_cfg in op_param_list:
        # Each entry is a single-key mapping: {OperatorName: params-or-None}.
        assert isinstance(op_cfg, dict) and len(op_cfg) == 1, "yaml format error"
        op_name = list(op_cfg)[0]
        params = op_cfg[op_name] if op_cfg[op_name] is not None else {}
        if global_config is not None:
            params.update(global_config)
        # Look the operator class up by name and instantiate it.
        ops.append(getattr(operators, op_name)(**params))
    return ops
def load_model(model_dir, nm, device_id: int | None = None):
    """Load (and cache) an ONNX Runtime session for model ``nm``.

    Sessions are cached per (model path, device_id) so repeated calls reuse
    the same InferenceSession. Uses the CUDA provider when torch reports a
    usable GPU at the requested index, otherwise falls back to CPU.

    Returns:
        (sess, run_options): the InferenceSession and the RunOptions to pass
        to every ``sess.run`` call.

    Raises:
        ValueError: if the ``.onnx`` file does not exist.
    """
    model_file_path = os.path.join(model_dir, nm + ".onnx")
    # Cache key includes the device id so every GPU gets its own session.
    model_cached_tag = model_file_path + str(device_id) if device_id is not None else model_file_path

    global loaded_models
    loaded_model = loaded_models.get(model_cached_tag)
    if loaded_model:
        logging.info(f"load_model {model_file_path} reuses cached model")
        return loaded_model

    if not os.path.exists(model_file_path):
        raise ValueError("not find model file path {}".format(
            model_file_path))

    def cuda_is_available():
        # True only when torch sees a CUDA device at the requested index;
        # any import/driver failure is treated as "no GPU available".
        try:
            pip_install_torch()
            import torch
            target_id = 0 if device_id is None else device_id
            if torch.cuda.is_available() and torch.cuda.device_count() > target_id:
                return True
        except Exception:
            return False
        return False

    options = ort.SessionOptions()
    options.enable_cpu_mem_arena = False
    options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
    options.intra_op_num_threads = 2
    options.inter_op_num_threads = 2

    # https://github.com/microsoft/onnxruntime/issues/9509#issuecomment-951546580
    # Shrink GPU memory after execution
    run_options = ort.RunOptions()
    if cuda_is_available():
        # GPU memory cap and arena growth strategy are env-tunable.
        gpu_mem_limit_mb = int(os.environ.get("OCR_GPU_MEM_LIMIT_MB", "2048"))
        arena_strategy = os.environ.get("OCR_ARENA_EXTEND_STRATEGY", "kNextPowerOfTwo")
        provider_device_id = 0 if device_id is None else device_id
        cuda_provider_options = {
            "device_id": provider_device_id, # Use specific GPU
            "gpu_mem_limit": max(gpu_mem_limit_mb, 0) * 1024 * 1024,
            "arena_extend_strategy": arena_strategy,  # gpu memory allocation strategy
        }
        sess = ort.InferenceSession(
            model_file_path,
            options=options,
            providers=['CUDAExecutionProvider'],
            provider_options=[cuda_provider_options]
        )
        logging.info(f"load_model {model_file_path} uses GPU (device {provider_device_id}, gpu_mem_limit={cuda_provider_options['gpu_mem_limit']}, arena_strategy={arena_strategy})")
    else:
        sess = ort.InferenceSession(
            model_file_path,
            options=options,
            providers=['CPUExecutionProvider'])
        # NOTE: arena shrinkage is only configured on the CPU path here.
        run_options.add_run_config_entry("memory.enable_memory_arena_shrinkage", "cpu")
        logging.info(f"load_model {model_file_path} uses CPU")
    loaded_model = (sess, run_options)
    loaded_models[model_cached_tag] = loaded_model
    return loaded_model
class TextRecognizer:
    """CRNN-style text-line recognizer backed by an ONNX Runtime session.

    ``__call__`` uses ``resize_norm_img`` + CTC decoding. The other
    resize_norm_* helpers target alternative recognition architectures
    (SRN, SAR, SPIN, SVTR, ABINet, CAN) and are kept for parity with the
    upstream PaddleOCR code they derive from.
    """

    def __init__(self, model_dir, device_id: int | None = None):
        # Target crop shape: channels x height x width = 3 x 48 x 320.
        self.rec_image_shape = [int(v) for v in "3, 48, 320".split(",")]
        self.rec_batch_num = 16
        postprocess_params = {
            'name': 'CTCLabelDecode',
            "character_dict_path": os.path.join(model_dir, "ocr.res"),
            "use_space_char": True
        }
        self.postprocess_op = build_post_process(postprocess_params)
        self.predictor, self.run_options = load_model(model_dir, 'rec', device_id)
        self.input_tensor = self.predictor.get_inputs()[0]

    def resize_norm_img(self, img, max_wh_ratio):
        """Resize a crop to model height, normalize to [-1, 1] CHW, and
        right-pad the width up to the batch-wide target width."""
        imgC, imgH, imgW = self.rec_image_shape
        assert imgC == img.shape[2]
        imgW = int((imgH * max_wh_ratio))

        # If the ONNX model declares a fixed numeric width, honor it
        # (a string means a dynamic axis).
        w = self.input_tensor.shape[3:][0]
        if isinstance(w, str):
            pass
        elif w is not None and w > 0:
            imgW = w
        h, w = img.shape[:2]
        ratio = w / float(h)
        # Keep aspect ratio unless the resized width would exceed imgW.
        if math.ceil(imgH * ratio) > imgW:
            resized_w = imgW
        else:
            resized_w = int(math.ceil(imgH * ratio))
        resized_image = cv2.resize(img, (resized_w, imgH))
        resized_image = resized_image.astype('float32')
        resized_image = resized_image.transpose((2, 0, 1)) / 255
        resized_image -= 0.5
        resized_image /= 0.5
        padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
        padding_im[:, :, 0:resized_w] = resized_image
        return padding_im

    def resize_norm_img_vl(self, img, image_shape):
        """Plain resize + [0, 1] CHW normalization with BGR->RGB swap."""
        imgC, imgH, imgW = image_shape
        img = img[:, :, ::-1]  # bgr2rgb
        resized_image = cv2.resize(
            img, (imgW, imgH), interpolation=cv2.INTER_LINEAR)
        resized_image = resized_image.astype('float32')
        resized_image = resized_image.transpose((2, 0, 1)) / 255
        return resized_image

    def resize_norm_img_srn(self, img, image_shape):
        """SRN preprocessing: grayscale, width-bucketed resize onto a black
        canvas, returned as (1, H, W) float32."""
        imgC, imgH, imgW = image_shape

        img_black = np.zeros((imgH, imgW))
        im_hei = img.shape[0]
        im_wid = img.shape[1]

        # Bucket the width into 1x/2x/3x the height, else stretch to imgW.
        if im_wid <= im_hei * 1:
            img_new = cv2.resize(img, (imgH * 1, imgH))
        elif im_wid <= im_hei * 2:
            img_new = cv2.resize(img, (imgH * 2, imgH))
        elif im_wid <= im_hei * 3:
            img_new = cv2.resize(img, (imgH * 3, imgH))
        else:
            img_new = cv2.resize(img, (imgW, imgH))

        img_np = np.asarray(img_new)
        img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
        img_black[:, 0:img_np.shape[1]] = img_np
        img_black = img_black[:, :, np.newaxis]

        row, col, c = img_black.shape
        c = 1

        return np.reshape(img_black, (c, row, col)).astype(np.float32)

    def srn_other_inputs(self, image_shape, num_heads, max_text_length):
        """Build the auxiliary position/attention-bias tensors SRN expects."""
        imgC, imgH, imgW = image_shape
        feature_dim = int((imgH / 8) * (imgW / 8))

        encoder_word_pos = np.array(range(0, feature_dim)).reshape(
            (feature_dim, 1)).astype('int64')
        gsrm_word_pos = np.array(range(0, max_text_length)).reshape(
            (max_text_length, 1)).astype('int64')

        # Upper/lower triangular masks scaled to -1e9 act as attention bias.
        gsrm_attn_bias_data = np.ones((1, max_text_length, max_text_length))
        gsrm_slf_attn_bias1 = np.triu(gsrm_attn_bias_data, 1).reshape(
            [-1, 1, max_text_length, max_text_length])
        gsrm_slf_attn_bias1 = np.tile(
            gsrm_slf_attn_bias1,
            [1, num_heads, 1, 1]).astype('float32') * [-1e9]

        gsrm_slf_attn_bias2 = np.tril(gsrm_attn_bias_data, -1).reshape(
            [-1, 1, max_text_length, max_text_length])
        gsrm_slf_attn_bias2 = np.tile(
            gsrm_slf_attn_bias2,
            [1, num_heads, 1, 1]).astype('float32') * [-1e9]

        encoder_word_pos = encoder_word_pos[np.newaxis, :]
        gsrm_word_pos = gsrm_word_pos[np.newaxis, :]

        return [
            encoder_word_pos, gsrm_word_pos, gsrm_slf_attn_bias1,
            gsrm_slf_attn_bias2
        ]

    def process_image_srn(self, img, image_shape, num_heads, max_text_length):
        """Full SRN input pipeline: normalized image + auxiliary tensors."""
        norm_img = self.resize_norm_img_srn(img, image_shape)
        norm_img = norm_img[np.newaxis, :]

        [encoder_word_pos, gsrm_word_pos, gsrm_slf_attn_bias1, gsrm_slf_attn_bias2] = \
            self.srn_other_inputs(image_shape, num_heads, max_text_length)

        gsrm_slf_attn_bias1 = gsrm_slf_attn_bias1.astype(np.float32)
        gsrm_slf_attn_bias2 = gsrm_slf_attn_bias2.astype(np.float32)
        encoder_word_pos = encoder_word_pos.astype(np.int64)
        gsrm_word_pos = gsrm_word_pos.astype(np.int64)

        return (norm_img, encoder_word_pos, gsrm_word_pos, gsrm_slf_attn_bias1,
                gsrm_slf_attn_bias2)

    def resize_norm_img_sar(self, img, image_shape,
                            width_downsample_ratio=0.25):
        """SAR preprocessing: height-fixed resize with width snapped to the
        downsample divisor, [-1, 1] normalization, and -1 padding.

        Returns (padded_img, resize_shape, pad_shape, valid_ratio) where
        valid_ratio is the fraction of the padded width that holds content.
        """
        imgC, imgH, imgW_min, imgW_max = image_shape
        h = img.shape[0]
        w = img.shape[1]
        valid_ratio = 1.0
        # make sure new_width is an integral multiple of width_divisor.
        width_divisor = int(1 / width_downsample_ratio)
        # resize
        ratio = w / float(h)
        resize_w = math.ceil(imgH * ratio)
        if resize_w % width_divisor != 0:
            resize_w = round(resize_w / width_divisor) * width_divisor
        if imgW_min is not None:
            resize_w = max(imgW_min, resize_w)
        if imgW_max is not None:
            valid_ratio = min(1.0, 1.0 * resize_w / imgW_max)
            resize_w = min(imgW_max, resize_w)
        resized_image = cv2.resize(img, (resize_w, imgH))
        resized_image = resized_image.astype('float32')
        # norm
        if image_shape[0] == 1:
            resized_image = resized_image / 255
            resized_image = resized_image[np.newaxis, :]
        else:
            resized_image = resized_image.transpose((2, 0, 1)) / 255
        resized_image -= 0.5
        resized_image /= 0.5
        resize_shape = resized_image.shape
        padding_im = -1.0 * np.ones((imgC, imgH, imgW_max), dtype=np.float32)
        padding_im[:, :, 0:resize_w] = resized_image
        pad_shape = padding_im.shape

        return padding_im, resize_shape, pad_shape, valid_ratio

    def resize_norm_img_spin(self, img):
        """SPIN preprocessing: grayscale 100x32 crop normalized by 127.5."""
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # return padding_im
        img = cv2.resize(img, tuple([100, 32]), cv2.INTER_CUBIC)
        img = np.array(img, np.float32)
        img = np.expand_dims(img, -1)
        img = img.transpose((2, 0, 1))
        mean = [127.5]
        std = [127.5]
        mean = np.array(mean, dtype=np.float32)
        std = np.array(std, dtype=np.float32)
        mean = np.float32(mean.reshape(1, -1))
        stdinv = 1 / np.float32(std.reshape(1, -1))
        img -= mean
        img *= stdinv
        return img

    def resize_norm_img_svtr(self, img, image_shape):
        """SVTR preprocessing: fixed-size resize, [-1, 1] CHW."""
        imgC, imgH, imgW = image_shape
        resized_image = cv2.resize(
            img, (imgW, imgH), interpolation=cv2.INTER_LINEAR)
        resized_image = resized_image.astype('float32')
        resized_image = resized_image.transpose((2, 0, 1)) / 255
        resized_image -= 0.5
        resized_image /= 0.5
        return resized_image

    def resize_norm_img_abinet(self, img, image_shape):
        """ABINet preprocessing: ImageNet mean/std normalization, CHW."""
        imgC, imgH, imgW = image_shape

        resized_image = cv2.resize(
            img, (imgW, imgH), interpolation=cv2.INTER_LINEAR)
        resized_image = resized_image.astype('float32')
        resized_image = resized_image / 255.

        mean = np.array([0.485, 0.456, 0.406])
        std = np.array([0.229, 0.224, 0.225])
        resized_image = (
            resized_image - mean[None, None, ...]) / std[None, None, ...]
        resized_image = resized_image.transpose((2, 0, 1))
        resized_image = resized_image.astype('float32')

        return resized_image

    def norm_img_can(self, img, image_shape):
        """CAN preprocessing: grayscale + white-pad up to the target size."""
        img = cv2.cvtColor(
            img, cv2.COLOR_BGR2GRAY)  # CAN only predict gray scale image

        if self.rec_image_shape[0] == 1:
            h, w = img.shape
            _, imgH, imgW = self.rec_image_shape
            if h < imgH or w < imgW:
                padding_h = max(imgH - h, 0)
                padding_w = max(imgW - w, 0)
                img_padded = np.pad(img, ((0, padding_h), (0, padding_w)),
                                    'constant',
                                    constant_values=(255))
                img = img_padded

        img = np.expand_dims(img, 0) / 255.0  # h,w,c -> c,h,w
        img = img.astype('float32')

        return img

    def close(self):
        # close session and release manually
        logging.info('Close text recognizer.')
        if hasattr(self, "predictor"):
            del self.predictor
        gc.collect()

    def __call__(self, img_list):
        """Recognize a list of text-line crops.

        Returns (rec_res, elapsed) where rec_res[i] is the (text, score)
        pair for img_list[i] (original order is preserved).
        """
        img_num = len(img_list)
        # Calculate the aspect ratio of all text bars
        width_list = []
        for img in img_list:
            width_list.append(img.shape[1] / float(img.shape[0]))
        # Sorting can speed up the recognition process
        indices = np.argsort(np.array(width_list))
        rec_res = [['', 0.0]] * img_num
        batch_num = self.rec_batch_num
        st = time.time()

        for beg_img_no in range(0, img_num, batch_num):
            end_img_no = min(img_num, beg_img_no + batch_num)
            norm_img_batch = []
            imgC, imgH, imgW = self.rec_image_shape[:3]
            max_wh_ratio = imgW / imgH
            # max_wh_ratio = 0
            # First pass: find the widest aspect ratio in this batch so all
            # crops share one padded width.
            for ino in range(beg_img_no, end_img_no):
                h, w = img_list[indices[ino]].shape[0:2]
                wh_ratio = w * 1.0 / h
                max_wh_ratio = max(max_wh_ratio, wh_ratio)
            for ino in range(beg_img_no, end_img_no):
                norm_img = self.resize_norm_img(img_list[indices[ino]],
                                                max_wh_ratio)
                norm_img = norm_img[np.newaxis, :]
                norm_img_batch.append(norm_img)
            norm_img_batch = np.concatenate(norm_img_batch)
            norm_img_batch = norm_img_batch.copy()

            input_dict = {}
            input_dict[self.input_tensor.name] = norm_img_batch
            # Retry transient runtime failures a few times before giving up.
            for i in range(100000):
                try:
                    outputs = self.predictor.run(None, input_dict, self.run_options)
                    break
                except Exception as e:
                    if i >= 3:
                        raise e
                    time.sleep(5)
            preds = outputs[0]
            rec_result = self.postprocess_op(preds)
            # Scatter batch results back to the original (pre-sort) order.
            for rno in range(len(rec_result)):
                rec_res[indices[beg_img_no + rno]] = rec_result[rno]

        return rec_res, time.time() - st

    def __del__(self):
        self.close()
class TextDetector:
    """DB-based text detector running on an ONNX Runtime session.

    ``__call__`` takes a BGR image and returns (boxes, elapsed) where each
    box is a clockwise-ordered 4x2 quad in source-image coordinates.
    """

    def __init__(self, model_dir, device_id: int | None = None):
        # Resize-to-max-960, ImageNet normalization, HWC->CHW pipeline.
        pre_process_list = [{
            'DetResizeForTest': {
                'limit_side_len': 960,
                'limit_type': "max",
            }
        }, {
            'NormalizeImage': {
                'std': [0.229, 0.224, 0.225],
                'mean': [0.485, 0.456, 0.406],
                'scale': '1./255.',
                'order': 'hwc'
            }
        }, {
            'ToCHWImage': None
        }, {
            'KeepKeys': {
                'keep_keys': ['image', 'shape']
            }
        }]
        postprocess_params = {"name": "DBPostProcess", "thresh": 0.3, "box_thresh": 0.5, "max_candidates": 1000,
                              "unclip_ratio": 1.5, "use_dilation": False, "score_mode": "fast", "box_type": "quad"}

        self.postprocess_op = build_post_process(postprocess_params)
        self.predictor, self.run_options = load_model(model_dir, 'det', device_id)
        self.input_tensor = self.predictor.get_inputs()[0]

        # If the ONNX model declares a fixed numeric input size, resize to
        # exactly that shape instead (a string axis means dynamic).
        img_h, img_w = self.input_tensor.shape[2:]
        if isinstance(img_h, str) or isinstance(img_w, str):
            pass
        elif img_h is not None and img_w is not None and img_h > 0 and img_w > 0:
            pre_process_list[0] = {
                'DetResizeForTest': {
                    'image_shape': [img_h, img_w]
                }
            }
        self.preprocess_op = create_operators(pre_process_list)

    def order_points_clockwise(self, pts):
        """Reorder 4 points as top-left, top-right, bottom-right, bottom-left."""
        rect = np.zeros((4, 2), dtype="float32")
        # Top-left has the smallest x+y sum, bottom-right the largest.
        s = pts.sum(axis=1)
        rect[0] = pts[np.argmin(s)]
        rect[2] = pts[np.argmax(s)]
        tmp = np.delete(pts, (np.argmin(s), np.argmax(s)), axis=0)
        # Of the remaining two, y-x distinguishes top-right from bottom-left.
        diff = np.diff(np.array(tmp), axis=1)
        rect[1] = tmp[np.argmin(diff)]
        rect[3] = tmp[np.argmax(diff)]
        return rect

    def clip_det_res(self, points, img_height, img_width):
        # Clamp every point into the image bounds.
        for pno in range(points.shape[0]):
            points[pno, 0] = int(min(max(points[pno, 0], 0), img_width - 1))
            points[pno, 1] = int(min(max(points[pno, 1], 0), img_height - 1))
        return points

    def filter_tag_det_res(self, dt_boxes, image_shape):
        """Normalize, clip, and drop degenerate (<=3px side) boxes."""
        img_height, img_width = image_shape[0:2]
        dt_boxes_new = []
        for box in dt_boxes:
            if isinstance(box, list):
                box = np.array(box)
            box = self.order_points_clockwise(box)
            box = self.clip_det_res(box, img_height, img_width)
            rect_width = int(np.linalg.norm(box[0] - box[1]))
            rect_height = int(np.linalg.norm(box[0] - box[3]))
            if rect_width <= 3 or rect_height <= 3:
                continue
            dt_boxes_new.append(box)
        dt_boxes = np.array(dt_boxes_new)
        return dt_boxes

    def filter_tag_det_res_only_clip(self, dt_boxes, image_shape):
        """Clip boxes to the image without reordering or size filtering."""
        img_height, img_width = image_shape[0:2]
        dt_boxes_new = []
        for box in dt_boxes:
            if isinstance(box, list):
                box = np.array(box)
            box = self.clip_det_res(box, img_height, img_width)
            dt_boxes_new.append(box)
        dt_boxes = np.array(dt_boxes_new)
        return dt_boxes

    def close(self):
        logging.info("Close text detector.")
        if hasattr(self, "predictor"):
            del self.predictor
        gc.collect()

    def __call__(self, img):
        """Detect text regions in a BGR image; returns (boxes, elapsed)."""
        ori_im = img.copy()
        data = {'image': img}

        st = time.time()
        data = transform(data, self.preprocess_op)
        img, shape_list = data
        if img is None:
            return None, 0
        img = np.expand_dims(img, axis=0)
        shape_list = np.expand_dims(shape_list, axis=0)
        img = img.copy()
        input_dict = {}
        input_dict[self.input_tensor.name] = img
        # Retry transient runtime failures a few times before giving up.
        for i in range(100000):
            try:
                outputs = self.predictor.run(None, input_dict, self.run_options)
                break
            except Exception as e:
                if i >= 3:
                    raise e
                time.sleep(5)

        post_result = self.postprocess_op({"maps": outputs[0]}, shape_list)
        dt_boxes = post_result[0]['points']
        dt_boxes = self.filter_tag_det_res(dt_boxes, ori_im.shape)

        return dt_boxes, time.time() - st

    def __del__(self):
        self.close()
class OCR:
    """End-to-end OCR pipeline: detect text boxes, crop, then recognize.

    One TextDetector/TextRecognizer pair is built per parallel device when
    ``settings.PARALLEL_DEVICES`` > 0; callers select a pair via device_id.
    """

    def __init__(self, model_dir=None):
        """
        If you have trouble downloading HuggingFace models, -_^ this might help!!

        For Linux:
        export HF_ENDPOINT=https://hf-mirror.com

        For Windows:
        Good luck
        ^_-

        """
        if not model_dir:
            try:
                # Prefer the locally bundled models.
                model_dir = os.path.join(
                    get_project_base_directory(),
                    "rag/res/deepdoc")
                # Append muti-gpus task to the list
                if settings.PARALLEL_DEVICES > 0:
                    self.text_detector = []
                    self.text_recognizer = []
                    for device_id in range(settings.PARALLEL_DEVICES):
                        self.text_detector.append(TextDetector(model_dir, device_id))
                        self.text_recognizer.append(TextRecognizer(model_dir, device_id))
                else:
                    self.text_detector = [TextDetector(model_dir)]
                    self.text_recognizer = [TextRecognizer(model_dir)]

            except Exception:
                # Fall back to downloading the models from HuggingFace.
                model_dir = snapshot_download(repo_id="InfiniFlow/deepdoc",
                                              local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"),
                                              local_dir_use_symlinks=False)
                if settings.PARALLEL_DEVICES > 0:
                    self.text_detector = []
                    self.text_recognizer = []
                    for device_id in range(settings.PARALLEL_DEVICES):
                        self.text_detector.append(TextDetector(model_dir, device_id))
                        self.text_recognizer.append(TextRecognizer(model_dir, device_id))
                else:
                    self.text_detector = [TextDetector(model_dir)]
                    self.text_recognizer = [TextRecognizer(model_dir)]

        self.drop_score = 0.5
        self.crop_image_res_index = 0

    def get_rotate_crop_image(self, img, points):
        """Perspective-crop a quad out of ``img``; for tall crops, try 90°
        rotations and keep whichever orientation the recognizer scores best.
        """
        """
        img_height, img_width = img.shape[0:2]
        left = int(np.min(points[:, 0]))
        right = int(np.max(points[:, 0]))
        top = int(np.min(points[:, 1]))
        bottom = int(np.max(points[:, 1]))
        img_crop = img[top:bottom, left:right, :].copy()
        points[:, 0] = points[:, 0] - left
        points[:, 1] = points[:, 1] - top
        """
        assert len(points) == 4, "shape of points must be 4*2"
        img_crop_width = int(
            max(
                np.linalg.norm(points[0] - points[1]),
                np.linalg.norm(points[2] - points[3])))
        img_crop_height = int(
            max(
                np.linalg.norm(points[0] - points[3]),
                np.linalg.norm(points[1] - points[2])))
        pts_std = np.float32([[0, 0], [img_crop_width, 0],
                              [img_crop_width, img_crop_height],
                              [0, img_crop_height]])
        M = cv2.getPerspectiveTransform(points, pts_std)
        dst_img = cv2.warpPerspective(
            img,
            M, (img_crop_width, img_crop_height),
            borderMode=cv2.BORDER_REPLICATE,
            flags=cv2.INTER_CUBIC)
        dst_img_height, dst_img_width = dst_img.shape[0:2]
        # Tall and narrow crops are likely vertical text: score the crop in
        # three orientations and keep the one the recognizer is surest about.
        if dst_img_height * 1.0 / dst_img_width >= 1.5:
            # Try original orientation
            rec_result = self.text_recognizer[0]([dst_img])
            text, score = rec_result[0][0]
            best_score = score
            best_img = dst_img

            # Try clockwise 90° rotation
            rotated_cw = np.rot90(dst_img, k=3)
            rec_result = self.text_recognizer[0]([rotated_cw])
            rotated_cw_text, rotated_cw_score = rec_result[0][0]
            if rotated_cw_score > best_score:
                best_score = rotated_cw_score
                best_img = rotated_cw

            # Try counter-clockwise 90° rotation
            rotated_ccw = np.rot90(dst_img, k=1)
            rec_result = self.text_recognizer[0]([rotated_ccw])
            rotated_ccw_text, rotated_ccw_score = rec_result[0][0]
            if rotated_ccw_score > best_score:
                best_img = rotated_ccw

            # Use the best image
            dst_img = best_img
        return dst_img

    def sorted_boxes(self, dt_boxes):
        """
        Sort text boxes in order from top to bottom, left to right
        args:
            dt_boxes(array):detected text boxes with shape [4, 2]
        return:
            sorted boxes(array) with shape [4, 2]
        """
        num_boxes = dt_boxes.shape[0]
        sorted_boxes = sorted(dt_boxes, key=lambda x: (x[0][1], x[0][0]))
        _boxes = list(sorted_boxes)

        # Bubble pass: boxes within 10px vertically count as the same line
        # and are reordered left-to-right.
        for i in range(num_boxes - 1):
            for j in range(i, -1, -1):
                if abs(_boxes[j + 1][0][1] - _boxes[j][0][1]) < 10 and \
                        (_boxes[j + 1][0][0] < _boxes[j][0][0]):
                    tmp = _boxes[j]
                    _boxes[j] = _boxes[j + 1]
                    tmp = _boxes[j]
                    _boxes[j] = _boxes[j + 1]
                    _boxes[j + 1] = tmp
                else:
                    break
        return _boxes

    def detect(self, img, device_id: int | None = None):
        """Detection only: returns (box, ("", 0)) pairs, or a
        (None, None, time_dict) triple when nothing is detected."""
        if device_id is None:
            device_id = 0

        time_dict = {'det': 0, 'rec': 0, 'cls': 0, 'all': 0}

        if img is None:
            return None, None, time_dict

        start = time.time()
        dt_boxes, elapse = self.text_detector[device_id](img)
        time_dict['det'] = elapse

        if dt_boxes is None:
            end = time.time()
            time_dict['all'] = end - start
            return None, None, time_dict

        # NOTE: success path returns a zip of (box, placeholder) pairs —
        # a different shape than the early-return triple above.
        return zip(self.sorted_boxes(dt_boxes), [
            ("", 0) for _ in range(len(dt_boxes))])

    def recognize(self, ori_im, box, device_id: int | None = None):
        """Recognize the text inside one box; '' when below drop_score."""
        if device_id is None:
            device_id = 0

        img_crop = self.get_rotate_crop_image(ori_im, box)

        rec_res, elapse = self.text_recognizer[device_id]([img_crop])
        text, score = rec_res[0]
        if score < self.drop_score:
            return ""
        return text

    def recognize_batch(self, img_list, device_id: int | None = None):
        """Recognize many crops at once; low-score entries become ''."""
        if device_id is None:
            device_id = 0
        rec_res, elapse = self.text_recognizer[device_id](img_list)
        texts = []
        for i in range(len(rec_res)):
            text, score = rec_res[i]
            if score < self.drop_score:
                text = ""
            texts.append(text)
        return texts

    def __call__(self, img, device_id = 0, cls=True):
        """Full pipeline: detect, crop, recognize.

        Returns a list of (box_as_list, (text, score)) pairs, or the
        (None, None, time_dict) triple when input/detection is empty.
        """
        time_dict = {'det': 0, 'rec': 0, 'cls': 0, 'all': 0}

        if device_id is None:
            device_id = 0

        if img is None:
            return None, None, time_dict

        start = time.time()
        ori_im = img.copy()
        dt_boxes, elapse = self.text_detector[device_id](img)
        time_dict['det'] = elapse

        if dt_boxes is None:
            end = time.time()
            time_dict['all'] = end - start
            return None, None, time_dict

        img_crop_list = []

        dt_boxes = self.sorted_boxes(dt_boxes)

        for bno in range(len(dt_boxes)):
            tmp_box = copy.deepcopy(dt_boxes[bno])
            img_crop = self.get_rotate_crop_image(ori_im, tmp_box)
            img_crop_list.append(img_crop)

        rec_res, elapse = self.text_recognizer[device_id](img_crop_list)
        time_dict['rec'] = elapse

        # Drop low-confidence results.
        filter_boxes, filter_rec_res = [], []
        for box, rec_result in zip(dt_boxes, rec_res):
            text, score = rec_result
            if score >= self.drop_score:
                filter_boxes.append(box)
                filter_rec_res.append(rec_result)

        end = time.time()
        time_dict['all'] = end - start

        # for bno in range(len(img_crop_list)):
        #     print(f"{bno}, {rec_res[bno]}")

        return list(zip([a.tolist() for a in filter_boxes], filter_rec_res))
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/vision/recognizer.py | deepdoc/vision/recognizer.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gc
import logging
import os
import math
import numpy as np
import cv2
from functools import cmp_to_key
from common.file_utils import get_project_base_directory
from .operators import * # noqa: F403
from .operators import preprocess
from . import operators
from .ocr import load_model
class Recognizer:
    def __init__(self, label_list, task_name, model_dir=None):
        """
        If you have trouble downloading HuggingFace models, -_^ this might help!!

        For Linux:
        export HF_ENDPOINT=https://hf-mirror.com

        For Windows:
        Good luck
        ^_-

        """
        # Default to the bundled deepdoc model directory.
        if not model_dir:
            model_dir = os.path.join(
                get_project_base_directory(),
                "rag/res/deepdoc")
        self.ort_sess, self.run_options = load_model(model_dir, task_name)
        self.input_names = [node.name for node in self.ort_sess.get_inputs()]
        self.output_names = [node.name for node in self.ort_sess.get_outputs()]
        # Model's declared (H, W) input resolution.
        self.input_shape = self.ort_sess.get_inputs()[0].shape[2:4]
        self.label_list = label_list
@staticmethod
def sort_Y_firstly(arr, threshold):
def cmp(c1, c2):
diff = c1["top"] - c2["top"]
if abs(diff) < threshold:
diff = c1["x0"] - c2["x0"]
return diff
arr = sorted(arr, key=cmp_to_key(cmp))
return arr
@staticmethod
def sort_X_firstly(arr, threshold):
def cmp(c1, c2):
diff = c1["x0"] - c2["x0"]
if abs(diff) < threshold:
diff = c1["top"] - c2["top"]
return diff
arr = sorted(arr, key=cmp_to_key(cmp))
return arr
@staticmethod
def sort_C_firstly(arr, thr=0):
# sort using y1 first and then x1
# sorted(arr, key=lambda r: (r["x0"], r["top"]))
arr = Recognizer.sort_X_firstly(arr, thr)
for i in range(len(arr) - 1):
for j in range(i, -1, -1):
# restore the order using th
if "C" not in arr[j] or "C" not in arr[j + 1]:
continue
if arr[j + 1]["C"] < arr[j]["C"] \
or (
arr[j + 1]["C"] == arr[j]["C"]
and arr[j + 1]["top"] < arr[j]["top"]
):
tmp = arr[j]
arr[j] = arr[j + 1]
arr[j + 1] = tmp
return arr
@staticmethod
def sort_R_firstly(arr, thr=0):
# sort using y1 first and then x1
# sorted(arr, key=lambda r: (r["top"], r["x0"]))
arr = Recognizer.sort_Y_firstly(arr, thr)
for i in range(len(arr) - 1):
for j in range(i, -1, -1):
if "R" not in arr[j] or "R" not in arr[j + 1]:
continue
if arr[j + 1]["R"] < arr[j]["R"] \
or (
arr[j + 1]["R"] == arr[j]["R"]
and arr[j + 1]["x0"] < arr[j]["x0"]
):
tmp = arr[j]
arr[j] = arr[j + 1]
arr[j + 1] = tmp
return arr
@staticmethod
def overlapped_area(a, b, ratio=True):
tp, btm, x0, x1 = a["top"], a["bottom"], a["x0"], a["x1"]
if b["x0"] > x1 or b["x1"] < x0:
return 0
if b["bottom"] < tp or b["top"] > btm:
return 0
x0_ = max(b["x0"], x0)
x1_ = min(b["x1"], x1)
assert x0_ <= x1_, "Bbox mismatch! T:{},B:{},X0:{},X1:{} ==> {}".format(
tp, btm, x0, x1, b)
tp_ = max(b["top"], tp)
btm_ = min(b["bottom"], btm)
assert tp_ <= btm_, "Bbox mismatch! T:{},B:{},X0:{},X1:{} => {}".format(
tp, btm, x0, x1, b)
ov = (btm_ - tp_) * (x1_ - x0_) if x1 - \
x0 != 0 and btm - tp != 0 else 0
if ov > 0 and ratio:
ov /= (x1 - x0) * (btm - tp)
return ov
@staticmethod
def layouts_cleanup(boxes, layouts, far=2, thr=0.7):
def not_overlapped(a, b):
return any([a["x1"] < b["x0"],
a["x0"] > b["x1"],
a["bottom"] < b["top"],
a["top"] > b["bottom"]])
i = 0
while i + 1 < len(layouts):
j = i + 1
while j < min(i + far, len(layouts)) \
and (layouts[i].get("type", "") != layouts[j].get("type", "")
or not_overlapped(layouts[i], layouts[j])):
j += 1
if j >= min(i + far, len(layouts)):
i += 1
continue
if Recognizer.overlapped_area(layouts[i], layouts[j]) < thr \
and Recognizer.overlapped_area(layouts[j], layouts[i]) < thr:
i += 1
continue
if layouts[i].get("score") and layouts[j].get("score"):
if layouts[i]["score"] > layouts[j]["score"]:
layouts.pop(j)
else:
layouts.pop(i)
continue
area_i, area_i_1 = 0, 0
for b in boxes:
if not not_overlapped(b, layouts[i]):
area_i += Recognizer.overlapped_area(b, layouts[i], False)
if not not_overlapped(b, layouts[j]):
area_i_1 += Recognizer.overlapped_area(b, layouts[j], False)
if area_i > area_i_1:
layouts.pop(j)
else:
layouts.pop(i)
return layouts
def create_inputs(self, imgs, im_info):
"""generate input for different model type
Args:
imgs (list(numpy)): list of images (np.ndarray)
im_info (list(dict)): list of image info
Returns:
inputs (dict): input of model
"""
inputs = {}
im_shape = []
scale_factor = []
if len(imgs) == 1:
inputs['image'] = np.array((imgs[0],)).astype('float32')
inputs['im_shape'] = np.array(
(im_info[0]['im_shape'],)).astype('float32')
inputs['scale_factor'] = np.array(
(im_info[0]['scale_factor'],)).astype('float32')
return inputs
im_shape = np.array([info['im_shape'] for info in im_info], dtype='float32')
scale_factor = np.array([info['scale_factor'] for info in im_info], dtype='float32')
inputs['im_shape'] = np.concatenate(im_shape, axis=0)
inputs['scale_factor'] = np.concatenate(scale_factor, axis=0)
imgs_shape = [[e.shape[1], e.shape[2]] for e in imgs]
max_shape_h = max([e[0] for e in imgs_shape])
max_shape_w = max([e[1] for e in imgs_shape])
padding_imgs = []
for img in imgs:
im_c, im_h, im_w = img.shape[:]
padding_im = np.zeros(
(im_c, max_shape_h, max_shape_w), dtype=np.float32)
padding_im[:, :im_h, :im_w] = img
padding_imgs.append(padding_im)
inputs['image'] = np.stack(padding_imgs, axis=0)
return inputs
@staticmethod
def find_overlapped(box, boxes_sorted_by_y, naive=False):
if not boxes_sorted_by_y:
return
bxs = boxes_sorted_by_y
s, e, ii = 0, len(bxs), 0
while s < e and not naive:
ii = (e + s) // 2
pv = bxs[ii]
if box["bottom"] < pv["top"]:
e = ii
continue
if box["top"] > pv["bottom"]:
s = ii + 1
continue
break
while s < ii:
if box["top"] > bxs[s]["bottom"]:
s += 1
break
while e - 1 > ii:
if box["bottom"] < bxs[e - 1]["top"]:
e -= 1
break
max_overlapped_i, max_overlapped = None, 0
for i in range(s, e):
ov = Recognizer.overlapped_area(bxs[i], box)
if ov <= max_overlapped:
continue
max_overlapped_i = i
max_overlapped = ov
return max_overlapped_i
@staticmethod
def find_horizontally_tightest_fit(box, boxes):
if not boxes:
return
min_dis, min_i = 1000000, None
for i,b in enumerate(boxes):
if box.get("layoutno", "0") != b.get("layoutno", "0"):
continue
dis = min(abs(box["x0"] - b["x0"]), abs(box["x1"] - b["x1"]), abs(box["x0"]+box["x1"] - b["x1"] - b["x0"])/2)
if dis < min_dis:
min_i = i
min_dis = dis
return min_i
@staticmethod
def find_overlapped_with_threshold(box, boxes, thr=0.3):
if not boxes:
return
max_overlapped_i, max_overlapped, _max_overlapped = None, thr, 0
s, e = 0, len(boxes)
for i in range(s, e):
ov = Recognizer.overlapped_area(box, boxes[i])
_ov = Recognizer.overlapped_area(boxes[i], box)
if (ov, _ov) < (max_overlapped, _max_overlapped):
continue
max_overlapped_i = i
max_overlapped = ov
_max_overlapped = _ov
return max_overlapped_i
def preprocess(self, image_list):
inputs = []
if "scale_factor" in self.input_names:
preprocess_ops = []
for op_info in [
{'interp': 2, 'keep_ratio': False, 'target_size': [800, 608], 'type': 'LinearResize'},
{'is_scale': True, 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225], 'type': 'StandardizeImage'},
{'type': 'Permute'},
{'stride': 32, 'type': 'PadStride'}
]:
new_op_info = op_info.copy()
op_type = new_op_info.pop('type')
preprocess_ops.append(getattr(operators, op_type)(**new_op_info))
for im_path in image_list:
im, im_info = preprocess(im_path, preprocess_ops)
inputs.append({"image": np.array((im,)).astype('float32'),
"scale_factor": np.array((im_info["scale_factor"],)).astype('float32')})
else:
hh, ww = self.input_shape
for img in image_list:
h, w = img.shape[:2]
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(np.array(img).astype('float32'), (ww, hh))
# Scale input pixel values to 0 to 1
img /= 255.0
img = img.transpose(2, 0, 1)
img = img[np.newaxis, :, :, :].astype(np.float32)
inputs.append({self.input_names[0]: img, "scale_factor": [w/ww, h/hh]})
return inputs
def postprocess(self, boxes, inputs, thr):
if "scale_factor" in self.input_names:
bb = []
for b in boxes:
clsid, bbox, score = int(b[0]), b[2:], b[1]
if score < thr:
continue
if clsid >= len(self.label_list):
continue
bb.append({
"type": self.label_list[clsid].lower(),
"bbox": [float(t) for t in bbox.tolist()],
"score": float(score)
})
return bb
def xywh2xyxy(x):
# [x, y, w, h] to [x1, y1, x2, y2]
y = np.copy(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2
y[:, 1] = x[:, 1] - x[:, 3] / 2
y[:, 2] = x[:, 0] + x[:, 2] / 2
y[:, 3] = x[:, 1] + x[:, 3] / 2
return y
def compute_iou(box, boxes):
# Compute xmin, ymin, xmax, ymax for both boxes
xmin = np.maximum(box[0], boxes[:, 0])
ymin = np.maximum(box[1], boxes[:, 1])
xmax = np.minimum(box[2], boxes[:, 2])
ymax = np.minimum(box[3], boxes[:, 3])
# Compute intersection area
intersection_area = np.maximum(0, xmax - xmin) * np.maximum(0, ymax - ymin)
# Compute union area
box_area = (box[2] - box[0]) * (box[3] - box[1])
boxes_area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
union_area = box_area + boxes_area - intersection_area
# Compute IoU
iou = intersection_area / union_area
return iou
def iou_filter(boxes, scores, iou_threshold):
sorted_indices = np.argsort(scores)[::-1]
keep_boxes = []
while sorted_indices.size > 0:
# Pick the last box
box_id = sorted_indices[0]
keep_boxes.append(box_id)
# Compute IoU of the picked box with the rest
ious = compute_iou(boxes[box_id, :], boxes[sorted_indices[1:], :])
# Remove boxes with IoU over the threshold
keep_indices = np.where(ious < iou_threshold)[0]
# print(keep_indices.shape, sorted_indices.shape)
sorted_indices = sorted_indices[keep_indices + 1]
return keep_boxes
boxes = np.squeeze(boxes).T
# Filter out object confidence scores below threshold
scores = np.max(boxes[:, 4:], axis=1)
boxes = boxes[scores > thr, :]
scores = scores[scores > thr]
if len(boxes) == 0:
return []
# Get the class with the highest confidence
class_ids = np.argmax(boxes[:, 4:], axis=1)
boxes = boxes[:, :4]
input_shape = np.array([inputs["scale_factor"][0], inputs["scale_factor"][1], inputs["scale_factor"][0], inputs["scale_factor"][1]])
boxes = np.multiply(boxes, input_shape, dtype=np.float32)
boxes = xywh2xyxy(boxes)
unique_class_ids = np.unique(class_ids)
indices = []
for class_id in unique_class_ids:
class_indices = np.where(class_ids == class_id)[0]
class_boxes = boxes[class_indices, :]
class_scores = scores[class_indices]
class_keep_boxes = iou_filter(class_boxes, class_scores, 0.2)
indices.extend(class_indices[class_keep_boxes])
return [{
"type": self.label_list[class_ids[i]].lower(),
"bbox": [float(t) for t in boxes[i].tolist()],
"score": float(scores[i])
} for i in indices]
def close(self):
logging.info("Close recognizer.")
if hasattr(self, "ort_sess"):
del self.ort_sess
gc.collect()
def __call__(self, image_list, thr=0.7, batch_size=16):
res = []
images = []
for i in range(len(image_list)):
if not isinstance(image_list[i], np.ndarray):
images.append(np.array(image_list[i]))
else:
images.append(image_list[i])
batch_loop_cnt = math.ceil(float(len(images)) / batch_size)
for i in range(batch_loop_cnt):
start_index = i * batch_size
end_index = min((i + 1) * batch_size, len(images))
batch_image_list = images[start_index:end_index]
inputs = self.preprocess(batch_image_list)
logging.debug("preprocess")
for ins in inputs:
bb = self.postprocess(self.ort_sess.run(None, {k:v for k,v in ins.items() if k in self.input_names}, self.run_options)[0], ins, thr)
res.append(bb)
#seeit.save_results(image_list, res, self.label_list, threshold=thr)
return res
def __del__(self):
self.close()
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/vision/__init__.py | deepdoc/vision/__init__.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import sys
import threading
import pdfplumber
from .ocr import OCR
from .recognizer import Recognizer
from .layout_recognizer import AscendLayoutRecognizer
from .layout_recognizer import LayoutRecognizer4YOLOv10 as LayoutRecognizer
from .table_structure_recognizer import TableStructureRecognizer
# Share one process-wide lock under a well-known key in sys.modules so every
# import of this module (even under a different name) serializes pdfplumber
# usage — presumably because pdfplumber/pdfminer is not thread-safe; confirm
# before removing.
LOCK_KEY_pdfplumber = "global_shared_lock_pdfplumber"
if LOCK_KEY_pdfplumber not in sys.modules:
    sys.modules[LOCK_KEY_pdfplumber] = threading.Lock()
def init_in_out(args):
    """Resolve CLI arguments into parallel lists of images and output paths.

    Args:
        args: namespace with ``inputs`` (file or directory) and ``output_dir``.

    Returns:
        tuple[list, list]: PIL images (one per input image / PDF page) and the
        matching output file paths inside ``args.output_dir``.

    Unreadable inputs are skipped with a printed traceback rather than
    aborting the whole run.
    """
    import os
    import traceback
    from PIL import Image
    from common.file_utils import traversal_files
    images = []
    outputs = []
    # makedirs with exist_ok handles nested paths and the dir already existing
    # (os.mkdir would raise FileNotFoundError / FileExistsError).
    os.makedirs(args.output_dir, exist_ok=True)

    def pdf_pages(fnm, zoomin=3):
        # Render every page of the PDF at 72*zoomin dpi.
        nonlocal outputs, images
        with sys.modules[LOCK_KEY_pdfplumber]:
            pdf = pdfplumber.open(fnm)
            try:
                images = [p.to_image(resolution=72 * zoomin).annotated
                          for p in pdf.pages]
                for i in range(len(images)):
                    outputs.append(os.path.split(fnm)[-1] + f"_{i}.jpg")
            finally:
                # Close even if rendering a page raises.
                pdf.close()

    def images_and_outputs(fnm):
        nonlocal outputs, images
        if fnm.split(".")[-1].lower() == "pdf":
            pdf_pages(fnm)
            return
        try:
            # Context manager guarantees the handle is closed on error.
            with open(fnm, "rb") as fp:
                binary = fp.read()
            images.append(Image.open(io.BytesIO(binary)).convert("RGB"))
            outputs.append(os.path.split(fnm)[-1])
        except Exception:
            traceback.print_exc()

    if os.path.isdir(args.inputs):
        for fnm in traversal_files(args.inputs):
            images_and_outputs(fnm)
    else:
        images_and_outputs(args.inputs)
    for i in range(len(outputs)):
        outputs[i] = os.path.join(args.output_dir, outputs[i])
    return images, outputs
# Explicit public API of the deepdoc.vision package.
__all__ = [
    "OCR",
    "Recognizer",
    "LayoutRecognizer",
    "AscendLayoutRecognizer",
    "TableStructureRecognizer",
    "init_in_out",
]
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/vision/operators.py | deepdoc/vision/operators.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import sys
import ast
import six
import cv2
import numpy as np
import math
from PIL import Image
class DecodeImage:
    """Decode raw image bytes from ``data['image']`` into an ndarray.

    Supports GRAY/RGB output modes, optional CHW layout, and optionally
    honoring EXIF orientation during decoding.
    """

    def __init__(self,
                 img_mode='RGB',
                 channel_first=False,
                 ignore_orientation=False,
                 **kwargs):
        self.img_mode = img_mode
        self.channel_first = channel_first
        self.ignore_orientation = ignore_orientation

    def __call__(self, data):
        raw = data['image']
        # Raw payload must be a non-empty byte string (str on py2).
        if six.PY2:
            assert isinstance(raw, str) and len(
                raw) > 0, "invalid input 'img' in DecodeImage"
        else:
            assert isinstance(raw, bytes) and len(
                raw) > 0, "invalid input 'img' in DecodeImage"
        buf = np.frombuffer(raw, dtype='uint8')
        if self.ignore_orientation:
            decoded = cv2.imdecode(
                buf, cv2.IMREAD_IGNORE_ORIENTATION | cv2.IMREAD_COLOR)
        else:
            decoded = cv2.imdecode(buf, 1)
        if decoded is None:
            return None
        if self.img_mode == 'GRAY':
            decoded = cv2.cvtColor(decoded, cv2.COLOR_GRAY2BGR)
        elif self.img_mode == 'RGB':
            assert decoded.shape[2] == 3, 'invalid shape of image[%s]' % (
                decoded.shape)
            decoded = decoded[:, :, ::-1]  # BGR -> RGB
        if self.channel_first:
            decoded = decoded.transpose((2, 0, 1))
        data['image'] = decoded
        return data
class StandardizeImage:
    """normalize image

    Fixes the historical class-name typo ``StandardizeImag``: callers resolve
    this operator dynamically via ``getattr(operators, 'StandardizeImage')``
    (see the preprocess pipeline in recognizer.py), which would raise
    AttributeError against the misspelled name.

    Args:
        mean (list): im - mean
        std (list): im / std
        is_scale (bool): whether need im / 255
        norm_type (str): type in ['mean_std', 'none']
    """

    def __init__(self, mean, std, is_scale=True, norm_type='mean_std'):
        self.mean = mean
        self.std = std
        self.is_scale = is_scale
        self.norm_type = norm_type

    def __call__(self, im, im_info):
        """
        Args:
            im (np.ndarray): HWC image
            im_info (dict): info of image, passed through unchanged
        Returns:
            im (np.ndarray): normalized float32 image
            im_info (dict): info of processed image
        """
        im = im.astype(np.float32, copy=False)
        if self.is_scale:
            # Map uint8 pixel values into [0, 1] before standardization.
            scale = 1.0 / 255.0
            im *= scale
        if self.norm_type == 'mean_std':
            # Broadcast per-channel mean/std over the HW dimensions.
            mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
            std = np.array(self.std)[np.newaxis, np.newaxis, :]
            im -= mean
            im /= std
        return im, im_info


# Backward-compatible alias for any code still using the misspelled name.
StandardizeImag = StandardizeImage
class NormalizeImage:
    """Scale an image, then subtract a per-channel mean and divide by std."""

    def __init__(self, scale=None, mean=None, std=None, order='chw', **kwargs):
        # `scale` may arrive as a number or a string such as "1/255".
        if isinstance(scale, str):
            try:
                scale = float(scale)
            except ValueError:
                if '/' in scale:
                    parts = scale.split('/')
                    scale = ast.literal_eval(parts[0]) / ast.literal_eval(parts[1])
                else:
                    scale = ast.literal_eval(scale)
        self.scale = np.float32(scale if scale is not None else 1.0 / 255.0)
        if mean is None:
            mean = [0.485, 0.456, 0.406]
        if std is None:
            std = [0.229, 0.224, 0.225]
        # Shape the stats so they broadcast against CHW or HWC images.
        shape = (3, 1, 1) if order == 'chw' else (1, 1, 3)
        self.mean = np.array(mean).reshape(shape).astype('float32')
        self.std = np.array(std).reshape(shape).astype('float32')

    def __call__(self, data):
        img = data['image']
        from PIL import Image
        if isinstance(img, Image.Image):
            img = np.array(img)
        assert isinstance(img,
                          np.ndarray), "invalid input 'img' in NormalizeImage"
        data['image'] = (
            img.astype('float32') * self.scale - self.mean) / self.std
        return data
class ToCHWImage:
    """Transpose an HWC image stored in ``data['image']`` to CHW layout."""

    def __init__(self, **kwargs):
        pass

    def __call__(self, data):
        img = data['image']
        from PIL import Image
        # PIL images are converted to ndarrays before the transpose.
        if isinstance(img, Image.Image):
            img = np.array(img)
        data['image'] = img.transpose((2, 0, 1))
        return data
class KeepKeys:
    """Project a data dict onto an ordered list of selected values."""

    def __init__(self, keep_keys, **kwargs):
        self.keep_keys = keep_keys

    def __call__(self, data):
        # Values come back in the same order the keys were configured.
        return [data[key] for key in self.keep_keys]
class Pad:
    """Zero-pad an HWC image to a fixed size, or up to a multiple of size_div."""

    def __init__(self, size=None, size_div=32, **kwargs):
        if size is not None and not isinstance(size, (int, list, tuple)):
            raise TypeError("Type of target_size is invalid. Now is {}".format(
                type(size)))
        # A bare int means a square target.
        self.size = [size, size] if isinstance(size, int) else size
        self.size_div = size_div

    def __call__(self, data):
        img = data['image']
        img_h, img_w = img.shape[0], img.shape[1]
        if self.size:
            target_h, target_w = self.size
            assert (
                img_h < target_h and img_w < target_w
            ), '(h, w) of target size should be greater than (img_h, img_w)'
        else:
            # Round each side up to the next multiple of size_div (min 1x).
            target_h = max(
                int(math.ceil(img.shape[0] / self.size_div) * self.size_div),
                self.size_div)
            target_w = max(
                int(math.ceil(img.shape[1] / self.size_div) * self.size_div),
                self.size_div)
        data['image'] = cv2.copyMakeBorder(
            img,
            0,
            target_h - img_h,
            0,
            target_w - img_w,
            cv2.BORDER_CONSTANT,
            value=0)
        return data
class LinearResize:
    """Resize an image to ``target_size``, optionally preserving aspect ratio.

    Args:
        target_size (int | list): target size; an int means a square target.
        keep_ratio (bool): scale short side to the smaller target dimension
            without exceeding the larger one on the long side.
        interp (int): cv2 interpolation flag.
    """

    def __init__(self, target_size, keep_ratio=True, interp=cv2.INTER_LINEAR):
        if isinstance(target_size, int):
            target_size = [target_size, target_size]
        self.target_size = target_size
        self.keep_ratio = keep_ratio
        self.interp = interp

    def __call__(self, im, im_info):
        """Resize `im` and record the new shape and scale factors in `im_info`."""
        assert len(self.target_size) == 2
        assert self.target_size[0] > 0 and self.target_size[1] > 0
        _im_channel = im.shape[2]  # implicitly requires an HWC image
        scale_y, scale_x = self.generate_scale(im)
        im = cv2.resize(
            im,
            None,
            None,
            fx=scale_x,
            fy=scale_y,
            interpolation=self.interp)
        im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
        im_info['scale_factor'] = np.array(
            [scale_y, scale_x]).astype('float32')
        return im, im_info

    def generate_scale(self, im):
        """Return ``(scale_y, scale_x)`` mapping `im` onto target_size."""
        origin_shape = im.shape[:2]
        _im_c = im.shape[2]  # implicitly requires an HWC image
        if self.keep_ratio:
            # Fit the short side; shrink further if the long side would
            # overshoot its budget.
            short_src = np.min(origin_shape)
            long_src = np.max(origin_shape)
            short_tgt = np.min(self.target_size)
            long_tgt = np.max(self.target_size)
            scale = float(short_tgt) / float(short_src)
            if np.round(scale * long_src) > long_tgt:
                scale = float(long_tgt) / float(long_src)
            return scale, scale
        target_h, target_w = self.target_size
        return (target_h / float(origin_shape[0]),
                target_w / float(origin_shape[1]))
class Resize:
    """Resize ``data['image']`` to a fixed (h, w) and rescale any polygons."""

    def __init__(self, size=(640, 640), **kwargs):
        self.size = size

    def resize_image(self, img):
        target_h, target_w = self.size
        src_h, src_w = img.shape[:2]  # (h, w, c)
        ratios = [float(target_h) / src_h, float(target_w) / src_w]
        return cv2.resize(img, (int(target_w), int(target_h))), ratios

    def __call__(self, data):
        img = data['image']
        img_resized, (ratio_h, ratio_w) = self.resize_image(img)
        if 'polys' in data:
            # Scale every polygon point by the same h/w ratios.
            scaled = [[[pt[0] * ratio_w, pt[1] * ratio_h] for pt in box]
                      for box in data['polys']]
            data['polys'] = np.array(scaled, dtype=np.float32)
        data['image'] = img_resized
        return data
class DetResizeForTest:
    """Resize images for text-detection inference.

    The strategy is selected from the constructor kwargs:
      * ``image_shape``    -> fixed target shape (type 1), optional keep_ratio
      * ``limit_side_len`` -> bound the min/max side, snap to multiples of 32 (type 0)
      * ``resize_long``    -> scale so the long side matches, snap to 128 (type 2)
      * none of the above  -> type 0 with limit_side_len=736, limit_type='min'
    """

    def __init__(self, **kwargs):
        super(DetResizeForTest, self).__init__()
        self.resize_type = 0
        self.keep_ratio = False
        if 'image_shape' in kwargs:
            self.image_shape = kwargs['image_shape']
            self.resize_type = 1
            if 'keep_ratio' in kwargs:
                self.keep_ratio = kwargs['keep_ratio']
        elif 'limit_side_len' in kwargs:
            self.limit_side_len = kwargs['limit_side_len']
            self.limit_type = kwargs.get('limit_type', 'min')
        elif 'resize_long' in kwargs:
            self.resize_type = 2
            self.resize_long = kwargs.get('resize_long', 960)
        else:
            self.limit_side_len = 736
            self.limit_type = 'min'

    def __call__(self, data):
        img = data['image']
        src_h, src_w, _ = img.shape
        # Very small images get padded to at least 32x32 before resizing.
        if src_h + src_w < 64:
            img = self.image_padding(img)
        if self.resize_type == 0:
            img, (ratio_h, ratio_w) = self.resize_image_type0(img)
        elif self.resize_type == 2:
            img, (ratio_h, ratio_w) = self.resize_image_type2(img)
        else:
            img, (ratio_h, ratio_w) = self.resize_image_type1(img)
        data['image'] = img
        data['shape'] = np.array([src_h, src_w, ratio_h, ratio_w])
        return data

    def image_padding(self, im, value=0):
        h, w, c = im.shape
        padded = np.zeros((max(32, h), max(32, w), c), np.uint8) + value
        padded[:h, :w, :] = im
        return padded

    def resize_image_type1(self, img):
        target_h, target_w = self.image_shape
        src_h, src_w = img.shape[:2]  # (h, w, c)
        if self.keep_ratio is True:
            # Derive the width from the aspect ratio, rounded up to 32.
            target_w = src_w * target_h / src_h
            target_w = math.ceil(target_w / 32) * 32
        ratio_h = float(target_h) / src_h
        ratio_w = float(target_w) / src_w
        img = cv2.resize(img, (int(target_w), int(target_h)))
        return img, [ratio_h, ratio_w]

    def resize_image_type0(self, img):
        """
        resize image to a size multiple of 32 which is required by the network
        args:
            img(array): array with shape [h, w, c]
        return(tuple):
            img, (ratio_h, ratio_w)
        """
        limit = self.limit_side_len
        h, w, c = img.shape
        if self.limit_type == 'max':
            # Shrink only if the long side exceeds the limit.
            ratio = float(limit) / max(h, w) if max(h, w) > limit else 1.
        elif self.limit_type == 'min':
            # Grow only if the short side is below the limit.
            ratio = float(limit) / min(h, w) if min(h, w) < limit else 1.
        elif self.limit_type == 'resize_long':
            ratio = float(limit) / max(h, w)
        else:
            raise Exception('not support limit type, image ')
        resize_h = int(h * ratio)
        resize_w = int(w * ratio)
        # Snap to the nearest multiple of 32, never below 32.
        resize_h = max(int(round(resize_h / 32) * 32), 32)
        resize_w = max(int(round(resize_w / 32) * 32), 32)
        try:
            if int(resize_w) <= 0 or int(resize_h) <= 0:
                return None, (None, None)
            img = cv2.resize(img, (int(resize_w), int(resize_h)))
        except BaseException:
            logging.exception("{} {} {}".format(img.shape, resize_w, resize_h))
            sys.exit(0)
        return img, [resize_h / float(h), resize_w / float(w)]

    def resize_image_type2(self, img):
        h, w, _ = img.shape
        # Fix the long side to resize_long, then snap up to multiples of 128.
        ratio = float(self.resize_long) / max(h, w)
        resize_h = int(h * ratio)
        resize_w = int(w * ratio)
        max_stride = 128
        resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
        resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
        img = cv2.resize(img, (int(resize_w), int(resize_h)))
        return img, [resize_h / float(h), resize_w / float(w)]
class E2EResizeForTest:
    """Resize images for end-to-end text spotting inference."""

    def __init__(self, **kwargs):
        super(E2EResizeForTest, self).__init__()
        self.max_side_len = kwargs['max_side_len']
        self.valid_set = kwargs['valid_set']

    def __call__(self, data):
        img = data['image']
        src_h, src_w, _ = img.shape
        # Total-Text uses its own upscaling rule; everything else caps the
        # longer side at max_side_len.
        if self.valid_set == 'totaltext':
            resized, (ratio_h, ratio_w) = self.resize_image_for_totaltext(
                img, max_side_len=self.max_side_len)
        else:
            resized, (ratio_h, ratio_w) = self.resize_image(
                img, max_side_len=self.max_side_len)
        data['image'] = resized
        data['shape'] = np.array([src_h, src_w, ratio_h, ratio_w])
        return data

    def resize_image_for_totaltext(self, im, max_side_len=512):
        # Upscale by 1.25 unless that would push the height past the limit.
        h, w, _ = im.shape
        ratio = 1.25
        if h * ratio > max_side_len:
            ratio = float(max_side_len) / h
        return self._scale_and_snap(im, h, w, ratio)

    def resize_image(self, im, max_side_len=512):
        """
        resize image to a size multiple of max_stride which is required by the network
        :param im: the resized image
        :param max_side_len: limit of max image size to avoid out of memory in gpu
        :return: the resized image and the resize ratio
        """
        h, w, _ = im.shape
        # Fix the longer side.
        ratio = float(max_side_len) / max(h, w)
        return self._scale_and_snap(im, h, w, ratio)

    def _scale_and_snap(self, im, h, w, ratio):
        # Round the scaled dimensions up to a multiple of 128 (network stride).
        max_stride = 128
        resize_h = (int(h * ratio) + max_stride - 1) // max_stride * max_stride
        resize_w = (int(w * ratio) + max_stride - 1) // max_stride * max_stride
        im = cv2.resize(im, (int(resize_w), int(resize_h)))
        return im, (resize_h / float(h), resize_w / float(w))
class KieResize:
    """Resize and pad images (and their boxes) for KIE models.

    The image is scaled to fit a (512, 1024) short/long-side budget, snapped
    to multiples of 32, and pasted top-left onto a fixed 1024x1024 canvas.
    """

    def __init__(self, **kwargs):
        super(KieResize, self).__init__()
        self.max_side, self.min_side = kwargs['img_scale'][0], kwargs[
            'img_scale'][1]

    def __call__(self, data):
        img = data['image']
        points = data['points']
        _src_h, _src_w, _ = img.shape
        im_resized, scale_factor, [ratio_h, ratio_w], [new_h, new_w] = \
            self.resize_image(img)
        resized_points = self.resize_boxes(img, points, scale_factor)
        data['ori_image'] = img
        data['ori_boxes'] = points
        data['points'] = resized_points
        data['image'] = im_resized
        data['shape'] = np.array([new_h, new_w])
        return data

    def resize_image(self, img):
        norm_img = np.zeros([1024, 1024, 3], dtype='float32')
        scale = [512, 1024]
        h, w = img.shape[:2]
        # Largest factor that keeps both sides inside the budget.
        factor = min(max(scale) / max(h, w), min(scale) / min(h, w))
        resize_w = int(w * float(factor) + 0.5)
        resize_h = int(h * float(factor) + 0.5)
        # Snap up to multiples of 32.
        max_stride = 32
        resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
        resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
        im = cv2.resize(img, (resize_w, resize_h))
        new_h, new_w = im.shape[:2]
        w_scale = new_w / w
        h_scale = new_h / h
        scale_factor = np.array(
            [w_scale, h_scale, w_scale, h_scale], dtype=np.float32)
        norm_img[:new_h, :new_w, :] = im
        return norm_img, scale_factor, [h_scale, w_scale], [new_h, new_w]

    def resize_boxes(self, im, points, scale_factor):
        # Scale box coordinates and clip them into the bounds of `im`.
        scaled = points * scale_factor
        img_shape = im.shape[:2]
        scaled[:, 0::2] = np.clip(scaled[:, 0::2], 0, img_shape[1])
        scaled[:, 1::2] = np.clip(scaled[:, 1::2], 0, img_shape[0])
        return scaled
class SRResize:
    """Build the low-resolution (and, in training, high-resolution) inputs
    for super-resolution text recognition."""

    def __init__(self,
                 imgH=32,
                 imgW=128,
                 down_sample_scale=4,
                 keep_ratio=False,
                 min_ratio=1,
                 mask=False,
                 infer_mode=False,
                 **kwargs):
        self.imgH = imgH
        self.imgW = imgW
        self.keep_ratio = keep_ratio
        self.min_ratio = min_ratio
        self.down_sample_scale = down_sample_scale
        self.mask = mask
        self.infer_mode = infer_mode

    def __call__(self, data):
        target_h, target_w = self.imgH, self.imgW
        # LR branch: target dimensions reduced by down_sample_scale.
        lr_transform = ResizeNormalize(
            (target_w // self.down_sample_scale,
             target_h // self.down_sample_scale))
        data["img_lr"] = lr_transform(data["image_lr"])
        if self.infer_mode:
            return data
        # Training additionally requires the full-size HR image and a label.
        images_hr = data["image_hr"]
        _label_strs = data["label"]  # accessed for parity; raises if missing
        hr_transform = ResizeNormalize((target_w, target_h))
        data["img_hr"] = hr_transform(images_hr)
        return data
class ResizeNormalize:
    """Resize a PIL image and return a CHW float array scaled into [0, 1]."""

    def __init__(self, size, interpolation=Image.BICUBIC):
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img):
        resized = img.resize(self.size, self.interpolation)
        arr = np.array(resized).astype("float32")
        # HWC -> CHW, pixels mapped to [0, 1].
        return arr.transpose((2, 0, 1)) / 255
class GrayImageChannelFormat:
    """
    format gray scale image's channel: (3,h,w) -> (1,h,w)
    Args:
        inverse: inverse gray image
    """

    def __init__(self, inverse=False, **kwargs):
        self.inverse = inverse

    def __call__(self, data):
        img = data['image']
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        expanded = np.expand_dims(gray, 0)
        # Same inversion expression as before (uint8 arithmetic semantics kept).
        data['image'] = np.abs(expanded - 1) if self.inverse else expanded
        data['src_image'] = img
        return data
class Permute:
    """Transpose an HWC image to CHW; the accompanying info dict is untouched."""

    def __init__(self):
        super(Permute, self).__init__()

    def __call__(self, im, im_info):
        """
        Args:
            im (np.ndarray): HWC image
            im_info (dict): info of image, passed through
        Returns:
            (np.ndarray, dict): CHW copy of the image and the same info dict
        """
        return im.transpose((2, 0, 1)).copy(), im_info
class PadStride:
    """Zero-pad a CHW image so height and width are multiples of the stride.

    Needed by FPN-style models that require image shape % stride == 0; a
    non-positive stride disables padding entirely.
    """

    def __init__(self, stride=0):
        self.coarsest_stride = stride

    def __call__(self, im, im_info):
        """
        Args:
            im (np.ndarray): CHW image
            im_info (dict): info of image, passed through
        Returns:
            (np.ndarray, dict): padded float32 image and the same info dict
        """
        stride = self.coarsest_stride
        if stride <= 0:
            return im, im_info
        c, h, w = im.shape
        pad_h = int(np.ceil(float(h) / stride) * stride)
        pad_w = int(np.ceil(float(w) / stride) * stride)
        padded = np.zeros((c, pad_h, pad_w), dtype=np.float32)
        padded[:, :h, :w] = im
        return padded, im_info
def decode_image(im_file, im_info):
    """Load an RGB image from a path or pass an ndarray through.

    Args:
        im_file (str | np.ndarray): image path, or an already-decoded array.
        im_info (dict): populated with 'im_shape' and a unit 'scale_factor'.

    Returns:
        (np.ndarray, dict): the image and the updated info dict.
    """
    if isinstance(im_file, str):
        with open(im_file, 'rb') as f:
            raw = f.read()
        buf = np.frombuffer(raw, dtype='uint8')
        decoded = cv2.imdecode(buf, 1)  # decoded as BGR; model expects RGB
        im = cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB)
    else:
        im = im_file
    im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
    im_info['scale_factor'] = np.array([1., 1.], dtype=np.float32)
    return im, im_info
def preprocess(im, preprocess_ops):
    """Decode an image and push it through the given operator chain.

    Each operator receives and returns ``(im, im_info)``.
    """
    im_info = {
        'scale_factor': np.array([1., 1.], dtype=np.float32),
        'im_shape': None,
    }
    im, im_info = decode_image(im, im_info)
    for op in preprocess_ops:
        im, im_info = op(im, im_info)
    return im, im_info
def nms(bboxes, scores, iou_thresh):
    """Greedy non-maximum suppression.

    Args:
        bboxes (np.ndarray): (N, 4) boxes as [x1, y1, x2, y2].
        scores (np.ndarray): (N,) confidence score per box.
        iou_thresh (float): boxes whose IoU with a kept box exceeds this
            threshold are suppressed.

    Returns:
        list: indices of the kept boxes, ordered by descending score.
    """
    # Note: module-level numpy import suffices; the former function-local
    # `import numpy as np` was redundant and has been removed.
    x1 = bboxes[:, 0]
    y1 = bboxes[:, 1]
    x2 = bboxes[:, 2]
    y2 = bboxes[:, 3]
    areas = (y2 - y1) * (x2 - x1)
    indices = []
    index = scores.argsort()[::-1]  # candidate indices, best score first
    while index.size > 0:
        i = index[0]
        indices.append(i)
        # Intersection of the kept box with every remaining candidate
        # (the +1 treats coordinates as inclusive pixel indices).
        x11 = np.maximum(x1[i], x1[index[1:]])
        y11 = np.maximum(y1[i], y1[index[1:]])
        x22 = np.minimum(x2[i], x2[index[1:]])
        y22 = np.minimum(y2[i], y2[index[1:]])
        w = np.maximum(0, x22 - x11 + 1)
        h = np.maximum(0, y22 - y11 + 1)
        overlaps = w * h
        ious = overlaps / (areas[i] + areas[index[1:]] - overlaps)
        # Keep only candidates that don't overlap box i too much.
        idx = np.where(ious <= iou_thresh)[0]
        index = index[idx + 1]
    return indices
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/vision/table_structure_recognizer.py | deepdoc/vision/table_structure_recognizer.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
from collections import Counter
import numpy as np
from huggingface_hub import snapshot_download
from common.file_utils import get_project_base_directory
from rag.nlp import rag_tokenizer
from .recognizer import Recognizer
class TableStructureRecognizer(Recognizer):
    """Table-structure recognition (TSR).

    Detects table sub-structures (rows, columns, headers, spanning cells)
    inside table images, then reconstructs the cell grid and renders it
    either as HTML or as natural-language row descriptions.
    """

    # Detection labels produced by the underlying "tsr" model.
    labels = [
        "table",
        "table column",
        "table row",
        "table column header",
        "table projected row header",
        "table spanning cell",
    ]

    def __init__(self):
        """Load the "tsr" model from the local deepdoc resources; on failure,
        download the InfiniFlow/deepdoc snapshot from HuggingFace first."""
        try:
            super().__init__(self.labels, "tsr", os.path.join(get_project_base_directory(), "rag/res/deepdoc"))
        except Exception:
            super().__init__(
                self.labels,
                "tsr",
                snapshot_download(
                    repo_id="InfiniFlow/deepdoc",
                    local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"),
                    local_dir_use_symlinks=False,
                ),
            )

    def __call__(self, images, thr=0.2):
        """Run TSR over *images* and normalize detected structure boxes.

        Rows/headers are stretched to a shared left/right edge and columns to
        a shared top/bottom edge so the later grid construction lines up.
        Backend is chosen via TABLE_STRUCTURE_RECOGNIZER_TYPE ("onnx"/"ascend").

        Returns:
            One list of {label, score, x0, x1, top, bottom} dicts per image.
        """
        table_structure_recognizer_type = os.getenv("TABLE_STRUCTURE_RECOGNIZER_TYPE", "onnx").lower()
        if table_structure_recognizer_type not in ["onnx", "ascend"]:
            raise RuntimeError("Unsupported table structure recognizer type.")
        if table_structure_recognizer_type == "onnx":
            logging.debug("Using Onnx table structure recognizer")
            tbls = super().__call__(images, thr)
        else:  # ascend
            logging.debug("Using Ascend table structure recognizer")
            tbls = self._run_ascend_tsr(images, thr)
        res = []
        # align left&right for rows, align top&bottom for columns
        for tbl in tbls:
            lts = [
                {
                    "label": b["type"],
                    "score": b["score"],
                    "x0": b["bbox"][0],
                    "x1": b["bbox"][2],
                    "top": b["bbox"][1],
                    "bottom": b["bbox"][-1],
                }
                for b in tbl
            ]
            if not lts:
                continue
            # representative left/right edge over row/header boxes
            # (mean when there are enough samples, otherwise min/max extremes)
            left = [b["x0"] for b in lts if b["label"].find("row") > 0 or b["label"].find("header") > 0]
            right = [b["x1"] for b in lts if b["label"].find("row") > 0 or b["label"].find("header") > 0]
            if not left:
                continue
            left = np.mean(left) if len(left) > 4 else np.min(left)
            right = np.mean(right) if len(right) > 4 else np.max(right)
            for b in lts:
                if b["label"].find("row") > 0 or b["label"].find("header") > 0:
                    if b["x0"] > left:
                        b["x0"] = left
                    if b["x1"] < right:
                        b["x1"] = right
            top = [b["top"] for b in lts if b["label"] == "table column"]
            bottom = [b["bottom"] for b in lts if b["label"] == "table column"]
            if not top:
                res.append(lts)
                continue
            top = np.median(top) if len(top) > 4 else np.min(top)
            bottom = np.median(bottom) if len(bottom) > 4 else np.max(bottom)
            for b in lts:
                if b["label"] == "table column":
                    if b["top"] > top:
                        b["top"] = top
                    if b["bottom"] < bottom:
                        b["bottom"] = bottom
            res.append(lts)
        return res

    @staticmethod
    def is_caption(bx):
        """Return True if box *bx* looks like a caption: CJK "图/表 N"
        prefix, or a layout type containing "caption"."""
        patt = [r"[图表]+[ 0-9::]{2,}"]
        if any([re.match(p, bx["text"].strip()) for p in patt]) or bx.get("layout_type", "").find("caption") >= 0:
            return True
        return False

    @staticmethod
    def blockType(b):
        """Classify a cell's text content.

        Codes: Dt=date, Nu=number, Ca=code/identifier, En=English words,
        NE=number+entity mix, Sg=single character, Tx=short text,
        Lx=long text, Nr=person name, Ot=other.
        """
        patt = [
            ("^(20|19)[0-9]{2}[年/-][0-9]{1,2}[月/-][0-9]{1,2}日*$", "Dt"),
            (r"^(20|19)[0-9]{2}年$", "Dt"),
            (r"^(20|19)[0-9]{2}[年-][0-9]{1,2}月*$", "Dt"),
            ("^[0-9]{1,2}[月-][0-9]{1,2}日*$", "Dt"),
            (r"^第*[一二三四1-4]季度$", "Dt"),
            (r"^(20|19)[0-9]{2}年*[一二三四1-4]季度$", "Dt"),
            (r"^(20|19)[0-9]{2}[ABCDE]$", "Dt"),
            ("^[0-9.,+%/ -]+$", "Nu"),
            (r"^[0-9A-Z/\._~-]+$", "Ca"),
            (r"^[A-Z]*[a-z' -]+$", "En"),
            (r"^[0-9.,+-]+[0-9A-Za-z/$¥%<>()()' -]+$", "NE"),
            (r"^.{1}$", "Sg"),
        ]
        for p, n in patt:
            if re.search(p, b["text"].strip()):
                return n
        # fall back to token count: 4-11 tokens = short text, 12+ = long text
        tks = [t for t in rag_tokenizer.tokenize(b["text"]).split() if len(t) > 1]
        if len(tks) > 3:
            if len(tks) < 12:
                return "Tx"
            else:
                return "Lx"
        # single token tagged "nr" by the tokenizer -> person name
        if len(tks) == 1 and rag_tokenizer.tag(tks[0]) == "nr":
            return "Nr"
        return "Ot"

    @staticmethod
    def construct_table(boxes, is_english=False, html=True, **kwargs):
        """Assemble recognized cell boxes into a table rendering.

        Steps: pull caption boxes out of *boxes*; classify each cell's
        content type; group boxes into rows (row-label "R" / vertical
        position) and columns (column-label "C" / horizontal position);
        relocate stray single-cell rows/columns; guess header rows; then
        render HTML (*html*=True) or row-by-row text descriptions.

        NOTE: mutates the dicts in *boxes* (adds "btype"/"rn"/"cn" keys).
        """
        cap = ""
        i = 0
        while i < len(boxes):
            if TableStructureRecognizer.is_caption(boxes[i]):
                if is_english:
                    cap += " "
                cap += boxes[i]["text"]
                boxes.pop(i)
                i -= 1
            i += 1
        if not boxes:
            return []
        for b in boxes:
            b["btype"] = TableStructureRecognizer.blockType(b)
        # dominant content type across all cells (drives header heuristics)
        max_type = Counter([b["btype"] for b in boxes]).items()
        max_type = max(max_type, key=lambda x: x[1])[0] if max_type else ""
        logging.debug("MAXTYPE: " + max_type)
        rowh = [b["R_bott"] - b["R_top"] for b in boxes if "R" in b]
        rowh = np.min(rowh) if rowh else 0
        boxes = Recognizer.sort_R_firstly(boxes, rowh / 2)
        # for b in boxes:print(b)
        # group consecutive boxes into rows; "rn" = row number
        boxes[0]["rn"] = 0
        rows = [[boxes[0]]]
        btm = boxes[0]["bottom"]
        for b in boxes[1:]:
            b["rn"] = len(rows) - 1
            lst_r = rows[-1]
            if lst_r[-1].get("R", "") != b.get("R", "") or (b["top"] >= btm - 3 and lst_r[-1].get("R", "-1") != b.get("R", "-2")):  # new row
                btm = b["bottom"]
                b["rn"] += 1
                rows.append([b])
                continue
            btm = (btm + b["bottom"]) / 2.0
            rows[-1].append(b)
        colwm = [b["C_right"] - b["C_left"] for b in boxes if "C" in b]
        colwm = np.min(colwm) if colwm else 0
        crosspage = len(set([b["page_number"] for b in boxes])) > 1
        if crosspage:
            boxes = Recognizer.sort_X_firstly(boxes, colwm / 2)
        else:
            boxes = Recognizer.sort_C_firstly(boxes, colwm / 2)
        # group into columns; "cn" = column number
        boxes[0]["cn"] = 0
        cols = [[boxes[0]]]
        right = boxes[0]["x1"]
        for b in boxes[1:]:
            b["cn"] = len(cols) - 1
            lst_c = cols[-1]
            if (int(b.get("C", "1")) - int(lst_c[-1].get("C", "1")) == 1 and b["page_number"] == lst_c[-1]["page_number"]) or (
                b["x0"] >= right and lst_c[-1].get("C", "-1") != b.get("C", "-2")
            ):  # new col
                right = b["x1"]
                b["cn"] += 1
                cols.append([b])
                continue
            right = (right + b["x1"]) / 2.0
            cols[-1].append(b)
        # tbl[row][col] = list of boxes occupying that grid cell
        tbl = [[[] for _ in range(len(cols))] for _ in range(len(rows))]
        for b in boxes:
            tbl[b["rn"]][b["cn"]].append(b)
        if len(rows) >= 4:
            # remove single in column
            j = 0
            while j < len(tbl[0]):
                e, ii = 0, 0
                for i in range(len(tbl)):
                    if tbl[i][j]:
                        e += 1
                        ii = i
                    if e > 1:
                        break
                if e > 1:
                    j += 1
                    continue
                f = (j > 0 and tbl[ii][j - 1] and tbl[ii][j - 1][0].get("text")) or j == 0
                ff = (j + 1 < len(tbl[ii]) and tbl[ii][j + 1] and tbl[ii][j + 1][0].get("text")) or j + 1 >= len(tbl[ii])
                if f and ff:
                    j += 1
                    continue
                bx = tbl[ii][j][0]
                logging.debug("Relocate column single: " + bx["text"])
                # j column only has one value
                left, right = 100000, 100000
                if j > 0 and not f:
                    for i in range(len(tbl)):
                        if tbl[i][j - 1]:
                            left = min(left, np.min([bx["x0"] - a["x1"] for a in tbl[i][j - 1]]))
                if j + 1 < len(tbl[0]) and not ff:
                    for i in range(len(tbl)):
                        if tbl[i][j + 1]:
                            right = min(right, np.min([a["x0"] - bx["x1"] for a in tbl[i][j + 1]]))
                assert left < 100000 or right < 100000
                # merge the lone cell into the horizontally nearer neighbor column
                if left < right:
                    for jj in range(j, len(tbl[0])):
                        for i in range(len(tbl)):
                            for a in tbl[i][jj]:
                                a["cn"] -= 1
                    if tbl[ii][j - 1]:
                        tbl[ii][j - 1].extend(tbl[ii][j])
                    else:
                        tbl[ii][j - 1] = tbl[ii][j]
                    for i in range(len(tbl)):
                        tbl[i].pop(j)
                else:
                    for jj in range(j + 1, len(tbl[0])):
                        for i in range(len(tbl)):
                            for a in tbl[i][jj]:
                                a["cn"] -= 1
                    if tbl[ii][j + 1]:
                        tbl[ii][j + 1].extend(tbl[ii][j])
                    else:
                        tbl[ii][j + 1] = tbl[ii][j]
                    for i in range(len(tbl)):
                        tbl[i].pop(j)
                cols.pop(j)
        assert len(cols) == len(tbl[0]), "Column NO. miss matched: %d vs %d" % (len(cols), len(tbl[0]))
        if len(cols) >= 4:
            # remove single in row
            i = 0
            while i < len(tbl):
                e, jj = 0, 0
                for j in range(len(tbl[i])):
                    if tbl[i][j]:
                        e += 1
                        jj = j
                    if e > 1:
                        break
                if e > 1:
                    i += 1
                    continue
                f = (i > 0 and tbl[i - 1][jj] and tbl[i - 1][jj][0].get("text")) or i == 0
                ff = (i + 1 < len(tbl) and tbl[i + 1][jj] and tbl[i + 1][jj][0].get("text")) or i + 1 >= len(tbl)
                if f and ff:
                    i += 1
                    continue
                bx = tbl[i][jj][0]
                logging.debug("Relocate row single: " + bx["text"])
                # i row only has one value
                up, down = 100000, 100000
                if i > 0 and not f:
                    for j in range(len(tbl[i - 1])):
                        if tbl[i - 1][j]:
                            up = min(up, np.min([bx["top"] - a["bottom"] for a in tbl[i - 1][j]]))
                if i + 1 < len(tbl) and not ff:
                    for j in range(len(tbl[i + 1])):
                        if tbl[i + 1][j]:
                            down = min(down, np.min([a["top"] - bx["bottom"] for a in tbl[i + 1][j]]))
                assert up < 100000 or down < 100000
                # merge the lone cell into the vertically nearer neighbor row
                if up < down:
                    for ii in range(i, len(tbl)):
                        for j in range(len(tbl[ii])):
                            for a in tbl[ii][j]:
                                a["rn"] -= 1
                    if tbl[i - 1][jj]:
                        tbl[i - 1][jj].extend(tbl[i][jj])
                    else:
                        tbl[i - 1][jj] = tbl[i][jj]
                    tbl.pop(i)
                else:
                    for ii in range(i + 1, len(tbl)):
                        for j in range(len(tbl[ii])):
                            for a in tbl[ii][j]:
                                a["rn"] -= 1
                    if tbl[i + 1][jj]:
                        tbl[i + 1][jj].extend(tbl[i][jj])
                    else:
                        tbl[i + 1][jj] = tbl[i][jj]
                    tbl.pop(i)
                rows.pop(i)
        # which rows are headers
        hdset = set([])
        for i in range(len(tbl)):
            cnt, h = 0, 0
            for j, arr in enumerate(tbl[i]):
                if not arr:
                    continue
                cnt += 1
                if max_type == "Nu" and arr[0]["btype"] == "Nu":
                    continue
                if any([a.get("H") for a in arr]) or (max_type == "Nu" and arr[0]["btype"] != "Nu"):
                    h += 1
            # row is a header when more than half its filled cells look header-ish
            if h / cnt > 0.5:
                hdset.add(i)
        if html:
            return TableStructureRecognizer.__html_table(cap, hdset, TableStructureRecognizer.__cal_spans(boxes, rows, cols, tbl, True))
        return TableStructureRecognizer.__desc_table(cap, hdset, TableStructureRecognizer.__cal_spans(boxes, rows, cols, tbl, False), is_english)

    @staticmethod
    def __html_table(cap, hdset, tbl):
        """Render the resolved cell grid as an HTML <table> string.

        *hdset* holds header row indices; spanning cells carry
        "colspan"/"rowspan" set by __cal_spans (None marks covered cells).

        NOTE(review): header *texts* are also added into *hdset* below so a
        repeated header row (same texts) is skipped; mixing ints and strings
        in one set looks intentional but fragile — confirm.
        """
        # construct HTML
        html = "<table>"
        if cap:
            html += f"<caption>{cap}</caption>"
        for i in range(len(tbl)):
            row = "<tr>"
            txts = []
            for j, arr in enumerate(tbl[i]):
                if arr is None:
                    continue
                if not arr:
                    row += "<td></td>" if i not in hdset else "<th></th>"
                    continue
                txt = ""
                if arr:
                    # tolerance for Y-sorting the fragments stacked in one cell
                    h = min(np.min([c["bottom"] - c["top"] for c in arr]) / 2, 10)
                    txt = " ".join([c["text"] for c in Recognizer.sort_Y_firstly(arr, h)])
                txts.append(txt)
                sp = ""
                if arr[0].get("colspan"):
                    sp = "colspan={}".format(arr[0]["colspan"])
                if arr[0].get("rowspan"):
                    sp += " rowspan={}".format(arr[0]["rowspan"])
                if i in hdset:
                    row += f"<th {sp} >" + txt + "</th>"
                else:
                    row += f"<td {sp} >" + txt + "</td>"
            if i in hdset:
                if all([t in hdset for t in txts]):
                    continue
                for t in txts:
                    hdset.add(t)
            if row != "<tr>":
                row += "</tr>"
            else:
                row = ""
            html += "\n" + row
        html += "\n</table>"
        return html

    @staticmethod
    def __desc_table(cap, hdr_rowno, tbl, is_english):
        """Render the grid as natural-language rows: "header:cell; ..." lines,
        optionally suffixed with the table caption."""
        # get text of every column in header row to become header text
        clmno = len(tbl[0])
        rowno = len(tbl)
        headers = {}
        hdrset = set()
        lst_hdr = []
        de = "的" if not is_english else " for "
        for r in sorted(list(hdr_rowno)):
            headers[r] = ["" for _ in range(clmno)]
            for i in range(clmno):
                if not tbl[r][i]:
                    continue
                txt = " ".join([a["text"].strip() for a in tbl[r][i]])
                headers[r][i] = txt
                hdrset.add(txt)
            if all([not t for t in headers[r]]):
                del headers[r]
                hdr_rowno.remove(r)
                continue
            # fill gaps in this header row from the previous header row
            for j in range(clmno):
                if headers[r][j]:
                    continue
                if j >= len(lst_hdr):
                    break
                headers[r][j] = lst_hdr[j]
            lst_hdr = headers[r]
        # merge stacked header rows ("A" over "B" -> "A的B" / "A for B")
        for i in range(rowno):
            if i not in hdr_rowno:
                continue
            for j in range(i + 1, rowno):
                if j not in hdr_rowno:
                    break
                for k in range(clmno):
                    if not headers[j - 1][k]:
                        continue
                    if headers[j][k].find(headers[j - 1][k]) >= 0:
                        continue
                    if len(headers[j][k]) > len(headers[j - 1][k]):
                        headers[j][k] += (de if headers[j][k] else "") + headers[j - 1][k]
                    else:
                        headers[j][k] = headers[j - 1][k] + (de if headers[j - 1][k] else "") + headers[j][k]
        logging.debug(f">>>>>>>>>>>>>>>>>{cap}:SIZE:{rowno}X{clmno} Header: {hdr_rowno}")
        row_txt = []
        for i in range(rowno):
            if i in hdr_rowno:
                continue
            rtxt = []

            def append(delimer):
                # join the collected cell texts; short rows are packed together
                nonlocal rtxt, row_txt
                rtxt = delimer.join(rtxt)
                if row_txt and len(row_txt[-1]) + len(rtxt) < 64:
                    row_txt[-1] += "\n" + rtxt
                else:
                    row_txt.append(rtxt)

            # pick the nearest header row above row i
            r = 0
            if len(headers.items()):
                _arr = [(i - r, r) for r, _ in headers.items() if r < i]
                if _arr:
                    _, r = min(_arr, key=lambda x: x[0])
            if r not in headers and clmno <= 2:
                for j in range(clmno):
                    if not tbl[i][j]:
                        continue
                    txt = "".join([a["text"].strip() for a in tbl[i][j]])
                    if txt:
                        rtxt.append(txt)
                if rtxt:
                    append(":")
                continue
            for j in range(clmno):
                if not tbl[i][j]:
                    continue
                txt = "".join([a["text"].strip() for a in tbl[i][j]])
                if not txt:
                    continue
                ctt = headers[r][j] if r in headers else ""
                if ctt:
                    ctt += ":"
                ctt += txt
                if ctt:
                    rtxt.append(ctt)
            if rtxt:
                row_txt.append("; ".join(rtxt))
        if cap:
            if is_english:
                from_ = " in "
            else:
                from_ = "来自"
            row_txt = [t + f"\t——{from_}“{cap}”" for t in row_txt]
        return row_txt

    @staticmethod
    def __cal_spans(boxes, rows, cols, tbl, html=True):
        """Compute colspan/rowspan for spanning cells ("SP"-marked boxes) and
        merge the covered grid positions.

        With *html*=True covered cells become None placeholders; otherwise the
        merged content list is shared into every covered position.
        """
        # calculate span
        # mean column left/right and row top/bottom edges used as references
        clft = [np.mean([c.get("C_left", c["x0"]) for c in cln]) for cln in cols]
        crgt = [np.mean([c.get("C_right", c["x1"]) for c in cln]) for cln in cols]
        rtop = [np.mean([c.get("R_top", c["top"]) for c in row]) for row in rows]
        rbtm = [np.mean([c.get("R_btm", c["bottom"]) for c in row]) for row in rows]
        for b in boxes:
            if "SP" not in b:
                continue
            b["colspan"] = [b["cn"]]
            b["rowspan"] = [b["rn"]]
            # col span
            for j in range(0, len(clft)):
                if j == b["cn"]:
                    continue
                if clft[j] + (crgt[j] - clft[j]) / 2 < b["H_left"]:
                    continue
                if crgt[j] - (crgt[j] - clft[j]) / 2 > b["H_right"]:
                    continue
                b["colspan"].append(j)
            # row span
            for j in range(0, len(rtop)):
                if j == b["rn"]:
                    continue
                if rtop[j] + (rbtm[j] - rtop[j]) / 2 < b["H_top"]:
                    continue
                if rbtm[j] - (rbtm[j] - rtop[j]) / 2 > b["H_bott"]:
                    continue
                b["rowspan"].append(j)

        def join(arr):
            # concatenated text of a cell's boxes (used to avoid duplicate merges)
            if not arr:
                return ""
            return "".join([t["text"] for t in arr])

        # rm the spanning cells
        for i in range(len(tbl)):
            for j, arr in enumerate(tbl[i]):
                if not arr:
                    continue
                if all(["rowspan" not in a and "colspan" not in a for a in arr]):
                    continue
                rowspan, colspan = [], []
                for a in arr:
                    if isinstance(a.get("rowspan", 0), list):
                        rowspan.extend(a["rowspan"])
                    if isinstance(a.get("colspan", 0), list):
                        colspan.extend(a["colspan"])
                rowspan, colspan = set(rowspan), set(colspan)
                if len(rowspan) < 2 and len(colspan) < 2:
                    for a in arr:
                        if "rowspan" in a:
                            del a["rowspan"]
                        if "colspan" in a:
                            del a["colspan"]
                    continue
                # expand to a contiguous rectangular span
                rowspan, colspan = sorted(rowspan), sorted(colspan)
                rowspan = list(range(rowspan[0], rowspan[-1] + 1))
                colspan = list(range(colspan[0], colspan[-1] + 1))
                assert i in rowspan, rowspan
                assert j in colspan, colspan
                arr = []
                for r in rowspan:
                    for c in colspan:
                        arr_txt = join(arr)
                        if tbl[r][c] and join(tbl[r][c]) != arr_txt:
                            arr.extend(tbl[r][c])
                        tbl[r][c] = None if html else arr
                for a in arr:
                    if len(rowspan) > 1:
                        a["rowspan"] = len(rowspan)
                    elif "rowspan" in a:
                        del a["rowspan"]
                    if len(colspan) > 1:
                        a["colspan"] = len(colspan)
                    elif "colspan" in a:
                        del a["colspan"]
                # anchor the merged content at the span's top-left cell
                tbl[rowspan[0]][colspan[0]] = arr
        return tbl

    def _run_ascend_tsr(self, image_list, thr=0.2, batch_size=16):
        """Run TSR inference on an Ascend NPU via ais_bench with the compiled
        "tsr.om" model; returns per-image detection lists like the ONNX path.

        Device is selected with ASCEND_LAYOUT_RECOGNIZER_DEVICE_ID (default 0).
        """
        import math
        from ais_bench.infer.interface import InferSession
        model_dir = os.path.join(get_project_base_directory(), "rag/res/deepdoc")
        model_file_path = os.path.join(model_dir, "tsr.om")
        if not os.path.exists(model_file_path):
            raise ValueError(f"Model file not found: {model_file_path}")
        device_id = int(os.getenv("ASCEND_LAYOUT_RECOGNIZER_DEVICE_ID", 0))
        session = InferSession(device_id=device_id, model_path=model_file_path)
        images = [np.array(im) if not isinstance(im, np.ndarray) else im for im in image_list]
        results = []
        # floor the confidence threshold so a near-zero thr doesn't flood results
        conf_thr = max(thr, 0.08)
        batch_loop_cnt = math.ceil(float(len(images)) / batch_size)
        for bi in range(batch_loop_cnt):
            s = bi * batch_size
            e = min((bi + 1) * batch_size, len(images))
            batch_images = images[s:e]
            inputs_list = self.preprocess(batch_images)
            for ins in inputs_list:
                feeds = []
                if "image" in ins:
                    feeds.append(ins["image"])
                else:
                    feeds.append(ins[self.input_names[0]])
                output_list = session.infer(feeds=feeds, mode="static")
                bb = self.postprocess(output_list, ins, conf_thr)
                results.append(bb)
        return results
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/vision/layout_recognizer.py | deepdoc/vision/layout_recognizer.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import math
import os
# import re
from collections import Counter
from copy import deepcopy
import cv2
import numpy as np
from huggingface_hub import snapshot_download
from common.file_utils import get_project_base_directory
from deepdoc.vision import Recognizer
from deepdoc.vision.operators import nms
class LayoutRecognizer(Recognizer):
    """Page-layout detection.

    Classifies page regions (text/title/figure/table/header/footer/...) and
    tags every OCR box with the layout region it falls into; footer/header/
    reference texts that repeat across pages are dropped as garbage.
    """

    labels = [
        "_background_",
        "Text",
        "Title",
        "Figure",
        "Figure caption",
        "Table",
        "Table caption",
        "Header",
        "Footer",
        "Reference",
        "Equation",
    ]

    def __init__(self, domain):
        """Load the layout model named *domain* from the local deepdoc
        resources, downloading the InfiniFlow/deepdoc HuggingFace snapshot on
        first failure. If TENSORRT_DLA_SVR is set, inference is delegated to
        that remote DLA server instead of running locally."""
        try:
            model_dir = os.path.join(get_project_base_directory(), "rag/res/deepdoc")
            super().__init__(self.labels, domain, model_dir)
        except Exception:
            model_dir = snapshot_download(repo_id="InfiniFlow/deepdoc", local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"), local_dir_use_symlinks=False)
            super().__init__(self.labels, domain, model_dir)
        self.garbage_layouts = ["footer", "header", "reference"]
        self.client = None
        if os.environ.get("TENSORRT_DLA_SVR"):
            from deepdoc.vision.dla_cli import DLAClient
            self.client = DLAClient(os.environ["TENSORRT_DLA_SVR"])

    def __call__(self, image_list, ocr_res, scale_factor=3, thr=0.2, batch_size=16, drop=True):
        """Tag each OCR box in *ocr_res* with its layout type.

        Model bbox coordinates are divided by *scale_factor* to match the OCR
        coordinate space. With *drop*=True, boxes inside footer/header/
        reference regions are removed, and any of those texts recurring on
        more than one page are stripped globally.

        Returns:
            (tagged_ocr_boxes, per_page_layout_lists)
        """
        def __is_garbage(b):
            # garbage filtering currently disabled; original patterns kept below
            return False
            # patt = [r"^•+$", "^[0-9]{1,2} / ?[0-9]{1,2}$", r"^[0-9]{1,2} of [0-9]{1,2}$", "^http://[^ ]{12,}", "\\(cid *: *[0-9]+ *\\)"]
            # return any([re.search(p, b["text"]) for p in patt])
        if self.client:
            layouts = self.client.predict(image_list)
        else:
            layouts = super().__call__(image_list, thr, batch_size)
        # save_results(image_list, layouts, self.labels, output_dir='output/', threshold=0.7)
        assert len(image_list) == len(ocr_res)
        # Tag layout type
        boxes = []
        assert len(image_list) == len(layouts)
        garbages = {}
        page_layout = []
        for pn, lts in enumerate(layouts):
            bxs = ocr_res[pn]
            # low-score garbage-region detections are dropped up front
            lts = [
                {
                    "type": b["type"],
                    "score": float(b["score"]),
                    "x0": b["bbox"][0] / scale_factor,
                    "x1": b["bbox"][2] / scale_factor,
                    "top": b["bbox"][1] / scale_factor,
                    "bottom": b["bbox"][-1] / scale_factor,
                    "page_number": pn,
                }
                for b in lts
                if float(b["score"]) >= 0.4 or b["type"] not in self.garbage_layouts
            ]
            lts = self.sort_Y_firstly(lts, np.mean([lt["bottom"] - lt["top"] for lt in lts]) / 2)
            lts = self.layouts_cleanup(bxs, lts)
            page_layout.append(lts)

            def findLayout(ty):
                # assign layout type *ty* to every not-yet-tagged OCR box that
                # overlaps one of this page's *ty* regions; mutates bxs in place
                nonlocal bxs, lts, self
                lts_ = [lt for lt in lts if lt["type"] == ty]
                i = 0
                while i < len(bxs):
                    if bxs[i].get("layout_type"):
                        i += 1
                        continue
                    if __is_garbage(bxs[i]):
                        bxs.pop(i)
                        continue
                    ii = self.find_overlapped_with_threshold(bxs[i], lts_, thr=0.4)
                    if ii is None:
                        bxs[i]["layout_type"] = ""
                        i += 1
                        continue
                    lts_[ii]["visited"] = True
                    # keep footers/headers that sit unusually far from the page edge
                    keep_feats = [
                        lts_[ii]["type"] == "footer" and bxs[i]["bottom"] < image_list[pn].size[1] * 0.9 / scale_factor,
                        lts_[ii]["type"] == "header" and bxs[i]["top"] > image_list[pn].size[1] * 0.1 / scale_factor,
                    ]
                    if drop and lts_[ii]["type"] in self.garbage_layouts and not any(keep_feats):
                        if lts_[ii]["type"] not in garbages:
                            garbages[lts_[ii]["type"]] = []
                        garbages[lts_[ii]["type"]].append(bxs[i]["text"])
                        bxs.pop(i)
                        continue
                    bxs[i]["layoutno"] = f"{ty}-{ii}"
                    bxs[i]["layout_type"] = lts_[ii]["type"] if lts_[ii]["type"] != "equation" else "figure"
                    i += 1

            for lt in ["footer", "header", "reference", "figure caption", "table caption", "title", "table", "text", "figure", "equation"]:
                findLayout(lt)
            # add box to figure layouts which has not text box
            for i, lt in enumerate([lt for lt in lts if lt["type"] in ["figure", "equation"]]):
                if lt.get("visited"):
                    continue
                lt = deepcopy(lt)
                del lt["type"]
                lt["text"] = ""
                lt["layout_type"] = "figure"
                lt["layoutno"] = f"figure-{i}"
                bxs.append(lt)
            boxes.extend(bxs)
        ocr_res = boxes
        # texts seen more than once in garbage regions are removed everywhere
        garbag_set = set()
        for k in garbages.keys():
            garbages[k] = Counter(garbages[k])
            for g, c in garbages[k].items():
                if c > 1:
                    garbag_set.add(g)
        ocr_res = [b for b in ocr_res if b["text"].strip() not in garbag_set]
        return ocr_res, page_layout

    def forward(self, image_list, thr=0.7, batch_size=16):
        """Raw layout inference without OCR-box tagging."""
        return super().__call__(image_list, thr, batch_size)
class LayoutRecognizer4YOLOv10(LayoutRecognizer):
    """LayoutRecognizer variant for a YOLOv10-style layout model:
    letterbox preprocessing plus per-class NMS postprocessing.

    NOTE(review): the label list repeats "Table caption" and "Figure caption";
    presumably several model class ids deliberately map onto the same name —
    confirm against the exported model's class metadata.
    """

    labels = [
        "title",
        "Text",
        "Reference",
        "Figure",
        "Figure caption",
        "Table",
        "Table caption",
        "Table caption",
        "Equation",
        "Figure caption",
    ]

    def __init__(self, domain):
        """Always load the "layout" model (ignoring *domain*) and configure
        letterbox parameters."""
        domain = "layout"
        super().__init__(domain)
        self.auto = False
        self.scaleFill = False
        self.scaleup = True
        self.stride = 32
        self.center = True  # pad symmetrically when letterboxing

    def preprocess(self, image_list):
        """Letterbox each image to the model input shape.

        Returns one feed dict per image: normalized NCHW float32 tensor under
        the model's input name plus "scale_factor" = [sx, sy, dw, dh] used to
        undo the letterbox in postprocess().
        """
        inputs = []
        new_shape = self.input_shape  # height, width
        for img in image_list:
            shape = img.shape[:2]  # current shape [height, width]
            # Scale ratio (new / old)
            r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
            # Compute padding
            new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
            dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
            dw /= 2  # divide padding into 2 sides
            dh /= 2
            ww, hh = new_unpad
            img = np.array(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)).astype(np.float32)
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
            top, bottom = int(round(dh - 0.1)) if self.center else 0, int(round(dh + 0.1))
            left, right = int(round(dw - 0.1)) if self.center else 0, int(round(dw + 0.1))
            img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114))  # add border
            img /= 255.0
            img = img.transpose(2, 0, 1)
            img = img[np.newaxis, :, :, :].astype(np.float32)
            inputs.append({self.input_names[0]: img, "scale_factor": [shape[1] / ww, shape[0] / hh, dw, dh]})
        return inputs

    def postprocess(self, boxes, inputs, thr):
        """Threshold, un-letterbox, and per-class-NMS the raw YOLO output.

        NOTE(review): *thr* is immediately overridden with 0.08, so the
        caller's threshold is ignored here — looks deliberate (hard-coded
        tuning), but confirm before relying on the parameter.
        """
        thr = 0.08
        boxes = np.squeeze(boxes)
        scores = boxes[:, 4]
        boxes = boxes[scores > thr, :]
        scores = scores[scores > thr]
        if len(boxes) == 0:
            return []
        class_ids = boxes[:, -1].astype(int)
        boxes = boxes[:, :4]
        # undo letterbox: subtract padding, then rescale to the original image
        boxes[:, 0] -= inputs["scale_factor"][2]
        boxes[:, 2] -= inputs["scale_factor"][2]
        boxes[:, 1] -= inputs["scale_factor"][3]
        boxes[:, 3] -= inputs["scale_factor"][3]
        input_shape = np.array([inputs["scale_factor"][0], inputs["scale_factor"][1], inputs["scale_factor"][0], inputs["scale_factor"][1]])
        boxes = np.multiply(boxes, input_shape, dtype=np.float32)
        unique_class_ids = np.unique(class_ids)
        indices = []
        for class_id in unique_class_ids:
            class_indices = np.where(class_ids == class_id)[0]
            class_boxes = boxes[class_indices, :]
            class_scores = scores[class_indices]
            class_keep_boxes = nms(class_boxes, class_scores, 0.45)
            indices.extend(class_indices[class_keep_boxes])
        return [{"type": self.label_list[class_ids[i]].lower(), "bbox": [float(t) for t in boxes[i].tolist()], "score": float(scores[i])} for i in indices]
class AscendLayoutRecognizer(Recognizer):
    """Layout detection on Ascend NPUs via ais_bench with a compiled ".om"
    model; mirrors LayoutRecognizer's OCR-box tagging contract."""

    labels = [
        "title",
        "Text",
        "Reference",
        "Figure",
        "Figure caption",
        "Table",
        "Table caption",
        "Table caption",
        "Equation",
        "Figure caption",
    ]

    def __init__(self, domain):
        """Open an ais_bench InferSession on "<domain>.om"; the NPU is chosen
        with ASCEND_LAYOUT_RECOGNIZER_DEVICE_ID (default 0).

        Raises:
            ValueError: when the compiled model file is missing.
        """
        from ais_bench.infer.interface import InferSession
        model_dir = os.path.join(get_project_base_directory(), "rag/res/deepdoc")
        model_file_path = os.path.join(model_dir, domain + ".om")
        if not os.path.exists(model_file_path):
            raise ValueError(f"Model file not found: {model_file_path}")
        device_id = int(os.getenv("ASCEND_LAYOUT_RECOGNIZER_DEVICE_ID", 0))
        self.session = InferSession(device_id=device_id, model_path=model_file_path)
        self.input_shape = self.session.get_inputs()[0].shape[2:4]  # H,W
        self.garbage_layouts = ["footer", "header", "reference"]

    def preprocess(self, image_list):
        """Letterbox each image to the model's (H, W) input.

        Returns one feed dict per image: normalized NCHW tensor plus
        scale/pad/original-shape metadata consumed by postprocess().
        """
        inputs = []
        H, W = self.input_shape
        for img in image_list:
            h, w = img.shape[:2]
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32)
            r = min(H / h, W / w)
            new_unpad = (int(round(w * r)), int(round(h * r)))
            dw, dh = (W - new_unpad[0]) / 2.0, (H - new_unpad[1]) / 2.0
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
            top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
            left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
            img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114))
            img /= 255.0
            img = img.transpose(2, 0, 1)[np.newaxis, :, :, :].astype(np.float32)
            inputs.append(
                {
                    "image": img,
                    "scale_factor": [w / new_unpad[0], h / new_unpad[1]],
                    "pad": [dw, dh],
                    "orig_shape": [h, w],
                }
            )
        return inputs

    def postprocess(self, boxes, inputs, thr=0.25):
        """Convert raw [x1,y1,x2,y2,score,cls] rows into labeled detections in
        original-image coordinates, applying per-class NMS (IoU 0.45).

        Raises:
            ValueError: when the model output does not have 6 columns.
        """
        arr = np.squeeze(boxes)
        if arr.ndim == 1:
            arr = arr.reshape(1, -1)
        results = []
        if arr.shape[1] == 6:
            # [x1,y1,x2,y2,score,cls]
            m = arr[:, 4] >= thr
            arr = arr[m]
            if arr.size == 0:
                return []
            xyxy = arr[:, :4].astype(np.float32)
            scores = arr[:, 4].astype(np.float32)
            cls_ids = arr[:, 5].astype(np.int32)
            if "pad" in inputs:
                # undo letterbox padding, then rescale to original size
                dw, dh = inputs["pad"]
                sx, sy = inputs["scale_factor"]
                xyxy[:, [0, 2]] -= dw
                xyxy[:, [1, 3]] -= dh
                xyxy *= np.array([sx, sy, sx, sy], dtype=np.float32)
            else:
                # backup
                sx, sy = inputs["scale_factor"]
                xyxy *= np.array([sx, sy, sx, sy], dtype=np.float32)
            keep_indices = []
            for c in np.unique(cls_ids):
                idx = np.where(cls_ids == c)[0]
                k = nms(xyxy[idx], scores[idx], 0.45)
                keep_indices.extend(idx[k])
            for i in keep_indices:
                cid = int(cls_ids[i])
                if 0 <= cid < len(self.labels):
                    results.append({"type": self.labels[cid].lower(), "bbox": [float(t) for t in xyxy[i].tolist()], "score": float(scores[i])})
            return results
        raise ValueError(f"Unexpected output shape: {arr.shape}")

    def __call__(self, image_list, ocr_res, scale_factor=3, thr=0.2, batch_size=16, drop=True):
        """Tag OCR boxes with layout types (same contract as
        LayoutRecognizer.__call__) using batched Ascend inference.

        NOTE(review): page images here are treated as numpy arrays (uses
        ``.shape``), unlike LayoutRecognizer which uses PIL ``.size`` —
        callers are expected to pass ndarrays; confirm at the call site.
        """
        import re
        from collections import Counter
        assert len(image_list) == len(ocr_res)
        images = [np.array(im) if not isinstance(im, np.ndarray) else im for im in image_list]
        layouts_all_pages = []  # list of list[{"type","score","bbox":[x1,y1,x2,y2]}]
        # floor the confidence threshold so a near-zero thr doesn't flood results
        conf_thr = max(thr, 0.08)
        batch_loop_cnt = math.ceil(float(len(images)) / batch_size)
        for bi in range(batch_loop_cnt):
            s = bi * batch_size
            e = min((bi + 1) * batch_size, len(images))
            batch_images = images[s:e]
            inputs_list = self.preprocess(batch_images)
            logging.debug("preprocess done")
            for ins in inputs_list:
                feeds = [ins["image"]]
                out_list = self.session.infer(feeds=feeds, mode="static")
                for out in out_list:
                    lts = self.postprocess(out, ins, conf_thr)
                    page_lts = []
                    for b in lts:
                        # low-score garbage-region detections are dropped up front
                        if float(b["score"]) >= 0.4 or b["type"] not in self.garbage_layouts:
                            x0, y0, x1, y1 = b["bbox"]
                            page_lts.append(
                                {
                                    "type": b["type"],
                                    "score": float(b["score"]),
                                    "x0": float(x0) / scale_factor,
                                    "x1": float(x1) / scale_factor,
                                    "top": float(y0) / scale_factor,
                                    "bottom": float(y1) / scale_factor,
                                    "page_number": len(layouts_all_pages),
                                }
                            )
                    layouts_all_pages.append(page_lts)

        def _is_garbage_text(box):
            # bullet runs, page numbers, long URLs and (cid:NN) artifacts
            patt = [r"^•+$", r"^[0-9]{1,2} / ?[0-9]{1,2}$", r"^[0-9]{1,2} of [0-9]{1,2}$", r"^http://[^ ]{12,}", r"\(cid *: *[0-9]+ *\)"]
            return any(re.search(p, box.get("text", "")) for p in patt)

        boxes_out = []
        page_layout = []
        garbages = {}
        for pn, lts in enumerate(layouts_all_pages):
            if lts:
                avg_h = np.mean([lt["bottom"] - lt["top"] for lt in lts])
                lts = self.sort_Y_firstly(lts, avg_h / 2 if avg_h > 0 else 0)
            bxs = ocr_res[pn]
            lts = self.layouts_cleanup(bxs, lts)
            page_layout.append(lts)

            def _tag_layout(ty):
                # assign layout type *ty* to untagged OCR boxes overlapping a
                # *ty* region on this page; mutates bxs in place
                nonlocal bxs, lts
                lts_of_ty = [lt for lt in lts if lt["type"] == ty]
                i = 0
                while i < len(bxs):
                    if bxs[i].get("layout_type"):
                        i += 1
                        continue
                    if _is_garbage_text(bxs[i]):
                        bxs.pop(i)
                        continue
                    ii = self.find_overlapped_with_threshold(bxs[i], lts_of_ty, thr=0.4)
                    if ii is None:
                        bxs[i]["layout_type"] = ""
                        i += 1
                        continue
                    lts_of_ty[ii]["visited"] = True
                    # keep footers/headers that sit unusually far from the page edge
                    keep_feats = [
                        lts_of_ty[ii]["type"] == "footer" and bxs[i]["bottom"] < image_list[pn].shape[0] * 0.9 / scale_factor,
                        lts_of_ty[ii]["type"] == "header" and bxs[i]["top"] > image_list[pn].shape[0] * 0.1 / scale_factor,
                    ]
                    if drop and lts_of_ty[ii]["type"] in self.garbage_layouts and not any(keep_feats):
                        garbages.setdefault(lts_of_ty[ii]["type"], []).append(bxs[i].get("text", ""))
                        bxs.pop(i)
                        continue
                    bxs[i]["layoutno"] = f"{ty}-{ii}"
                    bxs[i]["layout_type"] = lts_of_ty[ii]["type"] if lts_of_ty[ii]["type"] != "equation" else "figure"
                    i += 1

            for ty in ["footer", "header", "reference", "figure caption", "table caption", "title", "table", "text", "figure", "equation"]:
                _tag_layout(ty)
            # synthesize empty boxes for figure/equation regions with no OCR text
            figs = [lt for lt in lts if lt["type"] in ["figure", "equation"]]
            for i, lt in enumerate(figs):
                if lt.get("visited"):
                    continue
                lt = deepcopy(lt)
                lt.pop("type", None)
                lt["text"] = ""
                lt["layout_type"] = "figure"
                lt["layoutno"] = f"figure-{i}"
                bxs.append(lt)
            boxes_out.extend(bxs)
        # texts seen more than once in garbage regions are removed everywhere
        garbag_set = set()
        for k, lst in garbages.items():
            cnt = Counter(lst)
            for g, c in cnt.items():
                if c > 1:
                    garbag_set.add(g)
        ocr_res_new = [b for b in boxes_out if b["text"].strip() not in garbag_set]
        return ocr_res_new, page_layout
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/mineru_parser.py | deepdoc/parser/mineru_parser.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import re
import sys
import tempfile
import threading
import zipfile
from dataclasses import dataclass
from io import BytesIO
from os import PathLike
from pathlib import Path
from typing import Any, Callable, Optional
import numpy as np
import pdfplumber
import requests
from PIL import Image
from strenum import StrEnum
from deepdoc.parser.pdf_parser import RAGFlowPdfParser
# Stash one process-wide Lock under a key in sys.modules so the same lock
# survives module re-imports/reloads — presumably to serialize pdfplumber
# usage, which is not safe for concurrent use (TODO confirm).
LOCK_KEY_pdfplumber = "global_shared_lock_pdfplumber"
if LOCK_KEY_pdfplumber not in sys.modules:
    sys.modules[LOCK_KEY_pdfplumber] = threading.Lock()
class MinerUContentType(StrEnum):
    """Content-block types found in MinerU's parsed output."""

    IMAGE = "image"
    TABLE = "table"
    TEXT = "text"
    EQUATION = "equation"
    CODE = "code"
    LIST = "list"
    DISCARDED = "discarded"  # presumably blocks MinerU filtered out as noise
# Mapping from human-readable language names (as used elsewhere in RAGFlow)
# to MinerU OCR language codes; several Latin-script languages share "latin"
# and Slavic languages share "east_slavic".
LANGUAGE_TO_MINERU_MAP = {
    'English': 'en',
    'Chinese': 'ch',
    'Traditional Chinese': 'chinese_cht',
    'Russian': 'east_slavic',
    'Ukrainian': 'east_slavic',
    'Indonesian': 'latin',
    'Spanish': 'latin',
    'Vietnamese': 'latin',
    'Japanese': 'japan',
    'Korean': 'korean',
    'Portuguese BR': 'latin',
    'German': 'latin',
    'French': 'latin',
    'Italian': 'latin',
    'Tamil': 'ta',
    'Telugu': 'te',
    'Kannada': 'ka',
    'Thai': 'th',
    'Greek': 'el',
    'Hindi': 'devanagari',
}
class MinerUBackend(StrEnum):
    """MinerU processing backend options (values match MinerU's API names)."""

    PIPELINE = "pipeline"  # Traditional multimodel pipeline (default)
    VLM_TRANSFORMERS = "vlm-transformers"  # Vision-language model using HuggingFace Transformers
    VLM_MLX_ENGINE = "vlm-mlx-engine"  # Faster, requires Apple Silicon and macOS 13.5+
    VLM_VLLM_ENGINE = "vlm-vllm-engine"  # Local vLLM engine, requires local GPU
    VLM_VLLM_ASYNC_ENGINE = "vlm-vllm-async-engine"  # Asynchronous vLLM engine, new in MinerU API
    VLM_LMDEPLOY_ENGINE = "vlm-lmdeploy-engine"  # LMDeploy engine
    VLM_HTTP_CLIENT = "vlm-http-client"  # HTTP client for remote vLLM server (CPU only)
class MinerULanguage(StrEnum):
    """MinerU supported OCR language codes (pipeline backend only)."""

    CH = "ch"  # Chinese
    CH_SERVER = "ch_server"  # Chinese (server)
    CH_LITE = "ch_lite"  # Chinese (lite)
    EN = "en"  # English
    KOREAN = "korean"  # Korean
    JAPAN = "japan"  # Japanese
    CHINESE_CHT = "chinese_cht"  # Chinese Traditional
    TA = "ta"  # Tamil
    TE = "te"  # Telugu
    KA = "ka"  # Kannada
    TH = "th"  # Thai
    EL = "el"  # Greek
    LATIN = "latin"  # Latin
    ARABIC = "arabic"  # Arabic
    EAST_SLAVIC = "east_slavic"  # East Slavic
    CYRILLIC = "cyrillic"  # Cyrillic
    DEVANAGARI = "devanagari"  # Devanagari
class MinerUParseMethod(StrEnum):
    """MinerU PDF text-extraction strategies (pipeline backend only)."""

    AUTO = "auto"  # Automatically determine the method based on the file type
    TXT = "txt"  # Use text extraction method
    OCR = "ocr"  # Use OCR method for image-based PDFs
@dataclass
class MinerUParseOptions:
    """Options bundle controlling a MinerU PDF parse request."""

    backend: MinerUBackend = MinerUBackend.PIPELINE  # which MinerU engine to run
    lang: Optional[MinerULanguage] = None  # language for OCR (pipeline backend only)
    method: MinerUParseMethod = MinerUParseMethod.AUTO  # txt/ocr/auto extraction strategy
    server_url: Optional[str] = None  # presumably the remote server for vlm-http-client — confirm
    delete_output: bool = True  # presumably delete MinerU's output dir when done — confirm
    parse_method: str = "raw"
    formula_enable: bool = True  # enable formula recognition
    table_enable: bool = True  # enable table recognition
class MinerUParser(RAGFlowPdfParser):
def __init__(self, mineru_path: str = "mineru", mineru_api: str = "", mineru_server_url: str = ""):
    """Configure the MinerU HTTP client.

    *mineru_path* is accepted for backward compatibility but is never read
    by this class. Endpoint strings are normalized by stripping trailing
    slashes so later f-string joins (``f"{base}/path"``) stay clean.
    """
    self.logger = logging.getLogger(self.__class__.__name__)
    self.outlines = []
    self.mineru_api = mineru_api.rstrip("/")
    self.mineru_server_url = mineru_server_url.rstrip("/")
def _extract_zip_no_root(self, zip_path, extract_to, root_dir):
    """Unpack *zip_path* into *extract_to*, stripping one top-level folder.

    If *root_dir* is falsy, the first archive entry is used as the root hint
    when it is a directory; without a usable root the archive is extracted
    verbatim.
    """
    self.logger.info(f"[MinerU] Extract zip: zip_path={zip_path}, extract_to={extract_to}, root_hint={root_dir}")
    with zipfile.ZipFile(zip_path, "r") as archive:
        if not root_dir:
            names = archive.namelist()
            root_dir = names[0] if names and names[0].endswith("/") else None
        if not (root_dir and root_dir.endswith("/")):
            self.logger.info(f"[MinerU] No root directory found, extracting all (root_hint={root_dir})")
            archive.extractall(extract_to)
            return
        prefix_len = len(root_dir)
        for entry in archive.infolist():
            name = entry.filename
            if name == root_dir:
                # The root folder itself produces no output entry.
                self.logger.info("[MinerU] Ignore root folder...")
                continue
            relative = name[prefix_len:] if name.startswith(root_dir) else name
            target = os.path.join(extract_to, relative)
            if entry.is_dir():
                os.makedirs(target, exist_ok=True)
                continue
            os.makedirs(os.path.dirname(target), exist_ok=True)
            with open(target, "wb") as out:
                out.write(archive.read(name))
@staticmethod
def _is_http_endpoint_valid(url, timeout=5):
    """Return True when a HEAD request to *url* answers with an OK/redirect status."""
    try:
        status = requests.head(url, timeout=timeout, allow_redirects=True).status_code
    except Exception:
        # Any network/DNS/timeout failure counts as "not reachable".
        return False
    return status in (200, 301, 302, 307, 308)
def check_installation(self, backend: str = "pipeline", server_url: Optional[str] = None) -> tuple[bool, str]:
    """Probe whether the configured MinerU HTTP API is usable.

    Returns ``(ok, reason)`` — *reason* is empty on success. For the
    vlm-http-client backend the remote server is probed best-effort: a
    failed probe is only logged and does not fail the check.
    """
    def _fail(message: str) -> tuple[bool, str]:
        # Log-and-return helper for every warned failure path.
        self.logger.warning(message)
        return False, message

    valid_backends = ["pipeline", "vlm-http-client", "vlm-transformers", "vlm-vllm-engine", "vlm-mlx-engine", "vlm-vllm-async-engine", "vlm-lmdeploy-engine"]
    if backend not in valid_backends:
        return _fail(f"[MinerU] Invalid backend '{backend}'. Valid backends are: {valid_backends}")
    if not self.mineru_api:
        return _fail("[MinerU] MINERU_APISERVER not configured.")
    api_openapi = f"{self.mineru_api}/openapi.json"
    try:
        api_ok = self._is_http_endpoint_valid(api_openapi)
        self.logger.info(f"[MinerU] API openapi.json reachable={api_ok} url={api_openapi}")
        if not api_ok:
            # Note: this path is not warn-logged, matching prior behavior.
            return False, f"[MinerU] MinerU API not accessible: {api_openapi}"
    except Exception as exc:
        return _fail(f"[MinerU] MinerU API check failed: {exc}")
    if backend == "vlm-http-client":
        resolved_server = server_url or self.mineru_server_url
        if not resolved_server:
            return _fail("[MinerU] MINERU_SERVER_URL required for vlm-http-client backend.")
        try:
            server_ok = self._is_http_endpoint_valid(resolved_server)
            self.logger.info(f"[MinerU] vlm-http-client server check reachable={server_ok} url={resolved_server}")
        except Exception as exc:
            self.logger.warning(f"[MinerU] vlm-http-client server probe failed: {resolved_server}: {exc}")
    return True, ""
def _run_mineru(
    self, input_path: Path, output_dir: Path, options: MinerUParseOptions, callback: Optional[Callable] = None
) -> Path:
    """Dispatch a parse job; the HTTP API is currently the only execution path."""
    return self._run_mineru_api(input_path, output_dir, options, callback)
def _run_mineru_api(
    self, input_path: Path, output_dir: Path, options: MinerUParseOptions, callback: Optional[Callable] = None
) -> Path:
    """Submit *input_path* to the MinerU ``/file_parse`` endpoint.

    The API is asked to return a ZIP archive; it is saved next to, and then
    extracted into, a fresh scratch directory under *output_dir*.

    Returns the extraction directory. Raises RuntimeError on any failure
    (the original exception is chained as ``__cause__``).
    """
    pdf_file_path = str(input_path)
    if not os.path.exists(pdf_file_path):
        raise RuntimeError(f"[MinerU] PDF file not exists: {pdf_file_path}")
    pdf_file_name = Path(pdf_file_path).stem.strip()
    # Unique scratch dir per request; the sibling .zip keeps the raw payload.
    output_path = tempfile.mkdtemp(prefix=f"{pdf_file_name}_{options.method}_", dir=str(output_dir))
    output_zip_path = os.path.join(str(output_dir), f"{Path(output_path).name}.zip")
    data = {
        "output_dir": "./output",
        "lang_list": options.lang,
        "backend": options.backend,
        "parse_method": options.method,
        "formula_enable": options.formula_enable,
        "table_enable": options.table_enable,
        "server_url": None,
        "return_md": True,
        "return_middle_json": True,
        "return_model_output": True,
        "return_content_list": True,
        "return_images": True,
        "response_format_zip": True,
        "start_page_id": 0,
        "end_page_id": 99999,
    }
    # Explicit per-call server_url wins over the instance-level default.
    if options.server_url:
        data["server_url"] = options.server_url
    elif self.mineru_server_url:
        data["server_url"] = self.mineru_server_url
    self.logger.info(f"[MinerU] request {data=}")
    self.logger.info(f"[MinerU] request {options=}")
    headers = {"Accept": "application/json"}
    try:
        self.logger.info(f"[MinerU] invoke api: {self.mineru_api}/file_parse backend={options.backend} server_url={data.get('server_url')}")
        if callback:
            callback(0.20, f"[MinerU] invoke api: {self.mineru_api}/file_parse")
        # BUGFIX: open the PDF inside a context manager so the handle is
        # always closed — the previous code opened it inline in the `files`
        # dict and leaked the descriptor on every call.
        with open(pdf_file_path, "rb") as pdf_fp:
            files = {"files": (pdf_file_name + ".pdf", pdf_fp, "application/pdf")}
            response = requests.post(url=f"{self.mineru_api}/file_parse", files=files, data=data, headers=headers,
                                     timeout=1800)
        response.raise_for_status()
        if response.headers.get("Content-Type") == "application/zip":
            self.logger.info(f"[MinerU] zip file returned, saving to {output_zip_path}...")
            if callback:
                callback(0.30, f"[MinerU] zip file returned, saving to {output_zip_path}...")
            with open(output_zip_path, "wb") as f:
                f.write(response.content)
            self.logger.info(f"[MinerU] Unzip to {output_path}...")
            self._extract_zip_no_root(output_zip_path, output_path, pdf_file_name + "/")
            if callback:
                callback(0.40, f"[MinerU] Unzip to {output_path}...")
        else:
            self.logger.warning(f"[MinerU] not zip returned from api: {response.headers.get('Content-Type')}")
    except Exception as e:
        # Chain the original exception so the root cause stays visible.
        raise RuntimeError(f"[MinerU] api failed with exception {e}") from e
    self.logger.info("[MinerU] Api completed successfully.")
    return Path(output_path)
def __images__(self, fnm, zoomin: int = 1, page_from=0, page_to=600, callback=None):
    """Render pages [page_from, page_to) of *fnm* to PIL images via pdfplumber.

    On any failure, ``page_images`` is set to None and ``total_page`` to 0;
    the exception is logged, not raised.
    """
    self.page_from = page_from
    self.page_to = page_to
    try:
        # Paths open directly; raw bytes are wrapped in a BytesIO first.
        source = fnm if isinstance(fnm, (str, PathLike)) else BytesIO(fnm)
        with pdfplumber.open(source) as pdf:
            self.pdf = pdf
            selected = self.pdf.pages[page_from:page_to]
            self.page_images = [page.to_image(resolution=72 * zoomin, antialias=True).original
                                for page in selected]
    except Exception as e:
        self.page_images = None
        self.total_page = 0
        self.logger.exception(e)
def _line_tag(self, bx):
    """Encode a block's page/bbox as an ``@@page\\tx0\\tx1\\ttop\\tbottom##`` marker.

    When page images are available, the coordinates are rescaled by
    page_size/1000 — the bbox appears to use a 0–1000 grid (TODO confirm
    against MinerU output format).
    """
    page_idx = bx["page_idx"]
    x0, top, x1, bott = bx.get("bbox", (0, 0, 0, 0))
    images = getattr(self, "page_images", None)
    if images and len(images) > page_idx:
        page_width, page_height = images[page_idx].size
        x0 = x0 / 1000.0 * page_width
        x1 = x1 / 1000.0 * page_width
        top = top / 1000.0 * page_height
        bott = bott / 1000.0 * page_height
    pages = str(page_idx + 1)  # markers are 1-based
    return "@@{}\t{:.1f}\t{:.1f}\t{:.1f}\t{:.1f}##".format(pages, x0, x1, top, bott)
def crop(self, text, ZM=1, need_position=False):
    """Stitch the page-image regions referenced by *text*'s position markers
    into one vertical composite image.

    Positions come from extract_positions(); out-of-range pages are filtered
    with warnings. A context strip is prepended above the first region and
    appended below the last one, and both are rendered dimmed. Returns the
    composite PIL image, or (image, positions) when *need_position* is True;
    returns None/(None, None) when nothing can be cropped. *ZM* is unused.
    """
    imgs = []
    poss = self.extract_positions(text)
    if not poss:
        if need_position:
            return None, None
        return
    if not getattr(self, "page_images", None):
        self.logger.warning("[MinerU] crop called without page images; skipping image generation.")
        if need_position:
            return None, None
        return
    page_count = len(self.page_images)
    # Drop positions whose page indices all fall outside the rendered range.
    filtered_poss = []
    for pns, left, right, top, bottom in poss:
        if not pns:
            self.logger.warning("[MinerU] Empty page index list in crop; skipping this position.")
            continue
        valid_pns = [p for p in pns if 0 <= p < page_count]
        if not valid_pns:
            self.logger.warning(f"[MinerU] All page indices {pns} out of range for {page_count} pages; skipping.")
            continue
        filtered_poss.append((valid_pns, left, right, top, bottom))
    poss = filtered_poss
    if not poss:
        self.logger.warning("[MinerU] No valid positions after filtering; skip cropping.")
        if need_position:
            return None, None
        return
    # All crops share the widest region's width (at least 6px).
    max_width = max(np.max([right - left for (_, left, right, _, _) in poss]), 6)
    GAP = 6
    # Prepend a context strip up to 120px above the first region.
    pos = poss[0]
    first_page_idx = pos[0][0]
    poss.insert(0, ([first_page_idx], pos[1], pos[2], max(0, pos[3] - 120), max(pos[3] - GAP, 0)))
    # Append a context strip up to 120px below the last region.
    pos = poss[-1]
    last_page_idx = pos[0][-1]
    if not (0 <= last_page_idx < page_count):
        self.logger.warning(
            f"[MinerU] Last page index {last_page_idx} out of range for {page_count} pages; skipping crop.")
        if need_position:
            return None, None
        return
    last_page_height = self.page_images[last_page_idx].size[1]
    poss.append(
        (
            [last_page_idx],
            pos[1],
            pos[2],
            min(last_page_height, pos[4] + GAP),
            min(last_page_height, pos[4] + 120),
        )
    )
    positions = []
    for ii, (pns, left, right, top, bottom) in enumerate(poss):
        right = left + max_width
        if bottom <= top:
            bottom = top + 2  # guarantee a non-degenerate crop height
        # A multi-page region's bottom is expressed relative to the first
        # page; add the intermediate page heights to locate it.
        for pn in pns[1:]:
            if 0 <= pn - 1 < page_count:
                bottom += self.page_images[pn - 1].size[1]
            else:
                self.logger.warning(
                    f"[MinerU] Page index {pn}-1 out of range for {page_count} pages during crop; skipping height accumulation.")
        if not (0 <= pns[0] < page_count):
            self.logger.warning(
                f"[MinerU] Base page index {pns[0]} out of range for {page_count} pages during crop; skipping this segment.")
            continue
        # Crop the slice that falls on the first page of the region.
        img0 = self.page_images[pns[0]]
        x0, y0, x1, y1 = int(left), int(top), int(right), int(min(bottom, img0.size[1]))
        crop0 = img0.crop((x0, y0, x1, y1))
        imgs.append(crop0)
        # The first/last entries are context strips; only real regions
        # (0 < ii < len-1) contribute to the reported positions.
        if 0 < ii < len(poss) - 1:
            positions.append((pns[0] + self.page_from, x0, x1, y0, y1))
        bottom -= img0.size[1]
        # Continue cropping on each following page of a multi-page region.
        for pn in pns[1:]:
            if not (0 <= pn < page_count):
                self.logger.warning(
                    f"[MinerU] Page index {pn} out of range for {page_count} pages during crop; skipping this page.")
                continue
            page = self.page_images[pn]
            x0, y0, x1, y1 = int(left), 0, int(right), int(min(bottom, page.size[1]))
            cimgp = page.crop((x0, y0, x1, y1))
            imgs.append(cimgp)
            if 0 < ii < len(poss) - 1:
                positions.append((pn + self.page_from, x0, x1, y0, y1))
            bottom -= page.size[1]
    if not imgs:
        if need_position:
            return None, None
        return
    # Stack all crops vertically with GAP px of light-grey background.
    height = 0
    for img in imgs:
        height += img.size[1] + GAP
    height = int(height)
    width = int(np.max([i.size[0] for i in imgs]))
    pic = Image.new("RGB", (width, height), (245, 245, 245))
    height = 0
    for ii, img in enumerate(imgs):
        if ii == 0 or ii + 1 == len(imgs):
            # Dim the leading/trailing context strips at 50% alpha.
            img = img.convert("RGBA")
            overlay = Image.new("RGBA", img.size, (0, 0, 0, 0))
            overlay.putalpha(128)
            img = Image.alpha_composite(img, overlay).convert("RGB")
        pic.paste(img, (0, int(height)))
        height += img.size[1] + GAP
    if need_position:
        return pic, positions
    return pic
@staticmethod
def extract_positions(txt: str):
    """Parse every ``@@pages\\tleft\\tright\\ttop\\tbottom##`` marker in *txt*.

    Returns a list of ``(page_indices, left, right, top, bottom)`` tuples
    with 1-based marker pages converted to 0-based indices.
    """
    positions = []
    for marker in re.findall(r"@@[0-9-]+\t[0-9.\t]+##", txt):
        fields = marker.strip("#").strip("@").split("\t")
        page_part, coords = fields[0], fields[1:]
        left, right, top, bottom = (float(v) for v in coords)
        page_indices = [int(p) - 1 for p in page_part.split("-")]
        positions.append((page_indices, left, right, top, bottom))
    return positions
def _read_output(self, output_dir: Path, file_stem: str, method: str = "auto", backend: str = "pipeline") -> list[
    dict[str, Any]]:
    """Locate and load MinerU's ``*_content_list.json`` output.

    Tries, in order: the raw stem in *output_dir*, the sanitized stem in
    *output_dir*, then the sanitized stem nested one directory deeper.
    Relative image paths inside the JSON are resolved against the directory
    the file was found in. *method* and *backend* are currently unused.
    Raises FileNotFoundError listing every attempted path.
    """
    json_file = None
    subdir = None
    attempted = []
    # mirror MinerU's sanitize_filename to align ZIP naming
    def _sanitize_filename(name: str) -> str:
        sanitized = re.sub(r"[/\\\.]{2,}|[/\\]", "", name)
        sanitized = re.sub(r"[^\w.-]", "_", sanitized, flags=re.UNICODE)
        if sanitized.startswith("."):
            sanitized = "_" + sanitized[1:]
        return sanitized or "unnamed"
    safe_stem = _sanitize_filename(file_stem)
    allowed_names = {f"{file_stem}_content_list.json", f"{safe_stem}_content_list.json"}
    self.logger.info(f"[MinerU] Expected output files: {', '.join(sorted(allowed_names))}")
    self.logger.info(f"[MinerU] Searching output in: {output_dir}")
    # Candidate 1: unmodified stem directly under output_dir.
    jf = output_dir / f"{file_stem}_content_list.json"
    self.logger.info(f"[MinerU] Trying original path: {jf}")
    attempted.append(jf)
    if jf.exists():
        subdir = output_dir
        json_file = jf
    else:
        # Candidate 2: sanitized stem directly under output_dir.
        alt = output_dir / f"{safe_stem}_content_list.json"
        self.logger.info(f"[MinerU] Trying sanitized filename: {alt}")
        attempted.append(alt)
        if alt.exists():
            subdir = output_dir
            json_file = alt
        else:
            # Candidate 3: sanitized stem nested in a same-named folder.
            nested_alt = output_dir / safe_stem / f"{safe_stem}_content_list.json"
            self.logger.info(f"[MinerU] Trying sanitized nested path: {nested_alt}")
            attempted.append(nested_alt)
            if nested_alt.exists():
                subdir = nested_alt.parent
                json_file = nested_alt
    if not json_file:
        raise FileNotFoundError(f"[MinerU] Missing output file, tried: {', '.join(str(p) for p in attempted)}")
    with open(json_file, "r", encoding="utf-8") as f:
        data = json.load(f)
    # Make every referenced image path absolute so later consumers do not
    # depend on the temporary extraction directory's CWD.
    for item in data:
        for key in ("img_path", "table_img_path", "equation_img_path"):
            if key in item and item[key]:
                item[key] = str((subdir / item[key]).resolve())
    return data
def _transfer_to_sections(self, outputs: list[dict[str, Any]], parse_method: str = None):
sections = []
for output in outputs:
match output["type"]:
case MinerUContentType.TEXT:
section = output.get("text", "")
case MinerUContentType.TABLE:
section = output.get("table_body", "") + "\n".join(output.get("table_caption", [])) + "\n".join(
output.get("table_footnote", []))
if not section.strip():
section = "FAILED TO PARSE TABLE"
case MinerUContentType.IMAGE:
section = "".join(output.get("image_caption", [])) + "\n" + "".join(
output.get("image_footnote", []))
case MinerUContentType.EQUATION:
section = output.get("text", "")
case MinerUContentType.CODE:
section = output.get("code_body", "") + "\n".join(output.get("code_caption", []))
case MinerUContentType.LIST:
section = "\n".join(output.get("list_items", []))
case MinerUContentType.DISCARDED:
continue # Skip discarded blocks entirely
if section and parse_method == "manual":
sections.append((section, output["type"], self._line_tag(output)))
elif section and parse_method == "paper":
sections.append((section + self._line_tag(output), output["type"]))
else:
sections.append((section, self._line_tag(output)))
return sections
def _transfer_to_tables(self, outputs: list[dict[str, Any]]):
    # Tables are emitted inline by _transfer_to_sections (TABLE case), so
    # this hook intentionally produces no separate table payload.
    return []
def parse_pdf(
    self,
    filepath: str | PathLike[str],
    binary: BytesIO | bytes,
    callback: Optional[Callable] = None,
    *,
    output_dir: Optional[str] = None,
    backend: str = "pipeline",
    server_url: Optional[str] = None,
    delete_output: bool = True,
    parse_method: str = "raw",
    **kwargs,
) -> tuple:
    """Parse a PDF through MinerU and return ``(sections, tables)``.

    When *binary* is given it is written to a temp file and parsed from
    there; otherwise *filepath* is used — and, NOTE, is renamed in place if
    its name contains spaces (shutil.move below mutates the caller's file).
    Extra knobs (mineru_lang, mineru_parse_method, mineru_formula_enable,
    mineru_table_enable) are read from kwargs['parser_config'].
    Raises FileNotFoundError when the PDF cannot be located.
    """
    import shutil
    temp_pdf = None
    created_tmp_dir = False
    parser_cfg = kwargs.get('parser_config', {})
    lang = parser_cfg.get('mineru_lang') or kwargs.get('lang', 'English')
    mineru_lang_code = LANGUAGE_TO_MINERU_MAP.get(lang, 'ch')  # Defaults to Chinese if not matched
    mineru_method_raw_str = parser_cfg.get('mineru_parse_method', 'auto')
    enable_formula = parser_cfg.get('mineru_formula_enable', True)
    enable_table = parser_cfg.get('mineru_table_enable', True)
    # remove spaces, or mineru crash, and _read_output fail too
    file_path = Path(filepath)
    pdf_file_name = file_path.stem.replace(" ", "") + ".pdf"
    pdf_file_path_valid = os.path.join(file_path.parent, pdf_file_name)
    if binary:
        # Binary payload: stage it in a private temp dir (cleaned up below).
        temp_dir = Path(tempfile.mkdtemp(prefix="mineru_bin_pdf_"))
        temp_pdf = temp_dir / pdf_file_name
        with open(temp_pdf, "wb") as f:
            f.write(binary)
        pdf = temp_pdf
        self.logger.info(f"[MinerU] Received binary PDF -> {temp_pdf}")
        if callback:
            callback(0.15, f"[MinerU] Received binary PDF -> {temp_pdf}")
    else:
        if pdf_file_path_valid != filepath:
            self.logger.info(f"[MinerU] Remove all space in file name: {pdf_file_path_valid}")
            shutil.move(filepath, pdf_file_path_valid)
        pdf = Path(pdf_file_path_valid)
    if not pdf.exists():
        if callback:
            callback(-1, f"[MinerU] PDF not found: {pdf}")
        raise FileNotFoundError(f"[MinerU] PDF not found: {pdf}")
    if output_dir:
        out_dir = Path(output_dir)
        out_dir.mkdir(parents=True, exist_ok=True)
    else:
        # No explicit output dir: use a temp dir we may delete afterwards.
        out_dir = Path(tempfile.mkdtemp(prefix="mineru_pdf_"))
        created_tmp_dir = True
    self.logger.info(f"[MinerU] Output directory: {out_dir} backend={backend} api={self.mineru_api} server_url={server_url or self.mineru_server_url}")
    if callback:
        callback(0.15, f"[MinerU] Output directory: {out_dir}")
    # Pre-render page images so _line_tag/crop can rescale coordinates.
    self.__images__(pdf, zoomin=1)
    try:
        options = MinerUParseOptions(
            backend=MinerUBackend(backend),
            lang=MinerULanguage(mineru_lang_code),
            method=MinerUParseMethod(mineru_method_raw_str),
            server_url=server_url,
            delete_output=delete_output,
            parse_method=parse_method,
            formula_enable=enable_formula,
            table_enable=enable_table,
        )
        final_out_dir = self._run_mineru(pdf, out_dir, options, callback=callback)
        outputs = self._read_output(final_out_dir, pdf.stem, method=mineru_method_raw_str, backend=backend)
        self.logger.info(f"[MinerU] Parsed {len(outputs)} blocks from PDF.")
        if callback:
            callback(0.75, f"[MinerU] Parsed {len(outputs)} blocks from PDF.")
        return self._transfer_to_sections(outputs, parse_method), self._transfer_to_tables(outputs)
    finally:
        # Best-effort cleanup of the staged binary and temp output dir.
        if temp_pdf and temp_pdf.exists():
            try:
                temp_pdf.unlink()
                temp_pdf.parent.rmdir()
            except Exception:
                pass
        if delete_output and created_tmp_dir and out_dir.exists():
            try:
                shutil.rmtree(out_dir)
            except Exception:
                pass
if __name__ == "__main__":
    # Ad-hoc smoke test: requires a reachable MinerU API and a real PDF path.
    parser = MinerUParser("mineru")
    ok, reason = parser.check_installation()
    print("MinerU available:", ok)
    filepath = ""  # NOTE(review): placeholder — open("") raises unless a path is filled in
    with open(filepath, "rb") as file:
        outputs = parser.parse_pdf(filepath=filepath, binary=file.read())
    # parse_pdf returns (sections, tables); both halves are printed here.
    for output in outputs:
        print(output)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/excel_parser.py | deepdoc/parser/excel_parser.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import re
import sys
from io import BytesIO
import pandas as pd
from openpyxl import Workbook, load_workbook
from PIL import Image
from rag.nlp import find_codec
# copied from `/openpyxl/cell/cell.py`
ILLEGAL_CHARACTERS_RE = re.compile(r"[\000-\010]|[\013-\014]|[\016-\037]")
class RAGFlowExcelParser:
@staticmethod
def _load_excel_to_workbook(file_like_object):
    """Load bytes/stream into an openpyxl Workbook, with CSV/pandas fallbacks.

    Magic-byte sniffing decides the path: non-Excel signatures are parsed as
    CSV; real Excel data goes through openpyxl, then pandas (default engine,
    then calamine) if openpyxl fails. Raises Exception when every parser fails.
    """
    if isinstance(file_like_object, bytes):
        file_like_object = BytesIO(file_like_object)
    # Read first 4 bytes to determine file type
    file_like_object.seek(0)
    file_head = file_like_object.read(4)
    file_like_object.seek(0)
    # PK\x03\x04 = xlsx (zip); \xd0\xcf\x11\xe0 = legacy OLE2 .xls container.
    if not (file_head.startswith(b"PK\x03\x04") or file_head.startswith(b"\xd0\xcf\x11\xe0")):
        logging.info("Not an Excel file, converting CSV to Excel Workbook")
        try:
            file_like_object.seek(0)
            df = pd.read_csv(file_like_object, on_bad_lines='skip')
            return RAGFlowExcelParser._dataframe_to_workbook(df)
        except Exception as e_csv:
            raise Exception(f"Failed to parse CSV and convert to Excel Workbook: {e_csv}")
    try:
        return load_workbook(file_like_object, data_only=True)
    except Exception as e:
        logging.info(f"openpyxl load error: {e}, try pandas instead")
        try:
            file_like_object.seek(0)
            try:
                # All sheets at once; a dict of frames round-trips through
                # _dataframe_to_workbook -> _dataframes_to_workbook.
                dfs = pd.read_excel(file_like_object, sheet_name=None)
                return RAGFlowExcelParser._dataframe_to_workbook(dfs)
            except Exception as ex:
                logging.info(f"pandas with default engine load error: {ex}, try calamine instead")
                file_like_object.seek(0)
                df = pd.read_excel(file_like_object, engine="calamine")
                return RAGFlowExcelParser._dataframe_to_workbook(df)
        except Exception as e_pandas:
            raise Exception(f"pandas.read_excel error: {e_pandas}, original openpyxl error: {e}")
@staticmethod
def _clean_dataframe(df: pd.DataFrame):
    """Replace openpyxl-illegal control characters with spaces in every string cell."""
    def _scrub(value):
        return ILLEGAL_CHARACTERS_RE.sub(" ", value) if isinstance(value, str) else value
    return df.apply(lambda column: column.map(_scrub))
@staticmethod
def _dataframe_to_workbook(df):
    """Materialize a DataFrame as a single-sheet Workbook titled "Data".

    A dict holding two or more frames (multi-sheet read) is delegated to
    _dataframes_to_workbook instead.
    """
    if isinstance(df, dict) and len(df) > 1:
        return RAGFlowExcelParser._dataframes_to_workbook(df)
    cleaned = RAGFlowExcelParser._clean_dataframe(df)
    workbook = Workbook()
    sheet = workbook.active
    sheet.title = "Data"
    # Row 1 carries the header; data begins at row 2 (openpyxl is 1-based).
    for col_idx, header in enumerate(cleaned.columns, 1):
        sheet.cell(row=1, column=col_idx, value=header)
    for row_idx, record in enumerate(cleaned.values, 2):
        for col_idx, cell_value in enumerate(record, 1):
            sheet.cell(row=row_idx, column=col_idx, value=cell_value)
    return workbook
@staticmethod
def _dataframes_to_workbook(dfs: dict):
    """Build a multi-sheet Workbook from a {sheet_name: DataFrame} mapping."""
    workbook = Workbook()
    # Drop the implicit default sheet so only the named ones remain.
    workbook.remove(workbook.active)
    for sheet_name, frame in dfs.items():
        cleaned = RAGFlowExcelParser._clean_dataframe(frame)
        sheet = workbook.create_sheet(title=sheet_name)
        for col_idx, header in enumerate(cleaned.columns, 1):
            sheet.cell(row=1, column=col_idx, value=header)
        for row_idx, record in enumerate(cleaned.values, 2):
            for col_idx, cell_value in enumerate(record, 1):
                sheet.cell(row=row_idx, column=col_idx, value=cell_value)
    return workbook
@staticmethod
def _extract_images_from_worksheet(ws, sheetname=None):
    """
    Extract embedded images from a worksheet with their anchor positions.

    Relies on openpyxl's private `_images`/`_data`/anchor internals, so a
    library upgrade may silently break this — hence the broad per-image
    except. `image_description` is left empty here (filled in elsewhere).
    Returns: List[dict] with sheet, PIL image, 1-based cell range and span type.
    """
    images = getattr(ws, "_images", [])
    if not images:
        return []
    raw_items = []
    for img in images:
        try:
            img_bytes = img._data()
            pil_img = Image.open(BytesIO(img_bytes)).convert("RGB")
            anchor = img.anchor
            if hasattr(anchor, "_from") and hasattr(anchor, "_to"):
                # Two-cell anchor: convert 0-based anchor coords to 1-based.
                r1, c1 = anchor._from.row + 1, anchor._from.col + 1
                r2, c2 = anchor._to.row + 1, anchor._to.col + 1
                if r1 == r2 and c1 == c2:
                    span = "single_cell"
                else:
                    span = "multi_cell"
            else:
                # One-cell anchor: treat as a single-cell image.
                r1, c1 = anchor._from.row + 1, anchor._from.col + 1
                r2, c2 = r1, c1
                span = "single_cell"
            item = {
                "sheet": sheetname or ws.title,
                "image": pil_img,
                "image_description": "",
                "row_from": r1,
                "col_from": c1,
                "row_to": r2,
                "col_to": c2,
                "span_type": span,
            }
            raw_items.append(item)
        except Exception:
            # Skip any image whose payload/anchor cannot be decoded.
            continue
    return raw_items
def html(self, fnm, chunk_rows=256):
    """Render the workbook as HTML tables, one table per *chunk_rows* rows.

    *fnm* is a path or raw bytes. Every chunk repeats the sheet's header row
    and carries the sheet name as <caption>. Cell text is HTML-escaped.
    Returns a list of HTML table strings.
    """
    from html import escape
    file_like_object = BytesIO(fnm) if not isinstance(fnm, str) else fnm
    wb = RAGFlowExcelParser._load_excel_to_workbook(file_like_object)
    tb_chunks = []
    def _fmt(v):
        # None renders as empty; everything else is stringified and trimmed.
        if v is None:
            return ""
        return str(v).strip()
    for sheetname in wb.sheetnames:
        ws = wb[sheetname]
        try:
            rows = list(ws.rows)
        except Exception as e:
            logging.warning(f"Skip sheet '{sheetname}' due to rows access error: {e}")
            continue
        if not rows:
            continue
        # Build the header row once and reuse it in every chunk.
        tb_rows_0 = "<tr>"
        for t in list(rows[0]):
            tb_rows_0 += f"<th>{escape(_fmt(t.value))}</th>"
        tb_rows_0 += "</tr>"
        # Split the remaining rows into chunks of at most chunk_rows.
        for chunk_i in range((len(rows) - 1) // chunk_rows + 1):
            tb = ""
            tb += f"<table><caption>{sheetname}</caption>"
            tb += tb_rows_0
            for r in list(rows[1 + chunk_i * chunk_rows : min(1 + (chunk_i + 1) * chunk_rows, len(rows))]):
                tb += "<tr>"
                for i, c in enumerate(r):
                    if c.value is None:
                        tb += "<td></td>"
                    else:
                        tb += f"<td>{escape(_fmt(c.value))}</td>"
                tb += "</tr>"
            tb += "</table>\n"
            tb_chunks.append(tb)
    return tb_chunks
def markdown(self, fnm):
    """Render a spreadsheet (or CSV) as a Markdown table.

    *fnm* may be raw bytes or a filesystem path string. Excel parsing is
    tried first; on failure the input is re-read as CSV.
    """
    import pandas as pd
    file_like_object = BytesIO(fnm) if not isinstance(fnm, str) else fnm
    def _rewind():
        # BUGFIX: when *fnm* is a path string there is no .seek(); the old
        # code crashed with AttributeError in the CSV-fallback path. Only
        # rewind real file-like objects.
        if hasattr(file_like_object, "seek"):
            file_like_object.seek(0)
    try:
        _rewind()
        df = pd.read_excel(file_like_object)
    except Exception as e:
        logging.warning(f"Parse spreadsheet error: {e}, trying to interpret as CSV file")
        _rewind()
        df = pd.read_csv(file_like_object, on_bad_lines='skip')
    # Blank-only cells become empty strings so the table stays tidy.
    df = df.replace(r"^\s*$", "", regex=True)
    return df.to_markdown(index=False)
def __call__(self, fnm):
    """Flatten the workbook into "header:value; ..." text lines, one per row.

    *fnm* is a path or raw bytes. Empty cells are skipped; when the sheet
    name does not contain "sheet" it is appended as a " ——name" suffix.
    Returns a list of strings.
    """
    file_like_object = BytesIO(fnm) if not isinstance(fnm, str) else fnm
    wb = RAGFlowExcelParser._load_excel_to_workbook(file_like_object)
    res = []
    for sheetname in wb.sheetnames:
        ws = wb[sheetname]
        try:
            rows = list(ws.rows)
        except Exception as e:
            logging.warning(f"Skip sheet '{sheetname}' due to rows access error: {e}")
            continue
        if not rows:
            continue
        ti = list(rows[0])  # header cells, paired positionally with data cells
        for r in list(rows[1:]):
            fields = []
            for i, c in enumerate(r):
                if not c.value:
                    continue
                # Prefix with the matching header when one exists.
                t = str(ti[i].value) if i < len(ti) else ""
                t += (":" if t else "") + str(c.value)
                fields.append(t)
            line = "; ".join(fields)
            if sheetname.lower().find("sheet") < 0:
                line += " ——" + sheetname
            res.append(line)
    return res
@staticmethod
def row_number(fnm, binary):
    """Rough row count for a spreadsheet/CSV/TXT payload.

    Extensions containing "xls" are counted via the workbook loader;
    csv/txt payloads are decoded and counted by newline. Any other
    extension implicitly returns None.
    """
    extension = fnm.split(".")[-1].lower()
    if "xls" in extension:
        workbook = RAGFlowExcelParser._load_excel_to_workbook(BytesIO(binary))
        total = 0
        for sheetname in workbook.sheetnames:
            try:
                total += len(list(workbook[sheetname].rows))
            except Exception as e:
                logging.warning(f"Skip sheet '{sheetname}' due to rows access error: {e}")
        return total
    if extension in ("csv", "txt"):
        text = binary.decode(find_codec(binary), errors="ignore")
        return len(text.split("\n"))
if __name__ == "__main__":
    # CLI smoke test: parse the workbook at argv[1]; output is discarded.
    psr = RAGFlowExcelParser()
    psr(sys.argv[1])
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/tcadp_parser.py | deepdoc/parser/tcadp_parser.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import json
import logging
import os
import shutil
import tempfile
import time
import traceback
import types
import zipfile
from datetime import datetime
from io import BytesIO
from os import PathLike
from pathlib import Path
from typing import Any, Callable, Optional
import requests
from tencentcloud.common import credential
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.lkeap.v20240522 import lkeap_client, models
from common.config_utils import get_base_config
from deepdoc.parser.pdf_parser import RAGFlowPdfParser
class TencentCloudAPIClient:
"""Tencent Cloud API client using official SDK"""
def __init__(self, secret_id, secret_key, region):
    """Build an LKE-AP SDK client bound to the given credentials and region."""
    self.secret_id = secret_id
    self.secret_key = secret_key
    self.region = region
    self.outlines = []
    # SDK credential object wrapping the raw key pair.
    self.cred = credential.Credential(secret_id, secret_key)
    # HTTP profile pinned to the lkeap endpoint.
    self.httpProfile = HttpProfile()
    self.httpProfile.endpoint = "lkeap.tencentcloudapi.com"
    # Client profile carrying the HTTP profile above.
    self.clientProfile = ClientProfile()
    self.clientProfile.httpProfile = self.httpProfile
    # The actual API client used by reconstruct_document_sse.
    self.client = lkeap_client.LkeapClient(self.cred, region, self.clientProfile)
def reconstruct_document_sse(self, file_type, file_url=None, file_base64=None, file_start_page=1, file_end_page=1000, config=None):
    """Call the document parsing API (ReconstructDocumentSSE) via the SDK.

    Streaming responses are consumed until Progress == "100"; the final
    event dict (or the parsed non-streaming payload) is returned. Returns
    None on any SDK/parse error, and {} if the stream ends without a final
    event.
    """
    try:
        # Instantiate a request object, each interface corresponds to a request object
        req = models.ReconstructDocumentSSERequest()
        # Build request parameters
        params = {
            "FileType": file_type,
            "FileStartPageNumber": file_start_page,
            "FileEndPageNumber": file_end_page,
        }
        # According to Tencent Cloud API documentation, either FileUrl or FileBase64 parameter must be provided, if both are provided only FileUrl will be used
        if file_url:
            params["FileUrl"] = file_url
            logging.info(f"[TCADP] Using file URL: {file_url}")
        elif file_base64:
            params["FileBase64"] = file_base64
            logging.info(f"[TCADP] Using Base64 data, length: {len(file_base64)} characters")
        else:
            raise ValueError("Must provide either FileUrl or FileBase64 parameter")
        if config:
            params["Config"] = config
        req.from_json_string(json.dumps(params))
        # The returned resp is an instance of ReconstructDocumentSSEResponse, corresponding to the request object
        resp = self.client.ReconstructDocumentSSE(req)
        parser_result = {}
        if isinstance(resp, types.GeneratorType):  # Streaming response
            logging.info("[TCADP] Detected streaming response")
            for event in resp:
                logging.info(f"[TCADP] Received event: {event}")
                if event.get('data'):
                    try:
                        data_dict = json.loads(event['data'])
                        logging.info(f"[TCADP] Parsed data: {data_dict}")
                        if data_dict.get('Progress') == "100":
                            parser_result = data_dict
                            logging.info("[TCADP] Document parsing completed!")
                            logging.info(f"[TCADP] Task ID: {data_dict.get('TaskId')}")
                            logging.info(f"[TCADP] Success pages: {data_dict.get('SuccessPageNum')}")
                            logging.info(f"[TCADP] Failed pages: {data_dict.get('FailPageNum')}")
                            # Print failed page information
                            failed_pages = data_dict.get("FailedPages", [])
                            if failed_pages:
                                logging.warning("[TCADP] Failed parsing pages:")
                                for page in failed_pages:
                                    logging.warning(f"[TCADP] Page number: {page.get('PageNumber')}, Error: {page.get('ErrorMsg')}")
                            # Check if there is a download link
                            download_url = data_dict.get("DocumentRecognizeResultUrl")
                            if download_url:
                                logging.info(f"[TCADP] Got download link: {download_url}")
                            else:
                                logging.warning("[TCADP] No download link obtained")
                            break  # Found final result, exit loop
                        else:
                            # Print progress information
                            progress = data_dict.get("Progress", "0")
                            logging.info(f"[TCADP] Progress: {progress}%")
                    except json.JSONDecodeError as e:
                        # Malformed event payloads are logged and skipped.
                        logging.error(f"[TCADP] Failed to parse JSON data: {e}")
                        logging.error(f"[TCADP] Raw data: {event.get('data')}")
                        continue
                else:
                    logging.info(f"[TCADP] Event without data: {event}")
        else:  # Non-streaming response
            logging.info("[TCADP] Detected non-streaming response")
            if hasattr(resp, 'data') and resp.data:
                try:
                    data_dict = json.loads(resp.data)
                    parser_result = data_dict
                    logging.info(f"[TCADP] JSON parsing successful: {parser_result}")
                except json.JSONDecodeError as e:
                    logging.error(f"[TCADP] JSON parsing failed: {e}")
                    return None
            else:
                logging.error("[TCADP] No data in response")
                return None
        return parser_result
    except TencentCloudSDKException as err:
        logging.error(f"[TCADP] Tencent Cloud SDK error: {err}")
        return None
    except Exception as e:
        logging.error(f"[TCADP] Unknown error: {e}")
        logging.error(f"[TCADP] Error stack trace: {traceback.format_exc()}")
        return None
def download_result_file(self, download_url, output_dir):
    """Download the parsing-result archive to *output_dir*.

    Returns the saved file path, or None when *download_url* is empty or
    the download fails.
    """
    if not download_url:
        logging.warning("[TCADP] No downloadable result file")
        return None
    try:
        # BUGFIX: requests.get without a timeout can block the worker
        # forever on a stalled connection; cap the wait explicitly.
        response = requests.get(download_url, timeout=300)
        response.raise_for_status()
        # Ensure output directory exists
        os.makedirs(output_dir, exist_ok=True)
        # Timestamped filename so repeated downloads never collide.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"tcadp_result_{timestamp}.zip"
        file_path = os.path.join(output_dir, filename)
        # Save file
        with open(file_path, "wb") as f:
            f.write(response.content)
        logging.info(f"[TCADP] Document parsing result downloaded to: {os.path.basename(file_path)}")
        return file_path
    except requests.exceptions.RequestException as e:
        logging.error(f"[TCADP] Failed to download file: {e}")
        return None
class TCADPParser(RAGFlowPdfParser):
def __init__(self, secret_id: str = None, secret_key: str = None, region: Optional[str] = None,
             table_result_type: str = None, markdown_image_response_type: str = None):
    """Resolve TCADP credentials/settings from arguments and service_conf.yaml.

    Explicit arguments always win; otherwise values fall back to the
    tcadp_config section, then to built-in defaults.

    BUGFIX: *region* previously defaulted to "ap-guangzhou", which is truthy,
    so `region or config.get("region", ...)` could never read the configured
    region. Defaulting to None restores the intended precedence while
    keeping "ap-guangzhou" as the final fallback for all callers.

    Raises ValueError when no secret_id/secret_key can be resolved.
    """
    super().__init__()
    # First initialize logger
    self.logger = logging.getLogger(self.__class__.__name__)
    self.logger.info(f"[TCADP] Initializing with parameters - table_result_type: {table_result_type}, markdown_image_response_type: {markdown_image_response_type}")
    # Priority: read configuration from RAGFlow configuration system (service_conf.yaml)
    try:
        tcadp_parser = get_base_config("tcadp_config", {})
        if isinstance(tcadp_parser, dict) and tcadp_parser:
            self.secret_id = secret_id or tcadp_parser.get("secret_id")
            self.secret_key = secret_key or tcadp_parser.get("secret_key")
            self.region = region or tcadp_parser.get("region", "ap-guangzhou")
            # `is not None` (rather than truthiness) lets "" / "0" be passed
            # through deliberately.
            self.table_result_type = table_result_type if table_result_type is not None else tcadp_parser.get("table_result_type", "1")
            self.markdown_image_response_type = markdown_image_response_type if markdown_image_response_type is not None else tcadp_parser.get("markdown_image_response_type", "1")
        else:
            self.logger.error("[TCADP] Please configure tcadp_config in service_conf.yaml first")
            # If config file is empty, use provided parameters or defaults
            self.secret_id = secret_id
            self.secret_key = secret_key
            self.region = region or "ap-guangzhou"
            self.table_result_type = table_result_type if table_result_type is not None else "1"
            self.markdown_image_response_type = markdown_image_response_type if markdown_image_response_type is not None else "1"
    except ImportError:
        self.logger.info("[TCADP] Configuration module import failed")
        # If config file is not available, use provided parameters or defaults
        self.secret_id = secret_id
        self.secret_key = secret_key
        self.region = region or "ap-guangzhou"
        self.table_result_type = table_result_type if table_result_type is not None else "1"
        self.markdown_image_response_type = markdown_image_response_type if markdown_image_response_type is not None else "1"
    # Log final values
    self.logger.info(f"[TCADP] Final values - table_result_type: {self.table_result_type}, markdown_image_response_type: {self.markdown_image_response_type}")
    if not self.secret_id or not self.secret_key:
        raise ValueError("[TCADP] Please set Tencent Cloud API keys, configure tcadp_config in service_conf.yaml")
def check_installation(self) -> bool:
    """Verify that the Tencent Cloud ADP credentials are present and usable.

    Returns:
        True when both keys are set and a client can be constructed,
        False otherwise (the failure is logged, never raised).
    """
    try:
        if not (self.secret_id and self.secret_key):
            self.logger.error("[TCADP] Tencent Cloud API configuration incomplete")
            return False
        # Constructing the client is the cheapest way to validate the triple.
        TencentCloudAPIClient(self.secret_id, self.secret_key, self.region)
        self.logger.info("[TCADP] Tencent Cloud API configuration check passed")
        return True
    except Exception as e:
        self.logger.error(f"[TCADP] Tencent Cloud API configuration check failed: {e}")
        return False
def _file_to_base64(self, file_path: str, binary: bytes = None) -> str:
"""Convert file to Base64 format"""
if binary:
# If binary data is directly available, convert directly
return base64.b64encode(binary).decode('utf-8')
else:
# Read from file path and convert
with open(file_path, 'rb') as f:
file_data = f.read()
return base64.b64encode(file_data).decode('utf-8')
def _extract_content_from_zip(self, zip_path: str) -> list[dict[str, Any]]:
"""Extract parsing results from downloaded ZIP file"""
results = []
try:
with zipfile.ZipFile(zip_path, "r") as zip_file:
# Find JSON result files
json_files = [f for f in zip_file.namelist() if f.endswith(".json")]
for json_file in json_files:
with zip_file.open(json_file) as f:
data = json.load(f)
if isinstance(data, list):
results.extend(data)
else:
results.append(data)
# Find Markdown files
md_files = [f for f in zip_file.namelist() if f.endswith(".md")]
for md_file in md_files:
with zip_file.open(md_file) as f:
content = f.read().decode("utf-8")
results.append({"type": "text", "content": content, "file": md_file})
except Exception as e:
self.logger.error(f"[TCADP] Failed to extract ZIP file content: {e}")
return results
def _parse_content_to_sections(self, content_data: list[dict[str, Any]]) -> list[tuple[str, str]]:
"""Convert parsing results to sections format"""
sections = []
for item in content_data:
content_type = item.get("type", "text")
content = item.get("content", "")
if not content:
continue
# Process based on content type
if content_type == "text" or content_type == "paragraph":
section_text = content
elif content_type == "table":
# Handle table content
table_data = item.get("table_data", {})
if isinstance(table_data, dict):
# Convert table data to text
rows = table_data.get("rows", [])
section_text = "\n".join([" | ".join(row) for row in rows])
else:
section_text = str(table_data)
elif content_type == "image":
# Handle image content
caption = item.get("caption", "")
section_text = f"[Image] {caption}" if caption else "[Image]"
elif content_type == "equation":
# Handle equation content
section_text = f"$${content}$$"
else:
section_text = content
if section_text.strip():
# Generate position tag (simplified version)
position_tag = "@@1\t0.0\t1000.0\t0.0\t100.0##"
sections.append((section_text, position_tag))
return sections
def _parse_content_to_tables(self, content_data: list[dict[str, Any]]) -> list:
"""Convert parsing results to tables format"""
tables = []
for item in content_data:
if item.get("type") == "table":
table_data = item.get("table_data", {})
if isinstance(table_data, dict):
rows = table_data.get("rows", [])
if rows:
# Convert to table format
table_html = "<table>\n"
for i, row in enumerate(rows):
table_html += " <tr>\n"
for cell in row:
tag = "th" if i == 0 else "td"
table_html += f" <{tag}>{cell}</{tag}>\n"
table_html += " </tr>\n"
table_html += "</table>"
tables.append(table_html)
return tables
def parse_pdf(
    self,
    filepath: str | PathLike[str],
    binary: BytesIO | bytes,
    callback: Optional[Callable] = None,
    *,
    output_dir: Optional[str] = None,
    file_type: str = "PDF",
    file_start_page: Optional[int] = 1,
    file_end_page: Optional[int] = 1000,
    delete_output: Optional[bool] = True,
    max_retries: Optional[int] = 1,
) -> tuple:
    """Parse a document through the Tencent Cloud ADP service.

    Args:
        filepath: Path to the input document (used when ``binary`` is falsy).
        binary: Raw document bytes; when truthy they are written to a temp
            file and take precedence over ``filepath``.
        callback: Optional progress hook called as ``callback(progress, msg)``;
            progress -1 signals failure.
        output_dir: Directory for the downloaded result archive; a temp dir is
            created (and later removed when ``delete_output``) if omitted.
        file_type: Document type label sent to the API (default "PDF").
        file_start_page / file_end_page: Page range forwarded to the API.
        delete_output: Remove the auto-created temp output dir on exit.
        max_retries: Number of API attempts with exponential backoff.

    Returns:
        Tuple ``(sections, tables)`` as produced by
        ``_parse_content_to_sections`` / ``_parse_content_to_tables``.

    Raises:
        FileNotFoundError: When ``filepath`` does not exist.
        RuntimeError: When the API call, download link, or download fails.
    """
    temp_file = None
    created_tmp_dir = False
    try:
        # Handle input file: binary input wins over the filepath.
        if binary:
            temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
            temp_file.write(binary)
            temp_file.close()
            file_path = temp_file.name
            self.logger.info(f"[TCADP] Received binary PDF -> {os.path.basename(file_path)}")
            if callback:
                callback(0.1, f"[TCADP] Received binary PDF -> {os.path.basename(file_path)}")
        else:
            file_path = str(filepath)
            if not os.path.exists(file_path):
                if callback:
                    callback(-1, f"[TCADP] PDF file does not exist: {file_path}")
                raise FileNotFoundError(f"[TCADP] PDF file does not exist: {file_path}")
        # Convert file to Base64 format (the API takes the payload inline).
        if callback:
            callback(0.2, "[TCADP] Converting file to Base64 format")
        file_base64 = self._file_to_base64(file_path, binary)
        if callback:
            callback(0.25, f"[TCADP] File converted to Base64, size: {len(file_base64)} characters")
        # Create Tencent Cloud API client
        client = TencentCloudAPIClient(self.secret_id, self.secret_key, self.region)
        # Call document parsing API (with retry mechanism)
        if callback:
            callback(0.3, "[TCADP] Starting to call Tencent Cloud document parsing API")
        result = None
        for attempt in range(max_retries):
            try:
                if attempt > 0:
                    self.logger.info(f"[TCADP] Retry attempt {attempt + 1}")
                    if callback:
                        callback(0.3 + attempt * 0.1, f"[TCADP] Retry attempt {attempt + 1}")
                    time.sleep(2 ** attempt)  # Exponential backoff
                config = {
                    "TableResultType": self.table_result_type,
                    "MarkdownImageResponseType": self.markdown_image_response_type
                }
                self.logger.info(f"[TCADP] API request config - TableResultType: {self.table_result_type}, MarkdownImageResponseType: {self.markdown_image_response_type}")
                result = client.reconstruct_document_sse(
                    file_type=file_type,
                    file_base64=file_base64,
                    file_start_page=file_start_page,
                    file_end_page=file_end_page,
                    config=config
                )
                if result:
                    self.logger.info(f"[TCADP] Attempt {attempt + 1} successful")
                    break
                else:
                    self.logger.warning(f"[TCADP] Attempt {attempt + 1} failed, result is None")
            except Exception as e:
                self.logger.error(f"[TCADP] Attempt {attempt + 1} exception: {e}")
                if attempt == max_retries - 1:
                    # Last attempt: propagate instead of swallowing.
                    raise
        if not result:
            error_msg = f"[TCADP] Document parsing failed, retried {max_retries} times"
            self.logger.error(error_msg)
            if callback:
                callback(-1, error_msg)
            raise RuntimeError(error_msg)
        # The API returns a URL to a ZIP archive of the recognition result.
        download_url = result.get("DocumentRecognizeResultUrl")
        if not download_url:
            if callback:
                callback(-1, "[TCADP] No parsing result download link obtained")
            raise RuntimeError("[TCADP] No parsing result download link obtained")
        if callback:
            callback(0.6, f"[TCADP] Parsing result download link: {download_url}")
        # Set output directory (temp dir is tracked for cleanup in finally).
        if output_dir:
            out_dir = Path(output_dir)
            out_dir.mkdir(parents=True, exist_ok=True)
        else:
            out_dir = Path(tempfile.mkdtemp(prefix="adp_pdf_"))
            created_tmp_dir = True
        # Download result file
        zip_path = client.download_result_file(download_url, str(out_dir))
        if not zip_path:
            if callback:
                callback(-1, "[TCADP] Failed to download parsing result")
            raise RuntimeError("[TCADP] Failed to download parsing result")
        if callback:
            # Shorten file path display, only show filename
            zip_filename = os.path.basename(zip_path)
            callback(0.8, f"[TCADP] Parsing result downloaded: {zip_filename}")
        # Extract ZIP file content
        content_data = self._extract_content_from_zip(zip_path)
        self.logger.info(f"[TCADP] Extracted {len(content_data)} content blocks")
        if callback:
            callback(0.9, f"[TCADP] Extracted {len(content_data)} content blocks")
        # Convert to sections and tables format
        sections = self._parse_content_to_sections(content_data)
        tables = self._parse_content_to_tables(content_data)
        self.logger.info(f"[TCADP] Parsing completed: {len(sections)} sections, {len(tables)} tables")
        if callback:
            callback(1.0, f"[TCADP] Parsing completed: {len(sections)} sections, {len(tables)} tables")
        return sections, tables
    finally:
        # Clean up the temp input file written for binary input.
        if temp_file and os.path.exists(temp_file.name):
            try:
                os.unlink(temp_file.name)
            except Exception:
                pass
        # out_dir is only referenced when created_tmp_dir is True, so the
        # short-circuit keeps this safe on early failures.
        if delete_output and created_tmp_dir and out_dir.exists():
            try:
                shutil.rmtree(out_dir)
            except Exception:
                pass
if __name__ == "__main__":
    # Manual smoke test for the ADP parser: checks the credential setup and,
    # if a local path is filled in below, runs a full parse on that file.
    parser = TCADPParser()
    print("ADP available:", parser.check_installation())
    # Test parsing (set a real path here to exercise parse_pdf).
    filepath = ""
    if filepath and os.path.exists(filepath):
        with open(filepath, "rb") as file:
            sections, tables = parser.parse_pdf(filepath=filepath, binary=file.read())
        print(f"Parsing result: {len(sections)} sections, {len(tables)} tables")
        for i, (section, tag) in enumerate(sections[:3]):  # Only print first 3
            print(f"Section {i + 1}: {section[:100]}...")
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/utils.py | deepdoc/parser/utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from rag.nlp import find_codec
def get_text(fnm: str, binary=None) -> str:
    """Return the textual content of a file.

    Args:
        fnm: Path of the file to read (used when ``binary`` is falsy).
        binary: Optional raw bytes; when given, the codec is detected with
            ``find_codec`` and decoding errors are ignored.

    Returns:
        The decoded or read text.
    """
    if binary:
        encoding = find_codec(binary)
        return binary.decode(encoding, errors="ignore")
    # Fix: the original accumulated line-by-line with `txt += line`, which is
    # quadratic on large files; a single read() returns the same text.
    with open(fnm, "r") as f:
        return f.read()
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/markdown_parser.py | deepdoc/parser/markdown_parser.py | # -*- coding: utf-8 -*-
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from markdown import markdown
class RAGFlowMarkdownParser:
    """Markdown pre-processor that pulls tables (Markdown and HTML) out of a
    document, optionally replacing them inline with rendered HTML."""

    def __init__(self, chunk_token_num=128):
        # Target chunk size in tokens; coerced to int for downstream use.
        self.chunk_token_num = int(chunk_token_num)

    def extract_tables_and_remainder(self, markdown_text, separate_tables=True):
        """Return ``(remainder_text, tables)``.

        When ``separate_tables`` is True each detected table is removed from
        the text and collected in ``tables``; otherwise the table stays in
        place (Markdown tables are re-inserted as rendered HTML).
        """
        tables = []
        working_text = markdown_text

        def replace_tables_with_rendered_html(pattern, table_list, render=True):
            # Scan working_text for matches, collecting each raw table and
            # either dropping it or substituting rendered HTML in place.
            new_text = ""
            last_end = 0
            for match in pattern.finditer(working_text):
                raw_table = match.group()
                table_list.append(raw_table)
                if separate_tables:
                    # Skip this match (i.e., remove it)
                    new_text += working_text[last_end : match.start()] + "\n\n"
                else:
                    # Replace with rendered HTML
                    html_table = markdown(raw_table, extensions=["markdown.extensions.tables"]) if render else raw_table
                    new_text += working_text[last_end : match.start()] + html_table + "\n\n"
                last_end = match.end()
            new_text += working_text[last_end:]
            return new_text

        if "|" in markdown_text:  # for optimize performance
            # Standard Markdown table: header row, separator row, body rows.
            border_table_pattern = re.compile(
                r"""
                (?:\n|^)
                (?:\|.*?\|.*?\|.*?\n)
                (?:\|(?:\s*[:-]+[-| :]*\s*)\|.*?\n)
                (?:\|.*?\|.*?\|.*?\n)+
                """,
                re.VERBOSE,
            )
            working_text = replace_tables_with_rendered_html(border_table_pattern, tables)
            # Borderless Markdown table (no leading pipe on each row).
            no_border_table_pattern = re.compile(
                r"""
                (?:\n|^)
                (?:\S.*?\|.*?\n)
                (?:(?:\s*[:-]+[-| :]*\s*).*?\n)
                (?:\S.*?\|.*?\n)+
                """,
                re.VERBOSE,
            )
            working_text = replace_tables_with_rendered_html(no_border_table_pattern, tables)

        # Replace any TAGS e.g. <table ...> to <table> (strip attributes so
        # the "<table>" probe below is reliable).
        TAGS = ["table", "td", "tr", "th", "tbody", "thead", "div"]
        table_with_attributes_pattern = re.compile(rf"<(?:{'|'.join(TAGS)})[^>]*>", re.IGNORECASE)

        def replace_tag(m):
            # Keep only the bare tag name.
            tag_name = re.match(r"<(\w+)", m.group()).group(1)
            return "<{}>".format(tag_name)

        working_text = re.sub(table_with_attributes_pattern, replace_tag, working_text)

        if "<table>" in working_text.lower():  # for optimize performance
            # HTML table extraction - handle possible html/body wrapper tags
            html_table_pattern = re.compile(
                r"""
                (?:\n|^)
                \s*
                (?:
                # case1: <html><body><table>...</table></body></html>
                (?:<html[^>]*>\s*<body[^>]*>\s*<table[^>]*>.*?</table>\s*</body>\s*</html>)
                |
                # case2: <body><table>...</table></body>
                (?:<body[^>]*>\s*<table[^>]*>.*?</table>\s*</body>)
                |
                # case3: only<table>...</table>
                (?:<table[^>]*>.*?</table>)
                )
                \s*
                (?=\n|$)
                """,
                re.VERBOSE | re.DOTALL | re.IGNORECASE,
            )

            def replace_html_tables():
                # Same removal/inline logic as above, but HTML tables are
                # never re-rendered — they are already HTML.
                nonlocal working_text
                new_text = ""
                last_end = 0
                for match in html_table_pattern.finditer(working_text):
                    raw_table = match.group()
                    tables.append(raw_table)
                    if separate_tables:
                        new_text += working_text[last_end : match.start()] + "\n\n"
                    else:
                        new_text += working_text[last_end : match.start()] + raw_table + "\n\n"
                    last_end = match.end()
                new_text += working_text[last_end:]
                working_text = new_text

            replace_html_tables()

        return working_text, tables
class MarkdownElementExtractor:
    """Splits Markdown into block-level elements (headers, code blocks,
    lists, blockquotes, text paragraphs), or by caller-supplied delimiters."""

    def __init__(self, markdown_content):
        # Keep both the raw text and a line-split view for positional scans.
        self.markdown_content = markdown_content
        self.lines = markdown_content.split("\n")

    def get_delimiters(self, delimiters):
        """Build a regex alternation from backtick-quoted tokens, longest first."""
        toks = re.findall(r"`([^`]+)`", delimiters)
        # Longest-first ordering so overlapping tokens match greedily.
        toks = sorted(set(toks), key=lambda x: -len(x))
        return "|".join(re.escape(t) for t in toks if t)

    def extract_elements(self, delimiter=None, include_meta=False):
        """Extract individual elements (headers, code blocks, lists, etc.).

        Args:
            delimiter: Optional backtick-quoted delimiter spec; when it yields
                a usable pattern, the text is split on it instead of scanned.
            include_meta: When True each element is a dict with content and
                start/end line numbers; otherwise a bare string.
        """
        sections = []
        i = 0
        dels = ""
        if delimiter:
            dels = self.get_delimiters(delimiter)
        if len(dels) > 0:
            # Delimiter mode: split the whole text and return early.
            text = "\n".join(self.lines)
            if include_meta:
                pattern = re.compile(dels)
                last_end = 0
                for m in pattern.finditer(text):
                    part = text[last_end : m.start()]
                    if part and part.strip():
                        sections.append(
                            {
                                "content": part.strip(),
                                # Line numbers are recovered by counting newlines.
                                "start_line": text.count("\n", 0, last_end),
                                "end_line": text.count("\n", 0, m.start()),
                            }
                        )
                    last_end = m.end()
                part = text[last_end:]
                if part and part.strip():
                    sections.append(
                        {
                            "content": part.strip(),
                            "start_line": text.count("\n", 0, last_end),
                            "end_line": text.count("\n", 0, len(text)),
                        }
                    )
            else:
                parts = re.split(dels, text)
                sections = [p.strip() for p in parts if p and p.strip()]
            return sections
        # Positional scan mode: classify each line and consume one element
        # at a time, advancing past its end line.
        while i < len(self.lines):
            line = self.lines[i]
            if re.match(r"^#{1,6}\s+.*$", line):
                # header
                element = self._extract_header(i)
                sections.append(element if include_meta else element["content"])
                i = element["end_line"] + 1
            elif line.strip().startswith("```"):
                # code block
                element = self._extract_code_block(i)
                sections.append(element if include_meta else element["content"])
                i = element["end_line"] + 1
            elif re.match(r"^\s*[-*+]\s+.*$", line) or re.match(r"^\s*\d+\.\s+.*$", line):
                # list block
                element = self._extract_list_block(i)
                sections.append(element if include_meta else element["content"])
                i = element["end_line"] + 1
            elif line.strip().startswith(">"):
                # blockquote
                element = self._extract_blockquote(i)
                sections.append(element if include_meta else element["content"])
                i = element["end_line"] + 1
            elif line.strip():
                # text block (paragraphs and inline elements until next block element)
                element = self._extract_text_block(i)
                sections.append(element if include_meta else element["content"])
                i = element["end_line"] + 1
            else:
                i += 1
        # Drop empty elements in either representation.
        if include_meta:
            sections = [section for section in sections if section["content"].strip()]
        else:
            sections = [section for section in sections if section.strip()]
        return sections

    def _extract_header(self, start_pos):
        # A header is always exactly one line.
        return {
            "type": "header",
            "content": self.lines[start_pos],
            "start_line": start_pos,
            "end_line": start_pos,
        }

    def _extract_code_block(self, start_pos):
        end_pos = start_pos
        content_lines = [self.lines[start_pos]]
        # Find the closing ``` fence (included in the content).
        for i in range(start_pos + 1, len(self.lines)):
            content_lines.append(self.lines[i])
            end_pos = i
            if self.lines[i].strip().startswith("```"):
                break
        return {
            "type": "code_block",
            "content": "\n".join(content_lines),
            "start_line": start_pos,
            "end_line": end_pos,
        }

    def _extract_list_block(self, start_pos):
        end_pos = start_pos
        content_lines = []
        i = start_pos
        while i < len(self.lines):
            line = self.lines[i]
            # check if this line is a list item or continuation of a list
            # (blank lines and indented continuations stay inside the block)
            if (
                re.match(r"^\s*[-*+]\s+.*$", line)
                or re.match(r"^\s*\d+\.\s+.*$", line)
                or (i > start_pos and not line.strip())
                or (i > start_pos and re.match(r"^\s{2,}[-*+]\s+.*$", line))
                or (i > start_pos and re.match(r"^\s{2,}\d+\.\s+.*$", line))
                or (i > start_pos and re.match(r"^\s+\w+.*$", line))
            ):
                content_lines.append(line)
                end_pos = i
                i += 1
            else:
                break
        return {
            "type": "list_block",
            "content": "\n".join(content_lines),
            "start_line": start_pos,
            "end_line": end_pos,
        }

    def _extract_blockquote(self, start_pos):
        end_pos = start_pos
        content_lines = []
        i = start_pos
        while i < len(self.lines):
            line = self.lines[i]
            # Quoted lines plus interior blank lines belong to the quote.
            if line.strip().startswith(">") or (i > start_pos and not line.strip()):
                content_lines.append(line)
                end_pos = i
                i += 1
            else:
                break
        return {
            "type": "blockquote",
            "content": "\n".join(content_lines),
            "start_line": start_pos,
            "end_line": end_pos,
        }

    def _extract_text_block(self, start_pos):
        """Extract a text block (paragraphs, inline elements) until next block element"""
        end_pos = start_pos
        content_lines = [self.lines[start_pos]]
        i = start_pos + 1
        while i < len(self.lines):
            line = self.lines[i]
            # stop if we encounter a block element
            if re.match(r"^#{1,6}\s+.*$", line) or line.strip().startswith("```") or re.match(r"^\s*[-*+]\s+.*$", line) or re.match(r"^\s*\d+\.\s+.*$", line) or line.strip().startswith(">"):
                break
            elif not line.strip():
                # check if the next line is a block element; if so the blank
                # line terminates this paragraph, otherwise it is kept.
                if i + 1 < len(self.lines) and (
                    re.match(r"^#{1,6}\s+.*$", self.lines[i + 1])
                    or self.lines[i + 1].strip().startswith("```")
                    or re.match(r"^\s*[-*+]\s+.*$", self.lines[i + 1])
                    or re.match(r"^\s*\d+\.\s+.*$", self.lines[i + 1])
                    or self.lines[i + 1].strip().startswith(">")
                ):
                    break
                else:
                    content_lines.append(line)
                    end_pos = i
                    i += 1
            else:
                content_lines.append(line)
                end_pos = i
                i += 1
        return {
            "type": "text_block",
            "content": "\n".join(content_lines),
            "start_line": start_pos,
            "end_line": end_pos,
        }
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/json_parser.py | deepdoc/parser/json_parser.py | # -*- coding: utf-8 -*-
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The following documents are mainly referenced, and only adaptation modifications have been made
# from https://github.com/langchain-ai/langchain/blob/master/libs/text-splitters/langchain_text_splitters/json.py
import json
from typing import Any
from rag.nlp import find_codec
class RAGFlowJsonParser:
    """Splits JSON / JSONL content into size-bounded chunks while preserving
    the nested structure (adapted from langchain's RecursiveJsonSplitter)."""

    def __init__(self, max_chunk_size: int = 2000, min_chunk_size: int | None = None):
        super().__init__()
        # NOTE(review): max is doubled while the min default is derived from
        # the un-doubled value — appears intentional (inherited from upstream).
        self.max_chunk_size = max_chunk_size * 2
        self.min_chunk_size = min_chunk_size if min_chunk_size is not None else max(max_chunk_size - 200, 50)

    def __call__(self, binary):
        # Decode with the detected codec, then dispatch on JSONL vs JSON.
        encoding = find_codec(binary)
        txt = binary.decode(encoding, errors="ignore")
        if self.is_jsonl_format(txt):
            sections = self._parse_jsonl(txt)
        else:
            sections = self._parse_json(txt)
        return sections

    @staticmethod
    def _json_size(data: dict) -> int:
        """Calculate the size of the serialized JSON object."""
        return len(json.dumps(data, ensure_ascii=False))

    @staticmethod
    def _set_nested_dict(d: dict, path: list[str], value: Any) -> None:
        """Set a value in a nested dictionary based on the given path."""
        for key in path[:-1]:
            d = d.setdefault(key, {})
        d[path[-1]] = value

    def _list_to_dict_preprocessing(self, data: Any) -> Any:
        """Recursively convert lists to index-keyed dicts so the splitter
        only has to handle dict nesting."""
        if isinstance(data, dict):
            # Process each key-value pair in the dictionary
            return {k: self._list_to_dict_preprocessing(v) for k, v in data.items()}
        elif isinstance(data, list):
            # Convert the list to a dictionary with index-based keys
            return {str(i): self._list_to_dict_preprocessing(item) for i, item in enumerate(data)}
        else:
            # Base case: the item is neither a dict nor a list, so return it unchanged
            return data

    def _json_split(
        self,
        data,
        current_path: list[str] | None,
        chunks: list[dict] | None,
    ) -> list[dict]:
        """
        Split json into maximum size dictionaries while preserving structure.

        Chunks are built in place: values are appended to the last chunk
        until it would exceed max_chunk_size, then a new chunk is started
        (only once the current one has reached min_chunk_size).
        """
        current_path = current_path or []
        chunks = chunks or [{}]
        if isinstance(data, dict):
            for key, value in data.items():
                new_path = current_path + [key]
                chunk_size = self._json_size(chunks[-1])
                size = self._json_size({key: value})
                remaining = self.max_chunk_size - chunk_size
                if size < remaining:
                    # Add item to current chunk
                    self._set_nested_dict(chunks[-1], new_path, value)
                else:
                    if chunk_size >= self.min_chunk_size:
                        # Chunk is big enough, start a new chunk
                        chunks.append({})
                    # Iterate (recurse into the oversized value)
                    self._json_split(value, new_path, chunks)
        else:
            # handle single item (scalar leaf placed at its full path)
            self._set_nested_dict(chunks[-1], current_path, data)
        return chunks

    def split_json(
        self,
        json_data,
        convert_lists: bool = False,
    ) -> list[dict]:
        """Splits JSON into a list of JSON chunks"""
        if convert_lists:
            preprocessed_data = self._list_to_dict_preprocessing(json_data)
            chunks = self._json_split(preprocessed_data, None, None)
        else:
            chunks = self._json_split(json_data, None, None)
        # Remove the last chunk if it's empty
        if not chunks[-1]:
            chunks.pop()
        return chunks

    def split_text(
        self,
        json_data: dict[str, Any],
        convert_lists: bool = False,
        ensure_ascii: bool = True,
    ) -> list[str]:
        """Splits JSON into a list of JSON formatted strings"""
        chunks = self.split_json(json_data=json_data, convert_lists=convert_lists)
        # Convert to string
        return [json.dumps(chunk, ensure_ascii=ensure_ascii) for chunk in chunks]

    def _parse_json(self, content: str) -> list[str]:
        """Parse a single JSON document; malformed input yields []."""
        sections = []
        try:
            json_data = json.loads(content)
            chunks = self.split_json(json_data, True)
            sections = [json.dumps(line, ensure_ascii=False) for line in chunks if line]
        except json.JSONDecodeError:
            pass
        return sections

    def _parse_jsonl(self, content: str) -> list[str]:
        """Parse JSON-Lines content, skipping blank and malformed lines."""
        lines = content.strip().splitlines()
        all_chunks = []
        for line in lines:
            if not line.strip():
                continue
            try:
                data = json.loads(line)
                chunks = self.split_json(data, convert_lists=True)
                all_chunks.extend(json.dumps(chunk, ensure_ascii=False) for chunk in chunks if chunk)
            except json.JSONDecodeError:
                continue
        return all_chunks

    def is_jsonl_format(self, txt: str, sample_limit: int = 10, threshold: float = 0.8) -> bool:
        """Heuristic: treat the text as JSONL when it is not one valid JSON
        document and at least `threshold` of the sampled lines parse alone."""
        lines = [line.strip() for line in txt.strip().splitlines() if line.strip()]
        if not lines:
            return False
        try:
            # A single parseable document is plain JSON, not JSONL.
            json.loads(txt)
            return False
        except json.JSONDecodeError:
            pass
        sample_limit = min(len(lines), sample_limit)
        sample_lines = lines[:sample_limit]
        valid_lines = sum(1 for line in sample_lines if self._is_valid_json(line))
        if not valid_lines:
            return False
        return (valid_lines / len(sample_lines)) >= threshold

    def _is_valid_json(self, line: str) -> bool:
        # True when the line parses as a standalone JSON value.
        try:
            json.loads(line)
            return True
        except json.JSONDecodeError:
            return False
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/ppt_parser.py | deepdoc/parser/ppt_parser.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from io import BytesIO
from pptx import Presentation
class RAGFlowPptParser:
    """Extracts per-slide text from a .pptx presentation, flattening text
    frames, tables, and grouped shapes in visual reading order."""

    def __init__(self):
        super().__init__()

    def __get_bulleted_text(self, paragraph):
        # A paragraph is bulleted if its XML carries any bullet property
        # (character, auto-number, or picture bullet).
        is_bulleted = bool(paragraph._p.xpath("./a:pPr/a:buChar")) or bool(paragraph._p.xpath("./a:pPr/a:buAutoNum")) or bool(paragraph._p.xpath("./a:pPr/a:buBlip"))
        if is_bulleted:
            # Indent by nesting level and prefix with a dot marker.
            return f"{' '* paragraph.level}.{paragraph.text}"
        else:
            return paragraph.text

    def __extract(self, shape):
        """Return the textual content of one shape (recursing into groups)."""
        try:
            # First try to get text content
            if hasattr(shape, 'has_text_frame') and shape.has_text_frame:
                text_frame = shape.text_frame
                texts = []
                for paragraph in text_frame.paragraphs:
                    if paragraph.text.strip():
                        texts.append(self.__get_bulleted_text(paragraph))
                return "\n".join(texts)
            # Safely get shape_type
            try:
                shape_type = shape.shape_type
            except NotImplementedError:
                # If shape_type is not available, try to get text content
                if hasattr(shape, 'text'):
                    return shape.text.strip()
                return ""
            # Handle table — NOTE(review): 19 presumably corresponds to
            # MSO_SHAPE_TYPE.TABLE in python-pptx; confirm against the enum.
            if shape_type == 19:
                tb = shape.table
                rows = []
                # Each data row is rendered as "header: cell" pairs using row 0
                # as the header row (row 0 itself is not emitted).
                for i in range(1, len(tb.rows)):
                    rows.append("; ".join([tb.cell(
                        0, j).text + ": " + tb.cell(i, j).text for j in range(len(tb.columns)) if tb.cell(i, j)]))
                return "\n".join(rows)
            # Handle group shape — NOTE(review): 6 presumably GROUP; recurse
            # over children sorted roughly top-to-bottom then left-to-right.
            if shape_type == 6:
                texts = []
                for p in sorted(shape.shapes, key=lambda x: (x.top // 10, x.left)):
                    t = self.__extract(p)
                    if t:
                        texts.append(t)
                return "\n".join(texts)
            return ""
        except Exception as e:
            logging.error(f"Error processing shape: {str(e)}")
            return ""

    def __call__(self, fnm, from_page, to_page, callback=None):
        """Return one concatenated text string per slide in [from_page, to_page)."""
        ppt = Presentation(fnm) if isinstance(
            fnm, str) else Presentation(
            BytesIO(fnm))
        txts = []
        self.total_page = len(ppt.slides)
        for i, slide in enumerate(ppt.slides):
            if i < from_page:
                continue
            if i >= to_page:
                break
            texts = []
            # Sort shapes into reading order; top/left may be None for some
            # shapes, treated as 0.
            for shape in sorted(
                    slide.shapes, key=lambda x: ((x.top if x.top is not None else 0) // 10, x.left if x.left is not None else 0)):
                txt = self.__extract(shape)
                if txt:
                    texts.append(txt)
            txts.append("\n".join(texts))
        return txts
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/__init__.py | deepdoc/parser/__init__.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .docx_parser import RAGFlowDocxParser as DocxParser
from .excel_parser import RAGFlowExcelParser as ExcelParser
from .html_parser import RAGFlowHtmlParser as HtmlParser
from .json_parser import RAGFlowJsonParser as JsonParser
from .markdown_parser import MarkdownElementExtractor
from .markdown_parser import RAGFlowMarkdownParser as MarkdownParser
from .pdf_parser import PlainParser
from .pdf_parser import RAGFlowPdfParser as PdfParser
from .ppt_parser import RAGFlowPptParser as PptParser
from .txt_parser import RAGFlowTxtParser as TxtParser
__all__ = [
"PdfParser",
"PlainParser",
"DocxParser",
"ExcelParser",
"PptParser",
"HtmlParser",
"JsonParser",
"MarkdownParser",
"TxtParser",
"MarkdownElementExtractor",
]
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/pdf_parser.py | deepdoc/parser/pdf_parser.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import logging
import math
import os
import random
import re
import sys
import threading
from collections import Counter, defaultdict
from copy import deepcopy
from io import BytesIO
from timeit import default_timer as timer
import numpy as np
import pdfplumber
import xgboost as xgb
from huggingface_hub import snapshot_download
from PIL import Image
from pypdf import PdfReader as pdf2_read
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from common.file_utils import get_project_base_directory
from common.misc_utils import pip_install_torch
from deepdoc.vision import OCR, AscendLayoutRecognizer, LayoutRecognizer, Recognizer, TableStructureRecognizer
from rag.nlp import rag_tokenizer
from rag.prompts.generator import vision_llm_describe_prompt
from common import settings
LOCK_KEY_pdfplumber = "global_shared_lock_pdfplumber"
if LOCK_KEY_pdfplumber not in sys.modules:
sys.modules[LOCK_KEY_pdfplumber] = threading.Lock()
class RAGFlowPdfParser:
def __init__(self, **kwargs):
    """
    Set up OCR, layout recognition, table-structure recognition, and the
    XGBoost up/down text-concatenation model.

    If you have trouble downloading HuggingFace models, -_^ this might help!!

    For Linux:
    export HF_ENDPOINT=https://hf-mirror.com

    For Windows:
    Good luck
    ^_-
    """
    self.ocr = OCR()
    # One semaphore per device throttles parallel page processing.
    self.parallel_limiter = None
    if settings.PARALLEL_DEVICES > 1:
        self.parallel_limiter = [asyncio.Semaphore(1) for _ in range(settings.PARALLEL_DEVICES)]
    # Select the layout recognizer backend ("onnx" default, or "ascend").
    layout_recognizer_type = os.getenv("LAYOUT_RECOGNIZER_TYPE", "onnx").lower()
    if layout_recognizer_type not in ["onnx", "ascend"]:
        raise RuntimeError("Unsupported layout recognizer type.")
    # Subclasses may set model_speciess to pick a domain-specific layout model.
    if hasattr(self, "model_speciess"):
        recognizer_domain = "layout." + self.model_speciess
    else:
        recognizer_domain = "layout"
    if layout_recognizer_type == "ascend":
        logging.debug("Using Ascend LayoutRecognizer")
        self.layouter = AscendLayoutRecognizer(recognizer_domain)
    else:  # onnx
        logging.debug("Using Onnx LayoutRecognizer")
        self.layouter = LayoutRecognizer(recognizer_domain)
    self.tbl_det = TableStructureRecognizer()
    self.updown_cnt_mdl = xgb.Booster()
    try:
        # Use CUDA for the XGBoost model when torch reports a GPU.
        pip_install_torch()
        import torch.cuda
        if torch.cuda.is_available():
            self.updown_cnt_mdl.set_param({"device": "cuda"})
    except Exception:
        logging.info("No torch found.")
    try:
        # Prefer the locally bundled model; fall back to a HF snapshot.
        model_dir = os.path.join(get_project_base_directory(), "rag/res/deepdoc")
        self.updown_cnt_mdl.load_model(os.path.join(model_dir, "updown_concat_xgb.model"))
    except Exception:
        model_dir = snapshot_download(repo_id="InfiniFlow/text_concat_xgb_v1.0", local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"), local_dir_use_symlinks=False)
        self.updown_cnt_mdl.load_model(os.path.join(model_dir, "updown_concat_xgb.model"))
    self.page_from = 0
    self.column_num = 1
def __char_width(self, c):
return (c["x1"] - c["x0"]) // max(len(c["text"]), 1)
def __height(self, c):
return c["bottom"] - c["top"]
def _x_dis(self, a, b):
return min(abs(a["x1"] - b["x0"]), abs(a["x0"] - b["x1"]), abs(a["x0"] + a["x1"] - b["x0"] - b["x1"]) / 2)
def _y_dis(self, a, b):
return (b["top"] + b["bottom"] - a["top"] - a["bottom"]) / 2
def _match_proj(self, b):
proj_patt = [
r"第[零一二三四五六七八九十百]+章",
r"第[零一二三四五六七八九十百]+[条节]",
r"[零一二三四五六七八九十百]+[、是 ]",
r"[\((][零一二三四五六七八九十百]+[)\)]",
r"[\((][0-9]+[)\)]",
r"[0-9]+(、|\.[ ]|)|\.[^0-9./a-zA-Z_%><-]{4,})",
r"[0-9]+\.[0-9.]+(、|\.[ ])",
r"[⚫•➢①② ]",
]
return any([re.match(p, b["text"]) for p in proj_patt])
def _updown_concat_features(self, up, down):
    """Build the feature vector the XGBoost model uses to decide whether
    box *down* continues box *up*.

    The order and count of features is a contract with the trained model
    (updown_concat_xgb.model) — do not reorder or remove entries.
    """
    w = max(self.__char_width(up), self.__char_width(down))
    h = max(self.__height(up), self.__height(down))
    y_dis = self._y_dis(up, down)
    # Only the first/last few characters of each box feed the token features.
    LEN = 6
    tks_down = rag_tokenizer.tokenize(down["text"][:LEN]).split()
    tks_up = rag_tokenizer.tokenize(up["text"][-LEN:]).split()
    # Insert a space between the fragments when both sides are alphanumeric,
    # so cross-boundary tokenization mimics normal word spacing.
    tks_all = up["text"][-LEN:].strip() + (" " if re.match(r"[a-zA-Z0-9]+", up["text"][-1] + down["text"][0]) else "") + down["text"][:LEN].strip()
    tks_all = rag_tokenizer.tokenize(tks_all).split()
    fea = [
        up.get("R", -1) == down.get("R", -1),
        y_dis / h,
        down["page_number"] - up["page_number"],
        up["layout_type"] == down["layout_type"],
        up["layout_type"] == "text",
        down["layout_type"] == "text",
        up["layout_type"] == "table",
        down["layout_type"] == "table",
        True if re.search(r"([。?!;!?;+))]|[a-z]\.)$", up["text"]) else False,
        True if re.search(r"[,:‘“、0-9(+-]$", up["text"]) else False,
        True if re.search(r"(^.?[/,?;:\],。;:’”?!》】)-])", down["text"]) else False,
        True if re.match(r"[\((][^\(\)()]+[)\)]$", up["text"]) else False,
        True if re.search(r"[,,][^。.]+$", up["text"]) else False,
        # NOTE(review): this feature duplicates the one above; kept as-is
        # because the trained model expects this exact vector layout.
        True if re.search(r"[,,][^。.]+$", up["text"]) else False,
        True if re.search(r"[\((][^\))]+$", up["text"]) and re.search(r"[\))]", down["text"]) else False,
        self._match_proj(down),
        True if re.match(r"[A-Z]", down["text"]) else False,
        True if re.match(r"[A-Z]", up["text"][-1]) else False,
        True if re.match(r"[a-z0-9]", up["text"][-1]) else False,
        True if re.match(r"[0-9.%,-]+$", down["text"]) else False,
        up["text"].strip()[-2:] == down["text"].strip()[-2:] if len(up["text"].strip()) > 1 and len(down["text"].strip()) > 1 else False,
        up["x0"] > down["x1"],
        abs(self.__height(up) - self.__height(down)) / min(self.__height(up), self.__height(down)),
        self._x_dis(up, down) / max(w, 0.000001),
        (len(up["text"]) - len(down["text"])) / max(len(up["text"]), len(down["text"])),
        len(tks_all) - len(tks_up) - len(tks_down),
        len(tks_down) - len(tks_up),
        tks_down[-1] == tks_up[-1] if tks_down and tks_up else False,
        max(down["in_row"], up["in_row"]),
        abs(down["in_row"] - up["in_row"]),
        len(tks_down) == 1 and rag_tokenizer.tag(tks_down[0]).find("n") >= 0,
        len(tks_up) == 1 and rag_tokenizer.tag(tks_up[0]).find("n") >= 0,
    ]
    return fea
@staticmethod
def sort_X_by_page(arr, threshold):
# sort using y1 first and then x1
arr = sorted(arr, key=lambda r: (r["page_number"], r["x0"], r["top"]))
for i in range(len(arr) - 1):
for j in range(i, -1, -1):
# restore the order using th
if abs(arr[j + 1]["x0"] - arr[j]["x0"]) < threshold and arr[j + 1]["top"] < arr[j]["top"] and arr[j + 1]["page_number"] == arr[j]["page_number"]:
tmp = arr[j]
arr[j] = arr[j + 1]
arr[j + 1] = tmp
return arr
def _has_color(self, o):
if o.get("ncs", "") == "DeviceGray":
if o["stroking_color"] and o["stroking_color"][0] == 1 and o["non_stroking_color"] and o["non_stroking_color"][0] == 1:
if re.match(r"[a-zT_\[\]\(\)-]+", o.get("text", "")):
return False
return True
def _table_transformer_job(self, ZM):
    """Run the table-structure recognizer over every table region and tag
    the text boxes inside tables with row (R), header (H), column (C) and
    spanning-cell (SP) indices.

    Args:
        ZM: zoom factor of the rendered page images relative to PDF space.
    """
    logging.debug("Table processing...")
    imgs, pos = [], []
    # tbcnt[i+1] holds the number of tables on page i (prefix-summed later).
    tbcnt = [0]
    MARGIN = 10
    self.tb_cpns = []
    assert len(self.page_layout) == len(self.page_images)
    for p, tbls in enumerate(self.page_layout):  # for page
        tbls = [f for f in tbls if f["type"] == "table"]
        tbcnt.append(len(tbls))
        if not tbls:
            continue
        for tb in tbls:  # for table
            # Crop with a margin, scaled to image pixels.
            left, top, right, bott = tb["x0"] - MARGIN, tb["top"] - MARGIN, tb["x1"] + MARGIN, tb["bottom"] + MARGIN
            left *= ZM
            top *= ZM
            right *= ZM
            bott *= ZM
            pos.append((left, top))
            imgs.append(self.page_images[p].crop((left, top, right, bott)))

    assert len(self.page_images) == len(tbcnt) - 1
    if not imgs:
        return
    recos = self.tbl_det(imgs)
    tbcnt = np.cumsum(tbcnt)
    for i in range(len(tbcnt) - 1):  # for page
        pg = []
        for j, tb_items in enumerate(recos[tbcnt[i] : tbcnt[i + 1]]):  # for table
            poss = pos[tbcnt[i] : tbcnt[i + 1]]
            for it in tb_items:  # for table components
                # Shift crop-local coordinates back to page coordinates...
                it["x0"] = it["x0"] + poss[j][0]
                it["x1"] = it["x1"] + poss[j][0]
                it["top"] = it["top"] + poss[j][1]
                it["bottom"] = it["bottom"] + poss[j][1]
                # ...then back from pixels to PDF space and cumulative Y.
                for n in ["x0", "x1", "top", "bottom"]:
                    it[n] /= ZM
                it["top"] += self.page_cum_height[i]
                it["bottom"] += self.page_cum_height[i]
                it["pn"] = i
                it["layoutno"] = j
                pg.append(it)
        self.tb_cpns.extend(pg)

    def gather(kwd, fzy=10, ption=0.6):
        # Collect components whose label matches *kwd*, sort by Y, and drop
        # those that do not overlap real text boxes enough.
        eles = Recognizer.sort_Y_firstly([r for r in self.tb_cpns if re.match(kwd, r["label"])], fzy)
        eles = Recognizer.layouts_cleanup(self.boxes, eles, 5, ption)
        return Recognizer.sort_Y_firstly(eles, 0)

    # add R,H,C,SP tag to boxes within table layout
    headers = gather(r".*header$")
    rows = gather(r".* (row|header)")
    spans = gather(r".*spanning")
    clmns = sorted([r for r in self.tb_cpns if re.match(r"table column$", r["label"])], key=lambda x: (x["pn"], x["layoutno"], x["x0"]))
    clmns = Recognizer.layouts_cleanup(self.boxes, clmns, 5, 0.5)

    for b in self.boxes:
        if b.get("layout_type", "") != "table":
            continue
        ii = Recognizer.find_overlapped_with_threshold(b, rows, thr=0.3)
        if ii is not None:
            b["R"] = ii
            b["R_top"] = rows[ii]["top"]
            b["R_bott"] = rows[ii]["bottom"]

        ii = Recognizer.find_overlapped_with_threshold(b, headers, thr=0.3)
        if ii is not None:
            b["H_top"] = headers[ii]["top"]
            b["H_bott"] = headers[ii]["bottom"]
            b["H_left"] = headers[ii]["x0"]
            b["H_right"] = headers[ii]["x1"]
            b["H"] = ii

        ii = Recognizer.find_horizontally_tightest_fit(b, clmns)
        if ii is not None:
            b["C"] = ii
            b["C_left"] = clmns[ii]["x0"]
            b["C_right"] = clmns[ii]["x1"]

        # Spanning cells overwrite the header extent fields on purpose:
        # a cell inside a span inherits the span's bounds.
        ii = Recognizer.find_overlapped_with_threshold(b, spans, thr=0.3)
        if ii is not None:
            b["H_top"] = spans[ii]["top"]
            b["H_bott"] = spans[ii]["bottom"]
            b["H_left"] = spans[ii]["x0"]
            b["H_right"] = spans[ii]["x1"]
            b["SP"] = ii
def __ocr(self, pagenum, img, chars, ZM=3, device_id: int | None = None):
    """Detect text boxes on one page image, fill them from pdfplumber chars
    where possible, and OCR-recognize the boxes that stay empty.

    Args:
        pagenum: 1-based page number of *img*.
        img: rendered page image (PIL-compatible).
        chars: pdfplumber character dicts for this page.
        ZM: zoom factor mapping image pixels back to PDF coordinates.
        device_id: optional OCR device index.

    Appends the resulting list of box dicts to self.boxes.
    """
    start = timer()
    bxs = self.ocr.detect(np.array(img), device_id)
    logging.info(f"__ocr detecting boxes of a image cost ({timer() - start}s)")
    start = timer()
    if not bxs:
        self.boxes.append([])
        return
    # Keep each detection's quad and recognized text candidate.
    bxs = [(line[0], line[1][0]) for line in bxs]
    bxs = Recognizer.sort_Y_firstly(
        [
            {"x0": b[0][0] / ZM, "x1": b[1][0] / ZM, "top": b[0][1] / ZM, "text": "", "txt": t, "bottom": b[-1][1] / ZM, "chars": [], "page_number": pagenum}
            for b, t in bxs
            if b[0][0] <= b[1][0] and b[0][1] <= b[-1][1]
        ],
        self.mean_height[pagenum - 1] / 3,
    )

    # merge chars in the same rect
    for c in chars:
        ii = Recognizer.find_overlapped(c, bxs)
        if ii is None:
            self.lefted_chars.append(c)
            continue
        # Reject chars whose height is wildly different from the box height
        # (likely a mis-assignment), except plain spaces.
        ch = c["bottom"] - c["top"]
        bh = bxs[ii]["bottom"] - bxs[ii]["top"]
        if abs(ch - bh) / max(ch, bh) >= 0.7 and c["text"] != " ":
            self.lefted_chars.append(c)
            continue
        bxs[ii]["chars"].append(c)

    # Build each box's text from its assigned chars (sorted top-down).
    for b in bxs:
        if not b["chars"]:
            del b["chars"]
            continue
        m_ht = np.mean([c["height"] for c in b["chars"]])
        for c in Recognizer.sort_Y_firstly(b["chars"], m_ht):
            # Keep a space only after latin/cyrillic/punctuation characters.
            if c["text"] == " " and b["text"]:
                if re.match(r"[0-9a-zA-Zа-яА-Я,.?;:!%%]", b["text"][-1]):
                    b["text"] += " "
            else:
                b["text"] += c["text"]
        del b["chars"]
    logging.info(f"__ocr sorting {len(chars)} chars cost {timer() - start}s")

    start = timer()
    # Boxes with no embedded text fall back to OCR recognition on a
    # rotated/cropped patch of the page image.
    boxes_to_reg = []
    img_np = np.array(img)
    for b in bxs:
        if not b["text"]:
            left, right, top, bott = b["x0"] * ZM, b["x1"] * ZM, b["top"] * ZM, b["bottom"] * ZM
            b["box_image"] = self.ocr.get_rotate_crop_image(img_np, np.array([[left, top], [right, top], [right, bott], [left, bott]], dtype=np.float32))
            boxes_to_reg.append(b)
        del b["txt"]
    texts = self.ocr.recognize_batch([b["box_image"] for b in boxes_to_reg], device_id)
    for i in range(len(boxes_to_reg)):
        boxes_to_reg[i]["text"] = texts[i]
        del boxes_to_reg[i]["box_image"]
    logging.info(f"__ocr recognize {len(bxs)} boxes cost {timer() - start}s")

    # Drop boxes that stayed empty; bootstrap the page's mean height if unset.
    bxs = [b for b in bxs if b["text"]]
    if self.mean_height[pagenum - 1] == 0:
        self.mean_height[pagenum - 1] = np.median([b["bottom"] - b["top"] for b in bxs])
    self.boxes.append(bxs)
def _layouts_rec(self, ZM, drop=True):
assert len(self.page_images) == len(self.boxes)
self.boxes, self.page_layout = self.layouter(self.page_images, self.boxes, ZM, drop=drop)
# cumlative Y
for i in range(len(self.boxes)):
self.boxes[i]["top"] += self.page_cum_height[self.boxes[i]["page_number"] - 1]
self.boxes[i]["bottom"] += self.page_cum_height[self.boxes[i]["page_number"] - 1]
def _assign_column(self, boxes, zoomin=3):
    """Assign a 0-based `col_id` to every box by KMeans-clustering x0
    positions per page; k (max 4) is picked by silhouette score.

    Boxes are mutated in place and the same list is returned. No-op when
    *boxes* is empty or every box already carries a col_id.
    """
    if not boxes:
        return boxes
    if all("col_id" in b for b in boxes):
        return boxes
    by_page = defaultdict(list)
    for b in boxes:
        by_page[b["page_number"]].append(b)

    # First pass: decide the best column count per page.
    page_cols = {}
    for pg, bxs in by_page.items():
        if not bxs:
            page_cols[pg] = 1
            continue
        x0s_raw = np.array([b["x0"] for b in bxs], dtype=float)
        min_x0 = np.min(x0s_raw)
        max_x1 = np.max([b["x1"] for b in bxs])
        width = max_x1 - min_x0
        # Snap small indents (< 12% of the text width) onto the left margin
        # so indented first lines don't spawn phantom columns.
        INDENT_TOL = width * 0.12
        x0s = []
        for x in x0s_raw:
            if abs(x - min_x0) < INDENT_TOL:
                x0s.append([min_x0])
            else:
                x0s.append([x])
        x0s = np.array(x0s, dtype=float)
        max_try = min(4, len(bxs))
        if max_try < 2:
            max_try = 1
        best_k = 1
        best_score = -1
        for k in range(1, max_try + 1):
            km = KMeans(n_clusters=k, n_init="auto")
            labels = km.fit_predict(x0s)
            centers = np.sort(km.cluster_centers_.flatten())
            if len(centers) > 1:
                try:
                    score = silhouette_score(x0s, labels)
                except ValueError:
                    # silhouette_score rejects degenerate clusterings.
                    continue
            else:
                score = 0
            if score > best_score:
                best_score = score
                best_k = k
        page_cols[pg] = best_k
        logging.info(f"[Page {pg}] best_score={best_score:.2f}, best_k={best_k}")

    # NOTE(review): the majority column count is computed and logged but the
    # per-page k is what is actually used below — confirm that's intended.
    global_cols = Counter(page_cols.values()).most_common(1)[0][0]
    logging.info(f"Global column_num decided by majority: {global_cols}")

    # Second pass: cluster again per page and map cluster labels to
    # left-to-right column ids via the sorted cluster centers.
    for pg, bxs in by_page.items():
        if not bxs:
            continue
        k = page_cols[pg]
        if len(bxs) < k:
            k = 1
        x0s = np.array([[b["x0"]] for b in bxs], dtype=float)
        km = KMeans(n_clusters=k, n_init="auto")
        labels = km.fit_predict(x0s)
        centers = km.cluster_centers_.flatten()
        order = np.argsort(centers)
        remap = {orig: new for new, orig in enumerate(order)}
        for b, lb in zip(bxs, labels):
            b["col_id"] = remap[lb]
        # NOTE(review): `grouped` is built but never used — dead code?
        grouped = defaultdict(list)
        for b in bxs:
            grouped[b["col_id"]].append(b)
    return boxes
def _text_merge(self, zoomin=3):
    """Horizontally merge adjacent boxes of the same layout element on the
    same page/column; tables, figures and equations are never merged."""
    # merge adjacent boxes (column ids assigned first)
    bxs = self._assign_column(self.boxes, zoomin)

    # NOTE(review): end_with/start_with are defined but unused below —
    # presumably leftovers from an earlier heuristic; confirm before removal.
    def end_with(b, txt):
        txt = txt.strip()
        tt = b.get("text", "").strip()
        return tt and tt.find(txt) == len(tt) - len(txt)

    def start_with(b, txts):
        tt = b.get("text", "").strip()
        return tt and any([tt.find(t.strip()) == 0 for t in txts])

    # horizontally merge adjacent box with the same layout
    i = 0
    while i < len(bxs) - 1:
        b = bxs[i]
        b_ = bxs[i + 1]
        # Only merge within the same page and the same detected column.
        if b["page_number"] != b_["page_number"] or b.get("col_id") != b_.get("col_id"):
            i += 1
            continue
        # Never merge across layout elements or inside table/figure/equation.
        if b.get("layoutno", "0") != b_.get("layoutno", "1") or b.get("layout_type", "") in ["table", "figure", "equation"]:
            i += 1
            continue
        # Vertical centers within a third of the page's mean line height
        # means the boxes sit on the same text line: merge rightwards.
        if abs(self._y_dis(b, b_)) < self.mean_height[bxs[i]["page_number"] - 1] / 3:
            # merge
            bxs[i]["x1"] = b_["x1"]
            bxs[i]["top"] = (b["top"] + b_["top"]) / 2
            bxs[i]["bottom"] = (b["bottom"] + b_["bottom"]) / 2
            bxs[i]["text"] += b_["text"]
            bxs.pop(i + 1)
            continue
        i += 1
    self.boxes = bxs
def _naive_vertical_merge(self, zoomin=3):
    """Merge vertically adjacent boxes per page when simple text/geometry
    heuristics say the lower box continues the upper one.

    NOTE(review): merges mutate the shared box dicts in place, but the
    final re-assignment of self.boxes (last line) is commented out, so
    boxes popped from the per-page lists are NOT removed from self.boxes —
    confirm this is intentional.
    """
    #bxs = self._assign_column(self.boxes, zoomin)
    bxs = self.boxes
    grouped = defaultdict(list)
    for b in bxs:
        # grouped[(b["page_number"], b.get("col_id", 0))].append(b)
        grouped[(b["page_number"], "x")].append(b)

    merged_boxes = []
    for (pg, col), bxs in grouped.items():
        bxs = sorted(bxs, key=lambda x: (x["top"], x["x0"]))
        if not bxs:
            continue
        # Mean line height for the page (fallback: median box height or 10).
        mh = self.mean_height[pg - 1] if self.mean_height else np.median([b["bottom"] - b["top"] for b in bxs]) or 10
        i = 0
        while i + 1 < len(bxs):
            b = bxs[i]
            b_ = bxs[i + 1]
            # Drop page-number/bullet-only boxes at a page boundary.
            if b["page_number"] < b_["page_number"] and re.match(r"[0-9 •一—-]+$", b["text"]):
                bxs.pop(i)
                continue
            if not b["text"].strip():
                bxs.pop(i)
                continue
            # Different layout elements are never merged.
            if not b["text"].strip() or b.get("layoutno") != b_.get("layoutno"):
                i += 1
                continue
            # Too large a vertical gap: not a continuation.
            if b_["top"] - b["bottom"] > mh * 1.5:
                i += 1
                continue
            # Require at least 30% horizontal overlap vs. the narrower box.
            overlap = max(0, min(b["x1"], b_["x1"]) - max(b["x0"], b_["x0"]))
            if overlap / max(1, min(b["x1"] - b["x0"], b_["x1"] - b_["x0"])) < 0.3:
                i += 1
                continue
            # Punctuation signals that the upper box clearly continues.
            concatting_feats = [
                b["text"].strip()[-1] in ",;:'\",、‘“;:-",
                len(b["text"].strip()) > 1 and b["text"].strip()[-2] in ",;:'\",‘“、;:",
                b_["text"].strip() and b_["text"].strip()[0] in "。;?!?”)),,、:",
            ]
            # features for not concating
            feats = [
                b.get("layoutno", 0) != b_.get("layoutno", 0),
                b["text"].strip()[-1] in "。?!?",
                self.is_english and b["text"].strip()[-1] in ".!?",
                b["page_number"] == b_["page_number"] and b_["top"] - b["bottom"] > self.mean_height[b["page_number"] - 1] * 1.5,
                b["page_number"] < b_["page_number"] and abs(b["x0"] - b_["x0"]) > self.mean_width[b["page_number"] - 1] * 4,
            ]
            # split features
            detach_feats = [b["x1"] < b_["x0"], b["x0"] > b_["x1"]]
            if (any(feats) and not any(concatting_feats)) or any(detach_feats):
                logging.debug(
                    "{} {} {} {}".format(
                        b["text"],
                        b_["text"],
                        any(feats),
                        any(concatting_feats),
                    )
                )
                i += 1
                continue
            # Merge: extend the upper box with the lower one's text/geometry.
            b["text"] = (b["text"].rstrip() + " " + b_["text"].lstrip()).strip()
            b["bottom"] = b_["bottom"]
            b["x0"] = min(b["x0"], b_["x0"])
            b["x1"] = max(b["x1"], b_["x1"])
            bxs.pop(i + 1)
        merged_boxes.extend(bxs)
    #self.boxes = sorted(merged_boxes, key=lambda x: (x["page_number"], x.get("col_id", 0), x["top"]))
def _final_reading_order_merge(self, zoomin=3):
if not self.boxes:
return
self.boxes = self._assign_column(self.boxes, zoomin=zoomin)
pages = defaultdict(lambda: defaultdict(list))
for b in self.boxes:
pg = b["page_number"]
col = b.get("col_id", 0)
pages[pg][col].append(b)
for pg in pages:
for col in pages[pg]:
pages[pg][col].sort(key=lambda x: (x["top"], x["x0"]))
new_boxes = []
for pg in sorted(pages.keys()):
for col in sorted(pages[pg].keys()):
new_boxes.extend(pages[pg][col])
self.boxes = new_boxes
def _concat_downward(self, concat_between_pages=True):
    """Sort boxes top-to-bottom.

    NOTE(review): the early `return` below disables the original
    model-based downward concatenation, which is kept (unreachable) for
    reference — confirm the dead code is intentionally retained.

    Args:
        concat_between_pages: in the disabled path, whether boxes may be
            concatenated across page boundaries.
    """
    self.boxes = Recognizer.sort_Y_firstly(self.boxes, 0)
    return
    # ---- everything below is unreachable (legacy implementation) ----
    # count boxes in the same row as a feature
    for i in range(len(self.boxes)):
        mh = self.mean_height[self.boxes[i]["page_number"] - 1]
        self.boxes[i]["in_row"] = 0
        j = max(0, i - 12)
        while j < min(i + 12, len(self.boxes)):
            if j == i:
                j += 1
                continue
            ydis = self._y_dis(self.boxes[i], self.boxes[j]) / mh
            if abs(ydis) < 1:
                self.boxes[i]["in_row"] += 1
            elif ydis > 0:
                break
            j += 1

    # concat between rows
    boxes = deepcopy(self.boxes)
    blocks = []
    while boxes:
        chunks = []

        def dfs(up, dp):
            chunks.append(up)
            i = dp
            while i < min(dp + 12, len(boxes)):
                ydis = self._y_dis(up, boxes[i])
                smpg = up["page_number"] == boxes[i]["page_number"]
                mh = self.mean_height[up["page_number"] - 1]
                mw = self.mean_width[up["page_number"] - 1]
                if smpg and ydis > mh * 4:
                    break
                if not smpg and ydis > mh * 16:
                    break
                down = boxes[i]
                if not concat_between_pages and down["page_number"] > up["page_number"]:
                    break

                if up.get("R", "") != down.get("R", "") and up["text"][-1] != ",":
                    i += 1
                    continue

                if re.match(r"[0-9]{2,3}/[0-9]{3}$", up["text"]) or re.match(r"[0-9]{2,3}/[0-9]{3}$", down["text"]) or not down["text"].strip():
                    i += 1
                    continue

                if not down["text"].strip() or not up["text"].strip():
                    i += 1
                    continue

                if up["x1"] < down["x0"] - 10 * mw or up["x0"] > down["x1"] + 10 * mw:
                    i += 1
                    continue

                if i - dp < 5 and up.get("layout_type") == "text":
                    if up.get("layoutno", "1") == down.get("layoutno", "2"):
                        dfs(down, i + 1)
                        boxes.pop(i)
                        return
                    i += 1
                    continue

                fea = self._updown_concat_features(up, down)
                if self.updown_cnt_mdl.predict(xgb.DMatrix([fea]))[0] <= 0.5:
                    i += 1
                    continue
                dfs(down, i + 1)
                boxes.pop(i)
                return

        dfs(boxes[0], 1)
        boxes.pop(0)
        if chunks:
            blocks.append(chunks)

    # concat within each block
    boxes = []
    for b in blocks:
        if len(b) == 1:
            boxes.append(b[0])
            continue
        t = b[0]
        for c in b[1:]:
            t["text"] = t["text"].strip()
            c["text"] = c["text"].strip()
            if not c["text"]:
                continue
            if t["text"] and re.match(r"[0-9\.a-zA-Z]+$", t["text"][-1] + c["text"][-1]):
                t["text"] += " "
            t["text"] += c["text"]
            t["x0"] = min(t["x0"], c["x0"])
            t["x1"] = max(t["x1"], c["x1"])
            t["page_number"] = min(t["page_number"], c["page_number"])
            t["bottom"] = c["bottom"]
            if not t["layout_type"] and c["layout_type"]:
                t["layout_type"] = c["layout_type"]
        boxes.append(t)

    self.boxes = Recognizer.sort_Y_firstly(boxes, 0)
def _filter_forpages(self):
    """Remove table-of-contents / acknowledgement pages and their entries.

    First pass: find a TOC/acknowledgement heading, drop it and the
    following entries sharing the same textual prefix. If no heading is
    found, pages where many lines contain dot leaders are treated as TOC
    pages and all their boxes are dropped.
    """
    if not self.boxes:
        return
    findit = False
    i = 0
    while i < len(self.boxes):
        # Heading match after stripping (half/full-width) spaces.
        if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$", re.sub(r"( | |\u3000)+", "", self.boxes[i]["text"].lower())):
            i += 1
            continue
        findit = True
        # English TOCs use the first two words as the entry prefix;
        # otherwise the first three characters.
        eng = re.match(r"[0-9a-zA-Z :'.-]{5,}", self.boxes[i]["text"].strip())
        self.boxes.pop(i)
        if i >= len(self.boxes):
            break
        prefix = self.boxes[i]["text"].strip()[:3] if not eng else " ".join(self.boxes[i]["text"].strip().split()[:2])
        while not prefix:
            self.boxes.pop(i)
            if i >= len(self.boxes):
                break
            prefix = self.boxes[i]["text"].strip()[:3] if not eng else " ".join(self.boxes[i]["text"].strip().split()[:2])
        self.boxes.pop(i)
        if i >= len(self.boxes) or not prefix:
            break
        # Drop everything up to the first box (within 128) matching the
        # prefix — i.e. the remaining TOC entries.
        for j in range(i, min(i + 128, len(self.boxes))):
            if not re.match(prefix, self.boxes[j]["text"]):
                continue
            for k in range(i, j):
                self.boxes.pop(i)
            break
    if findit:
        return

    # Fallback: count dot-leader lines per page; pages with more than three
    # are considered TOC pages and removed wholesale.
    # NOTE(review): the three regex alternatives render identically here —
    # likely distinct middle-dot code points originally; confirm.
    page_dirty = [0] * len(self.page_images)
    for b in self.boxes:
        if re.search(r"(··|··|··)", b["text"]):
            page_dirty[b["page_number"] - 1] += 1
    page_dirty = set([i + 1 for i, t in enumerate(page_dirty) if t > 3])
    if not page_dirty:
        return
    i = 0
    while i < len(self.boxes):
        if self.boxes[i]["page_number"] in page_dirty:
            self.boxes.pop(i)
            continue
        i += 1
def _merge_with_same_bullet(self):
    """Merge consecutive boxes that start with the same bullet character.

    Boxes beginning with the same non-letter, non-Chinese character and
    overlapping vertically are newline-joined into the lower box; blank
    boxes are dropped along the way.

    NOTE(review): the letter set "qwertyuopasdfghjklzxcvbnm" omits 'i' —
    presumably so roman-numeral bullets like "i." still merge; confirm.
    """
    i = 0
    while i + 1 < len(self.boxes):
        b = self.boxes[i]
        b_ = self.boxes[i + 1]
        if not b["text"].strip():
            self.boxes.pop(i)
            continue
        if not b_["text"].strip():
            self.boxes.pop(i + 1)
            continue

        # Skip when the leading chars differ, look like ordinary words
        # (latin letter / Chinese), or the boxes don't overlap vertically.
        if (
            b["text"].strip()[0] != b_["text"].strip()[0]
            or b["text"].strip()[0].lower() in set("qwertyuopasdfghjklzxcvbnm")
            or rag_tokenizer.is_chinese(b["text"].strip()[0])
            or b["top"] > b_["bottom"]
        ):
            i += 1
            continue
        # Merge upwards into the lower box and widen its bounds.
        b_["text"] = b["text"] + "\n" + b_["text"]
        b_["x0"] = min(b["x0"], b_["x0"])
        b_["x1"] = max(b["x1"], b_["x1"])
        b_["top"] = b["top"]
        self.boxes.pop(i)
def _extract_table_figure(self, need_image, ZM, return_html, need_position, separate_tables_figures=False):
tables = {}
figures = {}
# extract figure and table boxes
i = 0
lst_lout_no = ""
nomerge_lout_no = []
while i < len(self.boxes):
if "layoutno" not in self.boxes[i]:
i += 1
continue
lout_no = str(self.boxes[i]["page_number"]) + "-" + str(self.boxes[i]["layoutno"])
if TableStructureRecognizer.is_caption(self.boxes[i]) or self.boxes[i]["layout_type"] in ["table caption", "title", "figure caption", "reference"]:
nomerge_lout_no.append(lst_lout_no)
if self.boxes[i]["layout_type"] == "table":
if re.match(r"(数据|资料|图表)*来源[:: ]", self.boxes[i]["text"]):
self.boxes.pop(i)
continue
if lout_no not in tables:
tables[lout_no] = []
tables[lout_no].append(self.boxes[i])
self.boxes.pop(i)
lst_lout_no = lout_no
continue
if need_image and self.boxes[i]["layout_type"] == "figure":
if re.match(r"(数据|资料|图表)*来源[:: ]", self.boxes[i]["text"]):
self.boxes.pop(i)
continue
if lout_no not in figures:
figures[lout_no] = []
figures[lout_no].append(self.boxes[i])
self.boxes.pop(i)
lst_lout_no = lout_no
continue
i += 1
# merge table on different pages
nomerge_lout_no = set(nomerge_lout_no)
tbls = sorted([(k, bxs) for k, bxs in tables.items()], key=lambda x: (x[1][0]["top"], x[1][0]["x0"]))
i = len(tbls) - 1
while i - 1 >= 0:
k0, bxs0 = tbls[i - 1]
k, bxs = tbls[i]
i -= 1
if k0 in nomerge_lout_no:
continue
if bxs[0]["page_number"] == bxs0[0]["page_number"]:
continue
if bxs[0]["page_number"] - bxs0[0]["page_number"] > 1:
continue
mh = self.mean_height[bxs[0]["page_number"] - 1]
if self._y_dis(bxs0[-1], bxs[0]) > mh * 23:
continue
tables[k0].extend(tables[k])
del tables[k]
def x_overlapped(a, b):
return not any([a["x1"] < b["x0"], a["x0"] > b["x1"]])
# find captions and pop out
i = 0
while i < len(self.boxes):
c = self.boxes[i]
# mh = self.mean_height[c["page_number"]-1]
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | true |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/docx_parser.py | deepdoc/parser/docx_parser.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from docx import Document
import re
import pandas as pd
from collections import Counter
from rag.nlp import rag_tokenizer
from io import BytesIO
class RAGFlowDocxParser:
    """Parse .docx files into (text, style) sections plus flattened tables."""

    def __extract_table_content(self, tb):
        # Convert a python-docx table into a DataFrame of raw cell text.
        df = []
        for row in tb.rows:
            df.append([c.text for c in row.cells])
        return self.__compose_table_content(pd.DataFrame(df))

    def __compose_table_content(self, df):
        """Flatten a table DataFrame into 'header: value' text lines.

        Returns one line per data row when the table is wide (>3 columns),
        otherwise a single newline-joined string; empty list for tables
        with fewer than two rows.
        """

        def blockType(b):
            # Classify a cell's content: Dt=date, Nu=number, Ca=code-like,
            # En=english words, NE=number+entity, Sg=single char,
            # Tx/Lx=short/long text, Nr=person name, Ot=other.
            pattern = [
                ("^(20|19)[0-9]{2}[年/-][0-9]{1,2}[月/-][0-9]{1,2}日*$", "Dt"),
                (r"^(20|19)[0-9]{2}年$", "Dt"),
                (r"^(20|19)[0-9]{2}[年/-][0-9]{1,2}月*$", "Dt"),
                ("^[0-9]{1,2}[月/-][0-9]{1,2}日*$", "Dt"),
                (r"^第*[一二三四1-4]季度$", "Dt"),
                (r"^(20|19)[0-9]{2}年*[一二三四1-4]季度$", "Dt"),
                (r"^(20|19)[0-9]{2}[ABCDE]$", "DT"),
                ("^[0-9.,+%/ -]+$", "Nu"),
                (r"^[0-9A-Z/\._~-]+$", "Ca"),
                (r"^[A-Z]*[a-z' -]+$", "En"),
                (r"^[0-9.,+-]+[0-9A-Za-z/$¥%<>()()' -]+$", "NE"),
                (r"^.{1}$", "Sg")
            ]
            for p, n in pattern:
                if re.search(p, b):
                    return n
            tks = [t for t in rag_tokenizer.tokenize(b).split() if len(t) > 1]
            if len(tks) > 3:
                if len(tks) < 12:
                    return "Tx"
                else:
                    return "Lx"

            if len(tks) == 1 and rag_tokenizer.tag(tks[0]) == "nr":
                return "Nr"

            return "Ot"

        if len(df) < 2:
            return []
        # Dominant cell type across the table body drives header detection.
        max_type = Counter([blockType(str(df.iloc[i, j])) for i in range(
            1, len(df)) for j in range(len(df.iloc[i, :]))])
        max_type = max(max_type.items(), key=lambda x: x[1])[0]

        colnm = len(df.iloc[0, :])
        hdrows = [0]  # header is not necessarily appear in the first line
        if max_type == "Nu":
            # Numeric tables: any later row whose dominant type differs from
            # the body type is treated as an extra header row.
            for r in range(1, len(df)):
                tys = Counter([blockType(str(df.iloc[r, j]))
                               for j in range(len(df.iloc[r, :]))])
                tys = max(tys.items(), key=lambda x: x[1])[0]
                if tys != max_type:
                    hdrows.append(r)

        lines = []
        for i in range(1, len(df)):
            if i in hdrows:
                continue
            # Negative offsets of header rows above row i; keep only the
            # nearest contiguous run of header rows.
            hr = [r - i for r in hdrows]
            hr = [r for r in hr if r < 0]
            t = len(hr) - 1
            while t > 0:
                if hr[t] - hr[t - 1] > 1:
                    hr = hr[t:]
                    break
                t -= 1
            # Build a 'h1,h2: ' prefix per column from the header rows.
            headers = []
            for j in range(len(df.iloc[i, :])):
                t = []
                for h in hr:
                    x = str(df.iloc[i + h, j]).strip()
                    if x in t:
                        continue
                    t.append(x)
                t = ",".join(t)
                if t:
                    t += ": "
                headers.append(t)
            cells = []
            for j in range(len(df.iloc[i, :])):
                if not str(df.iloc[i, j]):
                    continue
                cells.append(headers[j] + str(df.iloc[i, j]))
            lines.append(";".join(cells))
        if colnm > 3:
            return lines
        return ["\n".join(lines)]

    def __call__(self, fnm, from_page=0, to_page=100000000):
        """Parse *fnm* (path or raw bytes) into (sections, tables) within
        the page window [from_page, to_page)."""
        self.doc = Document(fnm) if isinstance(
            fnm, str) else Document(BytesIO(fnm))
        pn = 0  # parsed page
        secs = []  # parsed contents
        for p in self.doc.paragraphs:
            if pn > to_page:
                break

            runs_within_single_paragraph = []  # save runs within the range of pages
            for run in p.runs:
                if pn > to_page:
                    break
                if from_page <= pn < to_page and p.text.strip():
                    runs_within_single_paragraph.append(run.text)  # append run.text first

                # wrap page break checker into a static method
                if 'lastRenderedPageBreak' in run._element.xml:
                    pn += 1

            secs.append(("".join(runs_within_single_paragraph), p.style.name if hasattr(p.style, 'name') else ''))  # then concat run.text as part of the paragraph

        tbls = [self.__extract_table_content(tb) for tb in self.doc.tables]
        return secs, tbls
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/txt_parser.py | deepdoc/parser/txt_parser.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from deepdoc.parser.utils import get_text
from common.token_utils import num_tokens_from_string
class RAGFlowTxtParser:
    """Split plain text into chunks of roughly `chunk_token_num` tokens,
    breaking on a configurable set of delimiters."""

    def __call__(self, fnm, binary=None, chunk_token_num=128, delimiter="\n!?;。;!?"):
        """Read *fnm* (path, or raw bytes via *binary*) and chunk its text."""
        txt = get_text(fnm, binary)
        return self.parser_txt(txt, chunk_token_num, delimiter)

    @classmethod
    def parser_txt(cls, txt, chunk_token_num=128, delimiter="\n!?;。;!?"):
        """Split *txt* on the delimiter set and pack pieces into chunks.

        A new chunk is opened once the current one has exceeded
        chunk_token_num tokens. Delimiters may contain backslash escapes
        (e.g. "\\n") and multi-character delimiters wrapped in backticks.
        Returns [[chunk_text, ""], ...].
        """
        if not isinstance(txt, str):
            raise TypeError("txt type should be str!")
        cks = [""]
        tk_nums = [0]
        # Decode user-typed escape sequences (e.g. "\\n") into real control
        # characters while round-tripping non-ASCII delimiters intact.
        delimiter = delimiter.encode('utf-8').decode('unicode_escape').encode('latin1').decode('utf-8')

        def add_chunk(t):
            # Append to the current chunk, or start a new one once the
            # current chunk has already exceeded the token budget.
            nonlocal cks, tk_nums, delimiter
            tnum = num_tokens_from_string(t)
            if tk_nums[-1] > chunk_token_num:
                cks.append(t)
                tk_nums.append(tnum)
            else:
                cks[-1] += t
                tk_nums[-1] += tnum

        # Backtick-quoted segments are multi-character delimiters; every
        # other character is a single-character delimiter.
        dels = []
        s = 0
        for m in re.finditer(r"`([^`]+)`", delimiter, re.I):
            f, t = m.span()
            dels.append(m.group(1))
            dels.extend(list(delimiter[s: f]))
            s = t
        if s < len(delimiter):
            dels.extend(list(delimiter[s:]))
        dels = [re.escape(d) for d in dels if d]
        dels = [d for d in dels if d]
        dels = "|".join(dels)
        # Split keeping the delimiters as separate items, then skip the
        # delimiter items themselves when building chunks.
        secs = re.split(r"(%s)" % dels, txt)
        for sec in secs:
            if re.match(f"^{dels}$", sec):
                continue
            add_chunk(sec)

        return [[c, ""] for c in cks]
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/docling_parser.py | deepdoc/parser/docling_parser.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import logging
import re
from dataclasses import dataclass
from enum import Enum
from io import BytesIO
from os import PathLike
from pathlib import Path
from typing import Any, Callable, Iterable, Optional
import pdfplumber
from PIL import Image
try:
from docling.document_converter import DocumentConverter
except Exception:
DocumentConverter = None
try:
from deepdoc.parser.pdf_parser import RAGFlowPdfParser
except Exception:
class RAGFlowPdfParser:
pass
class DoclingContentType(str, Enum):
    """Content-item categories produced by the Docling conversion."""

    IMAGE = "image"
    TABLE = "table"
    TEXT = "text"
    EQUATION = "equation"
@dataclass
class _BBox:
    """Bounding box of a document item on a 1-based page.

    NOTE(review): y0/y1 come straight from Docling and are flipped against
    the rendered page height before use (see _make_line_tag), which implies
    a bottom-origin coordinate system — confirm against Docling docs.
    """

    page_no: int
    x0: float
    y0: float
    x1: float
    y1: float
class DoclingParser(RAGFlowPdfParser):
def __init__(self):
    """Initialize logger and default page-window/state attributes."""
    self.logger = logging.getLogger(self.__class__.__name__)
    # Rendered page images; populated by __images__.
    self.page_images: list[Image.Image] = []
    # Page window (0-based, half-open) of the most recent parse.
    self.page_from = 0
    self.page_to = 10_000
    self.outlines = []
def check_installation(self) -> bool:
    """Return True if the optional 'docling' dependency is importable and a
    DocumentConverter can actually be constructed; log why otherwise."""
    if DocumentConverter is None:
        self.logger.warning("[Docling] 'docling' is not importable, please: pip install docling")
        return False
    try:
        # Constructing the converter catches broken installs early.
        _ = DocumentConverter()
        return True
    except Exception as e:
        self.logger.error(f"[Docling] init DocumentConverter failed: {e}")
        return False
def __images__(self, fnm, zoomin: int = 1, page_from=0, page_to=600, callback=None):
    """Render pages [page_from, page_to) of a PDF to PIL images at
    72*zoomin dpi via pdfplumber.

    *fnm* may be a path or raw bytes. On any failure page_images is left
    empty and the exception is logged. *callback* is accepted for
    interface parity and unused here.
    """
    self.page_from = page_from
    self.page_to = page_to
    bytes_io = None
    try:
        if not isinstance(fnm, (str, PathLike)):
            # Raw bytes: wrap in a BytesIO that we close in `finally`.
            bytes_io = BytesIO(fnm)
        opener = pdfplumber.open(fnm) if isinstance(fnm, (str, PathLike)) else pdfplumber.open(bytes_io)
        with opener as pdf:
            pages = pdf.pages[page_from:page_to]
            self.page_images = [p.to_image(resolution=72 * zoomin, antialias=True).original for p in pages]
    except Exception as e:
        self.page_images = []
        self.logger.exception(e)
    finally:
        if bytes_io:
            bytes_io.close()
def _make_line_tag(self, bbox: _BBox) -> str:
    """Serialize *bbox* into a position tag '@@page\\tx0\\tx1\\ttop\\tbottom##'.

    When page images are available, the Y coordinates are flipped against
    the rendered page height so the tag matches image coordinates.
    Returns "" for a missing bbox.
    """
    if bbox is None:
        return ""
    left, right = bbox.x0, bbox.x1
    top, bottom = bbox.y0, bbox.y1
    pages = getattr(self, "page_images", None)
    if pages and len(pages) >= bbox.page_no:
        page_height = pages[bbox.page_no - 1].size[1]
        top, bottom = page_height - top, page_height - bottom
    return "@@{}\t{:.1f}\t{:.1f}\t{:.1f}\t{:.1f}##".format(bbox.page_no, left, right, top, bottom)
@staticmethod
def extract_positions(txt: str) -> list[tuple[list[int], float, float, float, float]]:
    """Parse every '@@pn\\tleft\\tright\\ttop\\tbottom##' tag found in *txt*.

    Returns a list of (pages, left, right, top, bottom) tuples where
    *pages* is a list of 0-based page indices (tags store 1-based,
    '-'-separated page numbers).
    """
    positions = []
    for tag in re.findall(r"@@[0-9-]+\t[0-9.\t]+##", txt):
        fields = tag.strip("#").strip("@").split("\t")
        page_field = fields[0]
        left, right, top, bottom = (float(v) for v in fields[1:])
        pages = [int(p) - 1 for p in page_field.split("-")]
        positions.append((pages, left, right, top, bottom))
    return positions
def crop(self, text: str, ZM: int = 1, need_position: bool = False):
    """Stitch the page regions referenced by position tags in *text* into
    one vertical image.

    A dimmed context strip is prepended/appended around the tagged
    regions. Returns the composite image, or (image, positions) when
    need_position is True; (None, None)/None when *text* has no tags.
    """
    imgs = []
    poss = self.extract_positions(text)
    if not poss:
        return (None, None) if need_position else None

    GAP = 6
    # Add a context strip above the first region and below the last one.
    pos = poss[0]
    poss.insert(0, ([pos[0][0]], pos[1], pos[2], max(0, pos[3] - 120), max(pos[3] - GAP, 0)))
    pos = poss[-1]
    poss.append(([pos[0][-1]], pos[1], pos[2], min(self.page_images[pos[0][-1]].size[1], pos[4] + GAP), min(self.page_images[pos[0][-1]].size[1], pos[4] + 120)))

    positions = []
    for ii, (pns, left, right, top, bottom) in enumerate(poss):
        if bottom <= top:
            bottom = top + 4
        img0 = self.page_images[pns[0]]
        x0, y0, x1, y1 = int(left), int(top), int(right), int(min(bottom, img0.size[1]))
        crop0 = img0.crop((x0, y0, x1, y1))
        imgs.append(crop0)
        # Context strips (first/last entries) are excluded from positions.
        if 0 < ii < len(poss)-1:
            positions.append((pns[0] + self.page_from, x0, x1, y0, y1))
        # A region spilling past the page bottom continues on later pages.
        remain_bottom = bottom - img0.size[1]
        for pn in pns[1:]:
            if remain_bottom <= 0:
                break
            page = self.page_images[pn]
            x0, y0, x1, y1 = int(left), 0, int(right), int(min(remain_bottom, page.size[1]))
            cimgp = page.crop((x0, y0, x1, y1))
            imgs.append(cimgp)
            if 0 < ii < len(poss) - 1:
                positions.append((pn + self.page_from, x0, x1, y0, y1))
            remain_bottom -= page.size[1]

    if not imgs:
        return (None, None) if need_position else None
    # Stack all crops vertically on a light-gray canvas, dimming the
    # first and last (context) strips.
    height = sum(i.size[1] + GAP for i in imgs)
    width = max(i.size[0] for i in imgs)
    pic = Image.new("RGB", (width, int(height)), (245, 245, 245))
    h = 0
    for ii, img in enumerate(imgs):
        if ii == 0 or ii + 1 == len(imgs):
            img = img.convert("RGBA")
            overlay = Image.new("RGBA", img.size, (0, 0, 0, 0))
            overlay.putalpha(128)
            img = Image.alpha_composite(img, overlay).convert("RGB")
        pic.paste(img, (0, int(h)))
        h += img.size[1] + GAP
    return (pic, positions) if need_position else pic
def _iter_doc_items(self, doc) -> Iterable[tuple[str, Any, Optional[_BBox]]]:
    """Yield (content_type, text, bbox) triples from a Docling document.

    First pass emits body text / section headers / list items; second
    pass emits formula items. *bbox* is None when the item carries no
    usable provenance.
    """
    for t in getattr(doc, "texts", []):
        parent = getattr(t, "parent", "")
        ref = getattr(parent, "cref", "")
        label = getattr(t, "label", "")
        if (label in ("section_header", "text",) and ref in ("#/body",)) or label in ("list_item",):
            text = getattr(t, "text", "") or ""
            bbox = None
            if getattr(t, "prov", None):
                pn = getattr(t.prov[0], "page_no", None)
                bb = getattr(t.prov[0], "bbox", None)
                bb = [getattr(bb, "l", None), getattr(bb, "t", None), getattr(bb, "r", None), getattr(bb, "b", None)]
                if pn and bb and len(bb) == 4:
                    bbox = _BBox(page_no=int(pn), x0=bb[0], y0=bb[1], x1=bb[2], y1=bb[3])
            yield (DoclingContentType.TEXT.value, text, bbox)
    # NOTE(review): docling labels are typically lower-case ("formula");
    # confirm this upper-case match is intended.
    for item in getattr(doc, "texts", []):
        if getattr(item, "label", "") in ("FORMULA",):
            text = getattr(item, "text", "") or ""
            bbox = None
            if getattr(item, "prov", None):
                # BUG FIX: `prov` is a list of provenance items (see the text
                # loop above); the original read page_no/bbox off the list
                # object itself, so formula positions were always dropped.
                pn = getattr(item.prov[0], "page_no", None)
                bb = getattr(item.prov[0], "bbox", None)
                bb = [getattr(bb, "l", None), getattr(bb, "t", None), getattr(bb, "r", None), getattr(bb, "b", None)]
                if pn and bb and len(bb) == 4:
                    bbox = _BBox(int(pn), bb[0], bb[1], bb[2], bb[3])
            yield (DoclingContentType.EQUATION.value, text, bbox)
def _transfer_to_sections(self, doc, parse_method: str) -> list[tuple]:
    """Flatten parsed doc items into section tuples.

    Tuple shape depends on `parse_method`:
      - "manual": (text, content_type, position_tag)  # 3-tuple
      - "paper":  (text + position_tag, content_type)
      - other:    (text, position_tag)
    """
    # NOTE: element shape varies by parse_method (2- or 3-tuples), hence
    # the loose list[tuple] annotation.
    sections: list[tuple] = []
    for typ, payload, bbox in self._iter_doc_items(doc):
        if typ == DoclingContentType.TEXT.value:
            section = payload.strip()
            if not section:
                continue  # drop whitespace-only text items
        elif typ == DoclingContentType.EQUATION.value:
            section = payload.strip()
        else:
            continue  # unknown content types are ignored
        # Position tag is only produced when provenance yielded a bbox.
        tag = self._make_line_tag(bbox) if isinstance(bbox, _BBox) else ""
        if parse_method == "manual":
            sections.append((section, typ, tag))
        elif parse_method == "paper":
            sections.append((section + tag, typ))
        else:
            sections.append((section, tag))
    return sections
def cropout_docling_table(self, page_no: int, bbox: tuple[float, float, float, float], zoomin: int = 1):
    """Crop a docling bbox out of the rendered page image.

    `bbox` is (left, top, right, bottom) in docling's bottom-left-origin
    coordinates; the Y axis is flipped into image coordinates. Returns
    (cropped_image, [position_tuple]) or (None, "") when the page image is
    unavailable or cropping fails.
    """
    pages = getattr(self, "page_images", None)
    if not pages:
        return None, ""
    idx = (page_no - 1) - getattr(self, "page_from", 0)
    if not (0 <= idx < len(pages)):
        return None, ""
    page_img = pages[idx]
    width, height = page_img.size
    left, top, right, bottom = bbox
    # Flip Y: docling measures from the bottom edge, PIL from the top.
    x0 = float(left)
    y0 = float(height - top)
    x1 = float(right)
    y1 = float(height - bottom)
    # Clamp into the page and guarantee a crop of at least 1x1 pixel.
    x0 = max(0.0, min(x0, width - 1))
    y0 = max(0.0, min(y0, height - 1))
    x1 = max(x0 + 1.0, min(x1, width))
    y1 = max(y0 + 1.0, min(y1, height))
    try:
        crop = page_img.crop((int(x0), int(y0), int(x1), int(y1))).convert("RGB")
    except Exception:
        return None, ""
    position = (max(page_no - 1, 0), x0, x1, y0, y1)
    return crop, [position]
def _transfer_to_tables(self, doc):
tables = []
for tab in getattr(doc, "tables", []):
img = None
positions = ""
if getattr(tab, "prov", None):
pn = getattr(tab.prov[0], "page_no", None)
bb = getattr(tab.prov[0], "bbox", None)
if pn is not None and bb is not None:
left = getattr(bb, "l", None)
top = getattr(bb, "t", None)
right = getattr(bb, "r", None)
bott = getattr(bb, "b", None)
if None not in (left, top, right, bott):
img, positions = self.cropout_docling_table(int(pn), (float(left), float(top), float(right), float(bott)))
html = ""
try:
html = tab.export_to_html(doc=doc)
except Exception:
pass
tables.append(((img, html), positions if positions else ""))
for pic in getattr(doc, "pictures", []):
img = None
positions = ""
if getattr(pic, "prov", None):
pn = getattr(pic.prov[0], "page_no", None)
bb = getattr(pic.prov[0], "bbox", None)
if pn is not None and bb is not None:
left = getattr(bb, "l", None)
top = getattr(bb, "t", None)
right = getattr(bb, "r", None)
bott = getattr(bb, "b", None)
if None not in (left, top, right, bott):
img, positions = self.cropout_docling_table(int(pn), (float(left), float(top), float(right), float(bott)))
captions = ""
try:
captions = pic.caption_text(doc=doc)
except Exception:
pass
tables.append(((img, [captions]), positions if positions else ""))
return tables
def parse_pdf(
    self,
    filepath: str | PathLike[str],
    binary: BytesIO | bytes | None = None,
    callback: Optional[Callable] = None,
    *,
    output_dir: Optional[str] = None,
    lang: Optional[str] = None,
    method: str = "auto",
    delete_output: bool = True,
    parse_method: str = "raw"
):
    """Convert a PDF with Docling and return (sections, tables).

    Args:
        filepath: Path of the PDF. When `binary` is given only its basename
            is used to name the temporary file.
        binary: Optional in-memory PDF (bytes/bytearray or BytesIO); it is
            written to disk because Docling converts from a path.
        callback: Optional progress hook, called as callback(progress, msg).
        output_dir: Directory for the temp copy of `binary`; defaults to
            `<cwd>/.docling_tmp`.
        lang: Accepted for interface compatibility; not used in this body.
        method: Accepted for interface compatibility; not used in this body.
        delete_output: When True, the temp copy written for `binary` is
            unlinked after conversion. NOTE(review): the `.docling_tmp`
            directory itself is never removed — confirm this is intended.
        parse_method: Section layout passed to _transfer_to_sections
            ("raw", "manual" or "paper").

    Returns:
        (sections, tables) from _transfer_to_sections / _transfer_to_tables.

    Raises:
        RuntimeError: when the docling package is not installed.
        FileNotFoundError: when `binary` is None and `filepath` is missing.
    """
    if not self.check_installation():
        raise RuntimeError("Docling not available, please install `docling`")
    if binary is not None:
        # Docling needs a real file; materialize the in-memory PDF first.
        tmpdir = Path(output_dir) if output_dir else Path.cwd() / ".docling_tmp"
        tmpdir.mkdir(parents=True, exist_ok=True)
        name = Path(filepath).name or "input.pdf"
        tmp_pdf = tmpdir / name
        with open(tmp_pdf, "wb") as f:
            if isinstance(binary, (bytes, bytearray)):
                f.write(binary)
            else:
                f.write(binary.getbuffer())
        src_path = tmp_pdf
    else:
        src_path = Path(filepath)
        if not src_path.exists():
            raise FileNotFoundError(f"PDF not found: {src_path}")
    if callback:
        callback(0.1, f"[Docling] Converting: {src_path}")
    try:
        # Render page images up front so sections/tables can be cropped.
        self.__images__(str(src_path), zoomin=1)
    except Exception as e:
        self.logger.warning(f"[Docling] render pages failed: {e}")
    conv = DocumentConverter()
    conv_res = conv.convert(str(src_path))
    doc = conv_res.document
    if callback:
        callback(0.7, f"[Docling] Parsed doc: {getattr(doc, 'num_pages', 'n/a')} pages")
    sections = self._transfer_to_sections(doc, parse_method=parse_method)
    tables = self._transfer_to_tables(doc)
    if callback:
        callback(0.95, f"[Docling] Sections: {len(sections)}, Tables: {len(tables)}")
    if binary is not None and delete_output:
        # Best-effort cleanup of the temp copy only.
        try:
            Path(src_path).unlink(missing_ok=True)
        except Exception:
            pass
    if callback:
        callback(1.0, "[Docling] Done.")
    return sections, tables
if __name__ == "__main__":
    # Manual smoke test: run the parser against a local sample PDF and
    # report how many sections/tables it produced.
    logging.basicConfig(level=logging.INFO)
    docling = DoclingParser()
    print("Docling available:", docling.check_installation())
    parsed_sections, parsed_tables = docling.parse_pdf(filepath="test_docling/toc.pdf", binary=None)
    print(len(parsed_sections), len(parsed_tables))
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/html_parser.py | deepdoc/parser/html_parser.py | # -*- coding: utf-8 -*-
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from rag.nlp import find_codec, rag_tokenizer
import uuid
import chardet
from bs4 import BeautifulSoup, NavigableString, Tag, Comment
import html
def get_encoding(file):
    """Detect the text encoding of *file* with chardet (reads the whole file)."""
    with open(file, 'rb') as fh:
        detection = chardet.detect(fh.read())
    return detection['encoding']
# HTML elements treated as block-level boundaries when grouping text.
BLOCK_TAGS = [
    "h1", "h2", "h3", "h4", "h5", "h6",
    "p", "div", "article", "section", "aside",
    "ul", "ol", "li",
    "table", "pre", "code", "blockquote",
    "figure", "figcaption"
]

# Heading tag -> markdown prefix.
# BUG FIX: "h4" previously mapped to "#####" (5 hashes), colliding with "h5";
# each level now gets a distinct prefix of matching depth.
TITLE_TAGS = {"h1": "#", "h2": "##", "h3": "###", "h4": "####", "h5": "#####", "h6": "######"}
class RAGFlowHtmlParser:
    """Parse HTML into token-bounded text chunks.

    Pipeline: strip style/script/comments, walk the DOM collecting text and
    tables, merge text by block id, then split into chunks of at most
    `chunk_token_num` tokens; table HTML is appended as extra sections.
    """

    def __call__(self, fnm, binary=None, chunk_token_num=512):
        """Parse an HTML file (path `fnm`) or raw `binary` content into chunks."""
        if binary:
            encoding = find_codec(binary)
            txt = binary.decode(encoding, errors="ignore")
        else:
            with open(fnm, "r", encoding=get_encoding(fnm)) as f:
                txt = f.read()
        return self.parser_txt(txt, chunk_token_num)

    @classmethod
    def parser_txt(cls, txt, chunk_token_num):
        """Parse an HTML string into a list of chunk strings (tables last)."""
        if not isinstance(txt, str):
            raise TypeError("txt type should be string!")
        temp_sections = []
        soup = BeautifulSoup(txt, "html5lib")
        # delete <style> and <script> tags
        for style_tag in soup.find_all(["style", "script"]):
            style_tag.decompose()
        # delete <script> tag in <div>
        for div_tag in soup.find_all("div"):
            for script_tag in div_tag.find_all("script"):
                script_tag.decompose()
        # delete inline style attributes
        for tag in soup.find_all(True):
            if 'style' in tag.attrs:
                del tag.attrs['style']
        # delete HTML comments
        for comment in soup.find_all(string=lambda text: isinstance(text, Comment)):
            comment.extract()
        # Collect {content, tag_name, metadata} entries from the DOM.
        cls.read_text_recursively(soup.body, temp_sections, chunk_token_num=chunk_token_num)
        block_txt_list, table_list = cls.merge_block_text(temp_sections)
        sections = cls.chunk_block(block_txt_list, chunk_token_num=chunk_token_num)
        # Tables are kept whole and appended after the text chunks.
        for table in table_list:
            sections.append(table.get("content", ""))
        return sections

    @classmethod
    def split_table(cls, html_table, chunk_token_num=512):
        """Split an HTML table into multiple <table> strings, grouping <tr>
        rows so each piece stays within `chunk_token_num` tokens."""
        soup = BeautifulSoup(html_table, "html.parser")
        rows = soup.find_all("tr")
        tables = []
        current_table = []
        current_count = 0
        table_str_list = []
        for row in rows:
            tks_str = rag_tokenizer.tokenize(str(row))
            token_count = len(tks_str.split(" ")) if tks_str else 0
            # Start a new group before this row would overflow the budget.
            if current_count + token_count > chunk_token_num:
                tables.append(current_table)
                current_table = []
                current_count = 0
            current_table.append(row)
            current_count += token_count
        if current_table:
            tables.append(current_table)
        # Re-wrap each row group in its own <table> element.
        for table_rows in tables:
            new_table = soup.new_tag("table")
            for row in table_rows:
                new_table.append(row)
            table_str_list.append(str(new_table))
        return table_str_list

    @classmethod
    def read_text_recursively(cls, element, parser_result, chunk_token_num=512, parent_name=None, block_id=None):
        """Depth-first DOM walk.

        Appends dict entries into `parser_result` (side effect) and returns
        the entries produced directly by this element: text nodes yield
        {"content", "tag_name", "metadata.block_id"}; <table> elements yield
        table entries keyed by a fresh table_id; other tags recurse into
        children, minting a new block_id at each block-level tag.
        """
        if isinstance(element, NavigableString):
            content = element.strip()

            def is_valid_html(content):
                # True when the string itself parses into at least one tag
                # (i.e. escaped/nested HTML embedded in a text node).
                try:
                    soup = BeautifulSoup(content, "html.parser")
                    return bool(soup.find())
                except Exception:
                    return False

            return_info = []
            if content:
                if is_valid_html(content):
                    # Text node actually contains markup: re-parse and recurse.
                    soup = BeautifulSoup(content, "html.parser")
                    child_info = cls.read_text_recursively(soup, parser_result, chunk_token_num, element.name, block_id)
                    parser_result.extend(child_info)
                else:
                    info = {"content": element.strip(), "tag_name": "inner_text", "metadata": {"block_id": block_id}}
                    if parent_name:
                        info["tag_name"] = parent_name
                    return_info.append(info)
            return return_info
        elif isinstance(element, Tag):
            if str.lower(element.name) == "table":
                # Tables are captured whole (unescaped HTML), not recursed into.
                table_info_list = []
                table_id = str(uuid.uuid1())
                table_list = [html.unescape(str(element))]
                for t in table_list:
                    table_info_list.append({"content": t, "tag_name": "table",
                                            "metadata": {"table_id": table_id, "index": table_list.index(t)}})
                return table_info_list
            else:
                if str.lower(element.name) in BLOCK_TAGS:
                    # New block scope: descendants share this block_id.
                    block_id = str(uuid.uuid1())
                for child in element.children:
                    child_info = cls.read_text_recursively(child, parser_result, chunk_token_num, element.name,
                                                           block_id)
                    parser_result.extend(child_info)
        return []

    @classmethod
    def merge_block_text(cls, parser_result):
        """Merge consecutive entries sharing a block_id into one text block.

        Returns (block_texts, table_entries); heading text gets its markdown
        prefix from TITLE_TAGS.
        """
        block_content = []
        current_content = ""
        table_info_list = []
        last_block_id = None
        for item in parser_result:
            content = item.get("content")
            tag_name = item.get("tag_name")
            title_flag = tag_name in TITLE_TAGS
            block_id = item.get("metadata", {}).get("block_id")
            if block_id:
                if title_flag:
                    content = f"{TITLE_TAGS[tag_name]} {content}"
                if last_block_id != block_id:
                    # Block boundary: flush the accumulated block.
                    if last_block_id is not None:
                        block_content.append(current_content)
                    current_content = content
                    last_block_id = block_id
                else:
                    current_content += (" " if current_content else "") + content
            else:
                if tag_name == "table":
                    table_info_list.append(item)
                else:
                    # Entries without a block_id join the current block.
                    current_content += (" " if current_content else "") + content
        if current_content:
            block_content.append(current_content)
        return block_content, table_info_list

    @classmethod
    def chunk_block(cls, block_txt_list, chunk_token_num=512):
        """Pack text blocks into chunks of at most `chunk_token_num` tokens.

        Oversized single blocks are hard-split on token boundaries.
        """
        chunks = []
        current_block = ""
        current_token_count = 0
        for block in block_txt_list:
            tks_str = rag_tokenizer.tokenize(block)
            block_token_count = len(tks_str.split(" ")) if tks_str else 0
            if block_token_count > chunk_token_num:
                # Block alone exceeds the budget: flush and hard-split it.
                if current_block:
                    chunks.append(current_block)
                start = 0
                tokens = tks_str.split(" ")
                while start < len(tokens):
                    end = start + chunk_token_num
                    split_tokens = tokens[start:end]
                    chunks.append(" ".join(split_tokens))
                    start = end
                current_block = ""
                current_token_count = 0
            else:
                if current_token_count + block_token_count <= chunk_token_num:
                    current_block += ("\n" if current_block else "") + block
                    current_token_count += block_token_count
                else:
                    chunks.append(current_block)
                    current_block = block
                    current_token_count = block_token_count
        if current_block:
            chunks.append(current_block)
        return chunks
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/figure_parser.py | deepdoc/parser/figure_parser.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
from PIL import Image
from common.constants import LLMType
from api.db.services.llm_service import LLMBundle
from common.connection_utils import timeout
from rag.app.picture import vision_llm_chunk as picture_vision_llm_chunk
from rag.prompts.generator import vision_llm_figure_describe_prompt
def vision_figure_parser_figure_data_wrapper(figures_data_without_positions):
    """Wrap (caption, image) pairs into VisionFigureParser's expected shape.

    Each input pair becomes ((image, [caption]), [(0, 0, 0, 0, 0)]) — a
    dummy all-zero position is attached. Entries whose second element is
    not a PIL Image are dropped; falsy input yields [].
    """
    if not figures_data_without_positions:
        return []
    wrapped = []
    for figure_data in figures_data_without_positions:
        if not isinstance(figure_data[1], Image.Image):
            continue
        wrapped.append(((figure_data[1], [figure_data[0]]), [(0, 0, 0, 0, 0)]))
    return wrapped
def vision_figure_parser_docx_wrapper(sections, tbls, callback=None, **kwargs):
    """Boost DOCX figure extraction with a vision model when available.

    Appends vision-generated figure descriptions to `tbls` (mutated in
    place) and returns it; on any failure `tbls` is returned unchanged.
    """
    if not sections:
        return tbls
    try:
        vision_model = LLMBundle(kwargs["tenant_id"], LLMType.IMAGE2TEXT)
        # BUG FIX: callback defaults to None; the previously unguarded call
        # raised TypeError here, which the except swallowed — silently
        # discarding a perfectly good vision model.
        if callback:
            callback(0.7, "Visual model detected. Attempting to enhance figure extraction...")
    except Exception:
        vision_model = None
    if vision_model:
        figures_data = vision_figure_parser_figure_data_wrapper(sections)
        try:
            docx_vision_parser = VisionFigureParser(vision_model=vision_model, figures_data=figures_data, **kwargs)
            boosted_figures = docx_vision_parser(callback=callback)
            tbls.extend(boosted_figures)
        except Exception as e:
            # BUG FIX: guard the error callback too — with callback=None the
            # original raised TypeError out of this handler.
            if callback:
                callback(0.8, f"Visual model error: {e}. Skipping figure parsing enhancement.")
    return tbls
def vision_figure_parser_figure_xlsx_wrapper(images, callback=None, **kwargs):
    """Describe Excel-embedded images with a vision model when available.

    `images` is a list of dicts with "image" (PIL Image) and
    "image_description". Returns the list of boosted figure entries, or []
    when no vision model is configured or parsing fails.
    """
    tbls = []
    if not images:
        return []
    try:
        vision_model = LLMBundle(kwargs["tenant_id"], LLMType.IMAGE2TEXT)
        # BUG FIX: guard progress callbacks — callback defaults to None and the
        # unguarded call's TypeError used to silently disable the vision model.
        if callback:
            callback(0.2, "Visual model detected. Attempting to enhance Excel image extraction...")
    except Exception:
        vision_model = None
    if vision_model:
        figures_data = [
            (
                (img["image"], [img["image_description"]]),  # (Image.Image, [description])
                [(0, 0, 0, 0, 0)],  # dummy position
            )
            for img in images
        ]
        try:
            parser = VisionFigureParser(vision_model=vision_model, figures_data=figures_data, **kwargs)
            if callback:
                callback(0.22, "Parsing images...")
            boosted_figures = parser(callback=callback)
            tbls.extend(boosted_figures)
        except Exception as e:
            if callback:
                callback(0.25, f"Excel visual model error: {e}. Skipping vision enhancement.")
    return tbls
def vision_figure_parser_pdf_wrapper(tbls, callback=None, **kwargs):
    """Boost PDF figure items in `tbls` with a vision model when available.

    Figure-shaped entries are replaced by their vision-boosted versions
    (appended at the end); non-figure entries are preserved. Without a
    vision model the input list is returned unchanged ([] when empty).
    """
    if not tbls:
        return []
    try:
        vision_model = LLMBundle(kwargs["tenant_id"], LLMType.IMAGE2TEXT)
        # BUG FIX: guard the callback — it defaults to None and the unguarded
        # call's TypeError used to silently disable the vision model.
        if callback:
            callback(0.7, "Visual model detected. Attempting to enhance figure extraction...")
    except Exception:
        vision_model = None
    if vision_model:
        def is_figure_item(item):
            # Figure entries look like ((Image, [desc]), positions).
            return (
                isinstance(item[0][0], Image.Image) and
                isinstance(item[0][1], list)
            )

        figures_data = [item for item in tbls if is_figure_item(item)]
        try:
            docx_vision_parser = VisionFigureParser(vision_model=vision_model, figures_data=figures_data, **kwargs)
            boosted_figures = docx_vision_parser(callback=callback)
            tbls = [item for item in tbls if not is_figure_item(item)]
            tbls.extend(boosted_figures)
        except Exception as e:
            # BUG FIX: with callback=None the original raised TypeError here.
            if callback:
                callback(0.8, f"Visual model error: {e}. Skipping figure parsing enhancement.")
    return tbls
# Module-wide pool shared by all VisionFigureParser instances so concurrent
# documents don't each spin up their own threads.
shared_executor = ThreadPoolExecutor(max_workers=10)


class VisionFigureParser:
    """Describe figures with a vision LLM, preserving any position info.

    Accepts figure entries either as ((image, [description]), positions)
    or as (image, [description]); see _extract_figures_info.
    """

    def __init__(self, vision_model, figures_data, *args, **kwargs):
        self.vision_model = vision_model
        self._extract_figures_info(figures_data)
        # Invariants: one description list per figure; positions either
        # absent entirely or given for every figure.
        assert len(self.figures) == len(self.descriptions)
        assert not self.positions or (len(self.figures) == len(self.positions))

    def _extract_figures_info(self, figures_data):
        """Split figure entries into parallel figures/descriptions/positions lists."""
        self.figures = []
        self.descriptions = []
        self.positions = []
        for item in figures_data:
            # Positioned form: ((image, [desc]), [(pn, x0, x1, y0, y1), ...]).
            if len(item) == 2 and isinstance(item[0], tuple) and len(item[0]) == 2 and isinstance(item[1], list) and isinstance(item[1][0], tuple) and len(item[1][0]) == 5:
                img_desc = item[0]
                assert len(img_desc) == 2 and isinstance(img_desc[0], Image.Image) and isinstance(img_desc[1], list), "Should be (figure, [description])"
                self.figures.append(img_desc[0])
                self.descriptions.append(img_desc[1])
                self.positions.append(item[1])
            else:
                # Position-less form: (image, [description]).
                assert len(item) == 2 and isinstance(item[0], Image.Image) and isinstance(item[1], list), f"Unexpected form of figure data: get {len(item)=}, {item=}"
                self.figures.append(item[0])
                self.descriptions.append(item[1])

    def _assemble(self):
        """Zip figures/descriptions(/positions) back into output entries."""
        self.assembled = []
        self.has_positions = len(self.positions) != 0
        for i in range(len(self.figures)):
            figure = self.figures[i]
            desc = self.descriptions[i]
            pos = self.positions[i] if self.has_positions else None
            figure_desc = (figure, desc)
            if pos is not None:
                self.assembled.append((figure_desc, pos))
            else:
                self.assembled.append((figure_desc,))
        return self.assembled

    def __call__(self, **kwargs):
        """Run the vision model over every figure (in the shared pool) and
        prepend the generated text to each figure's description."""
        callback = kwargs.get("callback", lambda prog, msg: None)

        # Per-figure worker; bounded by the timeout decorator.
        @timeout(30, 3)
        def process(figure_idx, figure_binary):
            description_text = picture_vision_llm_chunk(
                binary=figure_binary,
                vision_model=self.vision_model,
                prompt=vision_llm_figure_describe_prompt(),
                callback=callback,
            )
            return figure_idx, description_text

        futures = []
        for idx, img_binary in enumerate(self.figures or []):
            futures.append(shared_executor.submit(process, idx, img_binary))
        for future in as_completed(futures):
            figure_num, txt = future.result()
            if txt:
                # NOTE(review): this replaces the description *list* with a
                # string (model text + joined originals) — verify downstream
                # consumers accept both shapes.
                self.descriptions[figure_num] = txt + "\n".join(self.descriptions[figure_num])
        self._assemble()
        return self.assembled
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/resume/__init__.py | deepdoc/parser/resume/__init__.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
def refactor(cv):
    """Normalise a parsed resume dict in place and return it.

    Drops transient parser fields, converts the repeated sections
    (education/work/...) into index-keyed dicts, promotes latest-job and
    latest-education facts into cv["basic"], and stamps updated_at.
    """
    # Remove raw parser artifacts that must not be persisted.
    for n in [
        "raw_txt",
        "parser_name",
        "inference",
        "ori_text",
        "use_time",
        "time_stat",
    ]:
        if n in cv and cv[n] is not None:
            del cv[n]
    cv["is_deleted"] = 0

    if "basic" not in cv:
        cv["basic"] = {}
    if cv["basic"].get("photo2"):
        del cv["basic"]["photo2"]

    # Normalise each repeated section to {"0": item0, "1": item1, ...},
    # stripping per-item "external" payloads.
    for n in [
        "education",
        "work",
        "certificate",
        "project",
        "language",
        "skill",
        "training",
    ]:
        if n not in cv or cv[n] is None:
            continue
        if isinstance(cv[n], dict):
            cv[n] = [v for _, v in cv[n].items()]
        if not isinstance(cv[n], list):
            del cv[n]
            continue
        vv = []
        for v in cv[n]:
            if "external" in v and v["external"] is not None:
                del v["external"]
            vv.append(v)
        cv[n] = {str(i): vv[i] for i in range(len(vv))}

    # Rename legacy salary keys to their current names.
    basics = [
        ("basic_salary_month", "salary_month"),
        ("expect_annual_salary_from", "expect_annual_salary"),
    ]
    for n, t in basics:
        if cv["basic"].get(n):
            cv["basic"][t] = cv["basic"][n]
            del cv["basic"][n]

    # Chronological ordering (missing start_time sorts first).
    work = sorted(
        [v for _, v in cv.get("work", {}).items()],
        key=lambda x: x.get("start_time", ""),
    )
    edu = sorted(
        [v for _, v in cv.get("education", {}).items()],
        key=lambda x: x.get("start_time", ""),
    )

    if work:
        # Earliest job marks career start; latest job supplies current facts.
        cv["basic"]["work_start_time"] = work[0].get("start_time", "")
        cv["basic"]["management_experience"] = (
            "Y"
            if any([w.get("management_experience", "") == "Y" for w in work])
            else "N"
        )
        cv["basic"]["annual_salary"] = work[-1].get("annual_salary_from", "0")
        for n in [
            "annual_salary_from",
            "annual_salary_to",
            "industry_name",
            "position_name",
            "responsibilities",
            "corporation_type",
            "scale",
            "corporation_name",
        ]:
            cv["basic"][n] = work[-1].get(n, "")

    if edu:
        # Latest education supplies school/major.
        for n in ["school_name", "discipline_name"]:
            if n in edu[-1]:
                cv["basic"][n] = edu[-1][n]

    cv["basic"]["updated_at"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    if "contact" not in cv:
        cv["contact"] = {}
    if not cv["contact"].get("name"):
        cv["contact"]["name"] = cv["basic"].get("name", "")
    return cv
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/resume/step_one.py | deepdoc/parser/resume/step_one.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from deepdoc.parser.resume.entities import degrees, regions, industries
# Output schema of step_one.refactor(): "<column_name> <type>" strings, in
# the exact order of the values returned. Only the name part (split()[0]) is
# used when zipping with the DataFrame row.
FIELDS = [
    "address STRING",
    "annual_salary int",
    "annual_salary_from int",
    "annual_salary_to int",
    "birth STRING",
    "card STRING",
    "certificate_obj string",
    "city STRING",
    "corporation_id int",
    "corporation_name STRING",
    "corporation_type STRING",
    "degree STRING",
    "discipline_name STRING",
    "education_obj string",
    "email STRING",
    "expect_annual_salary int",
    "expect_city_names string",
    "expect_industry_name STRING",
    "expect_position_name STRING",
    "expect_salary_from int",
    "expect_salary_to int",
    "expect_type STRING",
    "gender STRING",
    "industry_name STRING",
    "industry_names STRING",
    "is_deleted STRING",
    "is_fertility STRING",
    "is_house STRING",
    "is_management_experience STRING",
    "is_marital STRING",
    "is_oversea STRING",
    "language_obj string",
    "name STRING",
    "nation STRING",
    "phone STRING",
    "political_status STRING",
    "position_name STRING",
    "project_obj string",
    "responsibilities string",
    "salary_month int",
    "scale STRING",
    "school_name STRING",
    "self_remark string",
    "skill_obj string",
    "title_name STRING",
    "tob_resume_id STRING",
    "updated_at Timestamp",
    "wechat STRING",
    "work_obj string",
    "work_experience int",
    "work_start_time BIGINT"
]
def refactor(df):
    """Flatten a one-row resume DataFrame into a {field: value} dict.

    Parses the JSON in df["resume_content"], spreads its nested sections
    into flat columns, maps coded values (degree, region, gender, Y/N
    flags) to display strings, and returns a dict keyed by the FIELDS
    column names, zipped against the first row.
    """

    def deal_obj(obj, k, kk):
        # Safe two-level dict lookup: obj[k][kk], "" on any shape mismatch.
        if not isinstance(obj, type({})):
            return ""
        obj = obj.get(k, {})
        if not isinstance(obj, type({})):
            return ""
        return obj.get(kk, "")

    def loadjson(line):
        # Tolerant JSON parse: invalid content becomes an empty dict.
        try:
            return json.loads(line)
        except Exception:
            pass
        return {}

    df["obj"] = df["resume_content"].map(lambda x: loadjson(x))
    df.fillna("", inplace=True)

    # Columns accumulated as they are extracted.
    clms = ["tob_resume_id", "updated_at"]

    def extract(nms, cc=None):
        # Pull fields out of the parsed object into flat columns.
        # With cc: read obj[cc][name]; without: JSON-encode obj[name] when it
        # is a dict (or falsy), else stringify it.
        nonlocal clms
        clms.extend(nms)
        for c in nms:
            if cc:
                df[c] = df["obj"].map(lambda x: deal_obj(x, cc, c))
            else:
                df[c] = df["obj"].map(
                    lambda x: json.dumps(x.get(c, {}), ensure_ascii=False)
                    if isinstance(x, type({})) and (isinstance(x.get(c), type({})) or not x.get(c))
                    else str(x).replace("None", ""))

    extract(["education", "work", "certificate", "project", "language",
             "skill"])
    extract(["wechat", "phone", "is_deleted",
             "name", "tel", "email"], "contact")
    extract(["nation", "expect_industry_name", "salary_month",
             "industry_ids", "is_house", "birth", "annual_salary_from",
             "annual_salary_to", "card",
             "expect_salary_to", "expect_salary_from",
             "expect_position_name", "gender", "city",
             "is_fertility", "expect_city_names",
             "political_status", "title_name", "expect_annual_salary",
             "industry_name", "address", "position_name", "school_name",
             "corporation_id",
             "is_oversea", "responsibilities",
             "work_start_time", "degree", "management_experience",
             "expect_type", "corporation_type", "scale", "corporation_name",
             "self_remark", "annual_salary", "work_experience",
             "discipline_name", "marital", "updated_at"], "basic")

    # Map coded values to display strings.
    df["degree"] = df["degree"].map(lambda x: degrees.get_name(x))
    df["address"] = df["address"].map(lambda x: " ".join(regions.get_names(x)))
    df["industry_names"] = df["industry_ids"].map(
        lambda x: " ".join([" ".join(industries.get_names(i)) for i in
                            str(x).split(",")]))
    clms.append("industry_names")

    def arr2str(a):
        # List -> space-joined string; commas are always stripped to spaces.
        if not a:
            return ""
        if isinstance(a, list):
            a = " ".join([str(i) for i in a])
        return str(a).replace(",", " ")

    df["expect_industry_name"] = df["expect_industry_name"].map(
        lambda x: arr2str(x))
    df["gender"] = df["gender"].map(
        lambda x: "男" if x == 'M' else (
            "女" if x == 'F' else ""))
    # Y/N flags -> 是/否.
    for c in ["is_fertility", "is_oversea", "is_house",
              "management_experience", "marital"]:
        df[c] = df[c].map(
            lambda x: '是' if x == 'Y' else (
                '否' if x == 'N' else ""))
    df["is_management_experience"] = df["management_experience"]
    df["is_marital"] = df["marital"]
    clms.extend(["is_management_experience", "is_marital"])

    df.fillna("", inplace=True)
    # Fall back to the landline ("tel") when no mobile phone was given.
    for i in range(len(df)):
        if not df.loc[i, "phone"].strip() and df.loc[i, "tel"].strip():
            df.loc[i, "phone"] = df.loc[i, "tel"].strip()

    # Drop intermediate columns from the output column list.
    for n in ["industry_ids", "management_experience", "marital", "tel"]:
        for i in range(len(clms)):
            if clms[i] == n:
                del clms[i]
                break
    clms = list(set(clms))
    # Sorted column order must line up with FIELDS below.
    df = df.reindex(sorted(clms), axis=1)
    # Escape tabs/newlines so values stay single-line.
    for c in clms:
        df[c] = df[c].map(
            lambda s: str(s).replace(
                "\t",
                " ").replace(
                "\n",
                "\\n").replace(
                "\r",
                "\\n"))
    return dict(zip([n.split()[0] for n in FIELDS], df.values.tolist()[0]))
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/resume/step_two.py | deepdoc/parser/resume/step_two.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import re
import copy
import time
import datetime
import demjson3
import traceback
import signal
import numpy as np
from deepdoc.parser.resume.entities import degrees, schools, corporations
from rag.nlp import rag_tokenizer, surname
from xpinyin import Pinyin
from contextlib import contextmanager
class TimeoutException(Exception):
    """Raised by time_limit() when the guarded block exceeds its deadline."""
    pass
@contextmanager
def time_limit(seconds):
    """Raise TimeoutException if the managed block runs longer than *seconds*.

    SIGALRM-based: Unix only, usable from the main thread only. The alarm
    is always cancelled on exit.
    """
    def _on_alarm(signum, frame):
        raise TimeoutException("Timed out!")

    signal.signal(signal.SIGALRM, _on_alarm)
    signal.alarm(seconds)
    try:
        yield
    finally:
        signal.alarm(0)
# Module-level environment handle; stays None unless set elsewhere.
ENV = None
# Shared pinyin converter instance (xpinyin).
PY = Pinyin()
def rmHtmlTag(line):
    """Replace simple HTML tags in *line* with single spaces (case-insensitive)."""
    tag_pattern = r"<[a-z0-9.\"=';,:\+_/ -]+>"
    return re.sub(tag_pattern, " ", line, count=100000, flags=re.IGNORECASE)
def highest_degree(dg):
    """Return the highest-ranking degree name from *dg*.

    Accepts a single name or a list; unknown names rank lowest (-1).
    Falsy input returns "". Ties resolve to the earliest list element.
    """
    if not dg:
        return ""
    if isinstance(dg, str):
        dg = [dg]
    rank = {"初中": 0, "高中": 1, "中专": 2, "大专": 3, "专升本": 4, "本科": 5, "硕士": 6, "博士": 7, "博士后": 8}
    return max(dg, key=lambda name: rank.get(name, -1))
def forEdu(cv):
    """Derive education features on the resume dict *cv* (mutated in place).

    Builds school/major/degree keyword fields, school-rank tags, first-degree
    facts and education time bounds from cv["education_obj"]. Without any
    education data, the integrity score is discounted and cv returned as-is.
    """
    if not cv.get("education_obj"):
        cv["integerity_flt"] *= 0.8
        return cv

    first_fea, fea, maj, fmaj, deg, fdeg, sch, fsch, st_dt, ed_dt = [], [], [], [], [], [], [], [], [], []
    edu_nst = []
    edu_end_dt = ""
    cv["school_rank_int"] = 1000000
    # Iterate education entries in chronological order (missing start_time
    # sorts after real dates via the "3" sentinel).
    for ii, n in enumerate(sorted(cv["education_obj"], key=lambda x: x.get("start_time", "3"))):
        e = {}
        if n.get("end_time"):
            if n["end_time"] > edu_end_dt:
                edu_end_dt = n["end_time"]
            try:
                dt = n["end_time"]
                if re.match(r"[0-9]{9,}", dt):
                    dt = turnTm2Dt(dt)  # epoch-like string -> date string
                y, m, d = getYMD(dt)
                ed_dt.append(str(y))
                e["end_dt_kwd"] = str(y)
            # BUG FIX: this was `except Exception as e: pass`. Python deletes
            # the `as` name when the except block exits, which unbound the
            # education-entry dict `e` built above and made the later
            # e[...] / edu_nst.append(e) raise NameError after a parse error.
            except Exception:
                pass
        if n.get("start_time"):
            try:
                dt = n["start_time"]
                if re.match(r"[0-9]{9,}", dt):
                    dt = turnTm2Dt(dt)
                y, m, d = getYMD(dt)
                st_dt.append(str(y))
                e["start_dt_kwd"] = str(y)
            except Exception:
                pass

        # School lookup: collect prestige features and best (lowest) rank.
        r = schools.select(n.get("school_name", ""))
        if r:
            if str(r.get("type", "")) == "1":
                fea.append("211")
            if str(r.get("type", "")) == "2":
                fea.append("211")
            if str(r.get("is_abroad", "")) == "1":
                fea.append("留学")
            if str(r.get("is_double_first", "")) == "1":
                fea.append("双一流")
            if str(r.get("is_985", "")) == "1":
                fea.append("985")
            if str(r.get("is_world_known", "")) == "1":
                fea.append("海外知名")
            if r.get("rank") and cv["school_rank_int"] > r["rank"]:
                cv["school_rank_int"] = r["rank"]

        if n.get("school_name") and isinstance(n["school_name"], str):
            # Strip prestige markers/punctuation from the display name.
            sch.append(re.sub(r"(211|985|重点大学|[,&;;-])", "", n["school_name"]))
            e["sch_nm_kwd"] = sch[-1]
            fea.append(rag_tokenizer.fine_grained_tokenize(rag_tokenizer.tokenize(n.get("school_name", ""))).split()[-1])

        if n.get("discipline_name") and isinstance(n["discipline_name"], str):
            maj.append(n["discipline_name"])
            e["major_kwd"] = n["discipline_name"]

        # Assume bachelor ("1") for the first 985 entry without a degree.
        if not n.get("degree") and "985" in fea and not first_fea:
            n["degree"] = "1"
        if n.get("degree"):
            d = degrees.get_name(n["degree"])
            if d:
                e["degree_kwd"] = d
            # Demote "本科" to "专升本" when preceded by an associate degree
            # or the school looks like adult/self-study education.
            if d == "本科" and ("专科" in deg or "专升本" in deg or "中专" in deg or "大专" in deg or re.search(r"(成人|自考|自学考试)", n.get("school_name", ""))):
                d = "专升本"
            if d:
                deg.append(d)

            # for first degree
            if not fdeg and d in ["中专", "专升本", "专科", "本科", "大专"]:
                fdeg = [d]
                if n.get("school_name"):
                    fsch = [n["school_name"]]
                if n.get("discipline_name"):
                    fmaj = [n["discipline_name"]]
                first_fea = copy.deepcopy(fea)

        edu_nst.append(e)

    # Bucket the best school rank into a coarse tier keyword.
    cv["sch_rank_kwd"] = []
    if cv["school_rank_int"] <= 20 or ("海外名校" in fea and cv["school_rank_int"] <= 200):
        cv["sch_rank_kwd"].append("顶尖学校")
    elif 50 >= cv["school_rank_int"] > 20 or ("海外名校" in fea and 500 >= cv["school_rank_int"] > 200):
        cv["sch_rank_kwd"].append("精英学校")
    elif cv["school_rank_int"] > 50 and ("985" in fea or "211" in fea) or ("海外名校" in fea and cv["school_rank_int"] > 500):
        cv["sch_rank_kwd"].append("优质学校")
    else:
        cv["sch_rank_kwd"].append("一般学校")

    if edu_nst:
        cv["edu_nst"] = edu_nst
    if fea:
        cv["edu_fea_kwd"] = list(set(fea))
    if first_fea:
        cv["edu_first_fea_kwd"] = list(set(first_fea))
    if maj:
        cv["major_kwd"] = maj
    if fsch:
        cv["first_school_name_kwd"] = fsch
    if fdeg:
        cv["first_degree_kwd"] = fdeg
    if fmaj:
        cv["first_major_kwd"] = fmaj
    if st_dt:
        cv["edu_start_kwd"] = st_dt
    if ed_dt:
        cv["edu_end_kwd"] = ed_dt
    if ed_dt:
        cv["edu_end_int"] = max([int(t) for t in ed_dt])

    if deg:
        # Bachelor on top of an associate degree means "专升本" overall.
        if "本科" in deg and "专科" in deg:
            deg.append("专升本")
            deg = [d for d in deg if d != '本科']
        cv["degree_kwd"] = deg
        cv["highest_degree_kwd"] = highest_degree(deg)

    if edu_end_dt:
        try:
            if re.match(r"[0-9]{9,}", edu_end_dt):
                edu_end_dt = turnTm2Dt(edu_end_dt)
            if edu_end_dt.strip("\n") == "至今":
                edu_end_dt = cv.get("updated_at_dt", str(datetime.date.today()))
            y, m, d = getYMD(edu_end_dt)
            # Cap work experience at years elapsed since graduation.
            cv["work_exp_flt"] = min(int(str(datetime.date.today())[0:4]) - int(y), cv.get("work_exp_flt", 1000))
        except Exception as e:
            logging.exception("forEdu {} {} {}".format(e, edu_end_dt, cv.get("work_exp_flt")))

    if sch:
        cv["school_name_kwd"] = sch
        # "好学校/好学历" tags: any good school, provided the degree mix is
        # bachelor-or-better (or no degree info at all).
        if (len(cv.get("degree_kwd", [])) >= 1 and "本科" in cv["degree_kwd"]) \
                or all([c.lower() in ["硕士", "博士", "mba", "博士后"] for c in cv.get("degree_kwd", [])]) \
                or not cv.get("degree_kwd"):
            for c in sch:
                if schools.is_good(c):
                    if "tag_kwd" not in cv:
                        cv["tag_kwd"] = []
                    cv["tag_kwd"].append("好学校")
                    cv["tag_kwd"].append("好学历")
                    break

    # Degree-only "好学历" tag for strong degree combinations.
    if (len(cv.get("degree_kwd", [])) >= 1 and "本科" in cv["degree_kwd"] and
        any([d.lower() in ["硕士", "博士", "mba", "博士"] for d in cv.get("degree_kwd", [])])) \
            or all([d.lower() in ["硕士", "博士", "mba", "博士后"] for d in cv.get("degree_kwd", [])]) \
            or any([d in ["mba", "emba", "博士后"] for d in cv.get("degree_kwd", [])]):
        if "tag_kwd" not in cv:
            cv["tag_kwd"] = []
        if "好学历" not in cv["tag_kwd"]:
            cv["tag_kwd"].append("好学历")

    # Tokenized variants for full-text search.
    if cv.get("major_kwd"):
        cv["major_tks"] = rag_tokenizer.tokenize(" ".join(maj))
    if cv.get("school_name_kwd"):
        cv["school_name_tks"] = rag_tokenizer.tokenize(" ".join(sch))
    if cv.get("first_school_name_kwd"):
        cv["first_school_name_tks"] = rag_tokenizer.tokenize(" ".join(fsch))
    if cv.get("first_major_kwd"):
        cv["first_major_tks"] = rag_tokenizer.tokenize(" ".join(fmaj))

    return cv
def forProj(cv):
    """Derive project-related tokenized fields from cv["project_obj"]."""
    if not cv.get("project_obj"):
        return cv

    names = []
    descriptions = []
    # Most recently updated project first.
    projects = sorted(
        cv.get("project_obj", []),
        key=lambda p: str(p.get("updated_at", "")) if isinstance(p, dict) else "",
        reverse=True,
    )
    for proj in projects:
        if proj.get("name"):
            names.append(proj["name"])
        # NOTE: "achivement" (sic) is the key actually present in the data.
        for fld in ("describe", "responsibilities", "achivement"):
            if proj.get(fld):
                descriptions.append(str(proj[fld]))

    if names:
        cv["project_name_tks"] = rag_tokenizer.tokenize(names[0])
    if descriptions:
        cv["pro_desc_ltks"] = rag_tokenizer.tokenize(rmHtmlTag(" ".join(descriptions)))
        cv["project_desc_ltks"] = rag_tokenizer.tokenize(rmHtmlTag(descriptions[0]))
    return cv
def json_loads(line):
    """Decode a lax-JSON string via demjson3.

    Bare Python booleans (True/False) in the input are wrapped in quotes
    first so they decode as strings instead of failing.
    """
    normalized = re.sub(r": *(True|False)", r": '\1'", line)
    return demjson3.decode(normalized)
def forWork(cv):
    """Flatten cv["work_obj"] (work history) into searchable fields on cv.

    Produces tokenized/keyword fields for position, corporation, industry
    and responsibilities, company-quality tags, per-job duration stats and
    a work-experience estimate.  Missing history lowers the completeness
    score (integerity_flt) by 30%.
    """
    if not cv.get("work_obj"):
        cv["integerity_flt"] *= 0.7
        return cv

    flds = ["position_name", "corporation_name", "corporation_id", "responsibilities",
            "industry_name", "subordinates_count"]
    duas = []          # per-job durations in days
    scales = []        # company head-count figures
    fea = {c: [] for c in flds}
    latest_job_tm = ""
    goodcorp = False   # most recent employer is a "good" company
    goodcorp_ = False  # some earlier employer is a "good" company
    work_st_tm = ""    # earliest start_time across all jobs
    corp_tags = []
    # Iterate jobs newest-first by start_time; index 0 is the latest job.
    for i, n in enumerate(
            sorted(cv.get("work_obj", []), key=lambda x: str(x.get("start_time", "")) if isinstance(x, dict) else "",
                   reverse=True)):
        if isinstance(n, str):
            # Entries may arrive serialized; skip the ones that fail to parse.
            try:
                n = json_loads(n)
            except Exception:
                continue

        if n.get("start_time") and (not work_st_tm or n["start_time"] < work_st_tm):
            work_st_tm = n["start_time"]
        for c in flds:
            if not n.get(c) or str(n[c]) == '0':
                # Placeholder keeps list positions aligned across fields.
                fea[c].append("")
                continue
            if c == "corporation_name":
                n[c] = corporations.corpNorm(n[c], False)
                if corporations.is_good(n[c]):
                    if i == 0:
                        goodcorp = True
                    else:
                        goodcorp_ = True
                ct = corporations.corp_tag(n[c])
                if i == 0:
                    corp_tags.extend(ct)
                elif ct and ct[0] != "软外":
                    # Past employers get a "(曾)" (former) suffix on their tags.
                    corp_tags.extend([f"{t}(曾)" for t in ct])
            fea[c].append(rmHtmlTag(str(n[c]).lower()))

        y, m, d = getYMD(n.get("start_time"))
        if not y or not m:
            continue
        st = "%s-%02d-%02d" % (y, int(m), int(d))
        latest_job_tm = st

        y, m, d = getYMD(n.get("end_time"))
        if (not y or not m) and i > 0:
            continue
        if not y or not m or int(y) > 2022:
            # Missing/implausible end date: fall back to the record's update time.
            y, m, d = getYMD(str(n.get("updated_at", "")))
        if not y or not m:
            continue
        ed = "%s-%02d-%02d" % (y, int(m), int(d))

        try:
            duas.append((datetime.datetime.strptime(ed, "%Y-%m-%d") - datetime.datetime.strptime(st, "%Y-%m-%d")).days)
        except Exception:
            logging.exception("forWork {} {}".format(n.get("start_time"), n.get("end_time")))

        if n.get("scale"):
            r = re.search(r"^([0-9]+)", str(n["scale"]))
            if r:
                scales.append(int(r.group(1)))

    if goodcorp:
        if "tag_kwd" not in cv:
            cv["tag_kwd"] = []
        cv["tag_kwd"].append("好公司")
    if goodcorp_:
        if "tag_kwd" not in cv:
            cv["tag_kwd"] = []
        cv["tag_kwd"].append("好公司(曾)")
    if corp_tags:
        if "tag_kwd" not in cv:
            cv["tag_kwd"] = []
        cv["tag_kwd"].extend(corp_tags)
        cv["corp_tag_kwd"] = [c for c in corp_tags if re.match(r"(综合|行业)", c)]
    if latest_job_tm:
        cv["latest_job_dt"] = latest_job_tm
    if fea["corporation_id"]:
        cv["corporation_id"] = fea["corporation_id"]

    # First (most recent) value becomes the primary field; the rest are joined.
    if fea["position_name"]:
        cv["position_name_tks"] = rag_tokenizer.tokenize(fea["position_name"][0])
        cv["position_name_sm_tks"] = rag_tokenizer.fine_grained_tokenize(cv["position_name_tks"])
        cv["pos_nm_tks"] = rag_tokenizer.tokenize(" ".join(fea["position_name"][1:]))
    if fea["industry_name"]:
        cv["industry_name_tks"] = rag_tokenizer.tokenize(fea["industry_name"][0])
        cv["industry_name_sm_tks"] = rag_tokenizer.fine_grained_tokenize(cv["industry_name_tks"])
        cv["indu_nm_tks"] = rag_tokenizer.tokenize(" ".join(fea["industry_name"][1:]))
    if fea["corporation_name"]:
        cv["corporation_name_kwd"] = fea["corporation_name"][0]
        cv["corp_nm_kwd"] = fea["corporation_name"]
        cv["corporation_name_tks"] = rag_tokenizer.tokenize(fea["corporation_name"][0])
        cv["corporation_name_sm_tks"] = rag_tokenizer.fine_grained_tokenize(cv["corporation_name_tks"])
        cv["corp_nm_tks"] = rag_tokenizer.tokenize(" ".join(fea["corporation_name"][1:]))
    if fea["responsibilities"]:
        cv["responsibilities_ltks"] = rag_tokenizer.tokenize(fea["responsibilities"][0])
        cv["resp_ltks"] = rag_tokenizer.tokenize(" ".join(fea["responsibilities"][1:]))

    if fea["subordinates_count"]:
        # NOTE(review): r"[^0-9]+$" matches strings containing NO digits, so
        # int(i) would raise for anything this filter keeps — the intent was
        # probably r"[0-9]+$" (digits only); confirm before changing.
        fea["subordinates_count"] = [int(i) for i in fea["subordinates_count"] if
                                     re.match(r"[^0-9]+$", str(i))]
    if fea["subordinates_count"]:
        cv["max_sub_cnt_int"] = np.max(fea["subordinates_count"])

    if isinstance(cv.get("corporation_id"), int):
        cv["corporation_id"] = [str(cv["corporation_id"])]
    if not cv.get("corporation_id"):
        cv["corporation_id"] = []
    for i in cv.get("corporation_id", []):
        cv["baike_flt"] = max(corporations.baike(i), cv["baike_flt"] if "baike_flt" in cv else 0)

    if work_st_tm:
        try:
            if re.match(r"[0-9]{9,}", work_st_tm):
                # Epoch timestamp → date string before extracting the year.
                work_st_tm = turnTm2Dt(work_st_tm)
            y, m, d = getYMD(work_st_tm)
            cv["work_exp_flt"] = min(int(str(datetime.date.today())[0:4]) - int(y), cv.get("work_exp_flt", 1000))
        except Exception as e:
            logging.exception("forWork {} {} {}".format(e, work_st_tm, cv.get("work_exp_flt")))

    cv["job_num_int"] = 0
    if duas:
        cv["dua_flt"] = np.mean(duas)
        cv["cur_dua_int"] = duas[0]
        cv["job_num_int"] = len(duas)
    if scales:
        cv["scale_flt"] = np.max(scales)
    return cv
def turnTm2Dt(b):
    """Normalize a time value to a date string.

    Values that look like epoch timestamps (10+ leading digits) become a
    local-time 'YYYY-MM-DD HH:MM:SS' string; millisecond timestamps are
    truncated to their first 10 digits.  Anything else is returned
    whitespace-stripped.  Falsy input yields None.
    """
    if not b:
        return None
    text = str(b).strip()
    if re.match(r"[0-9]{10,}", text):
        text = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(text[:10])))
    return text
def getYMD(b):
    """Extract (year, month, day) from a loosely formatted date value.

    The year comes back as an int (or "" when unparseable); month and day
    are strings.  Out-of-range month (not 1-12) or day (not 1-31) values
    fall back to "1"; a falsy input returns ("", "", "01") untouched.
    """
    y, m, d = "", "", "01"
    if not b:
        return y, m, d
    b = turnTm2Dt(b)
    if re.match(r"[0-9]{4}", b):
        y = int(b[:4])
        month_match = re.search(r"[0-9]{4}.?([0-9]{1,2})", b)
        if month_match:
            m = month_match.group(1)
        day_match = re.search(r"[0-9]{4}.?[0-9]{,2}.?([0-9]{1,2})", b)
        if day_match:
            d = day_match.group(1)
    if not d or int(d) == 0 or int(d) > 31:
        d = "1"
    if not m or int(m) < 1 or int(m) > 12:
        m = "1"
    return y, m, d
def birth(cv):
    """Fill birth-derived fields (birth_dt, birthday_kwd, age_int) on cv.

    A missing birth date reduces the completeness score by 10% instead.
    """
    if not cv.get("birth"):
        cv["integerity_flt"] *= 0.9
        return cv
    y, m, d = getYMD(cv["birth"])
    if not y or not m:
        # Unparseable date: leave cv untouched.
        return cv
    cv["birth_dt"] = "%s-%02d-%02d" % (y, int(m), int(d))
    cv["birthday_kwd"] = "%02d%02d" % (int(m), int(d))
    cv["age_int"] = datetime.datetime.now().year - int(y)
    return cv
def parse(cv):
    """Transform a raw resume dict into the final indexable document.

    Pipeline: scrub null markers → drop empty fields → score completeness →
    normalize corporation type / political status / phone → tokenize and
    keyword-ize declared field groups → derive name/pinyin fields → run the
    forEdu/forProj/forWork/birth sub-parsers → prune everything that is not
    a typed output field → coerce numpy ints.  Mutates and returns cv.
    """
    # '\\N' is the dump's NULL marker; blank it out before any processing.
    for k in cv.keys():
        if cv[k] == '\\N':
            cv[k] = ''
    # cv = cv.asDict()

    # Field groups driving the per-key processing loop below.
    tks_fld = ["address", "corporation_name", "discipline_name", "email", "expect_city_names",
               "expect_industry_name", "expect_position_name", "industry_name", "industry_names", "name",
               "position_name", "school_name", "self_remark", "title_name"]
    small_tks_fld = ["corporation_name", "expect_position_name", "position_name", "school_name", "title_name"]
    kwd_fld = ["address", "city", "corporation_type", "degree", "discipline_name", "expect_city_names", "email",
               "expect_industry_name", "expect_position_name", "expect_type", "gender", "industry_name",
               "industry_names", "political_status", "position_name", "scale", "school_name", "phone", "tel"]
    num_fld = ["annual_salary", "annual_salary_from", "annual_salary_to", "expect_annual_salary", "expect_salary_from",
               "expect_salary_to", "salary_month"]
    # (field, tag-if-'是', tag-if-'否') triples for yes/no attributes.
    is_fld = [
        ("is_fertility", "已育", "未育"),
        ("is_house", "有房", "没房"),
        ("is_management_experience", "有管理经验", "无管理经验"),
        ("is_marital", "已婚", "未婚"),
        ("is_oversea", "有海外经验", "无海外经验")
    ]

    # Drop None values and empty strings/lists (collect first, delete after).
    rmkeys = []
    for k in cv.keys():
        if cv[k] is None:
            rmkeys.append(k)
        if (isinstance(cv[k], list) or isinstance(cv[k], str)) and len(cv[k]) == 0:
            rmkeys.append(k)
    for k in rmkeys:
        del cv[k]

    # Completeness score: fraction of the declared fields with a real value.
    integrity = 0.
    flds_num = 0.

    def hasValues(flds):
        nonlocal integrity, flds_num
        flds_num += len(flds)
        for f in flds:
            v = str(cv.get(f, ""))
            if len(v) > 0 and v != '0' and v != '[]':
                integrity += 1

    hasValues(tks_fld)
    hasValues(small_tks_fld)
    hasValues(kwd_fld)
    hasValues(num_fld)
    cv["integerity_flt"] = integrity / flds_num

    # Collapse free-text corporation types into canonical categories
    # (民营/机关/非盈利/外企/国企); drop the field if nothing usable remains.
    if cv.get("corporation_type"):
        for p, r in [(r"(公司|企业|其它|其他|Others*|\n|未填写|Enterprises|Company|companies)", ""),
                     (r"[//.· <\((]+.*", ""),
                     (r".*(合资|民企|股份制|中外|私营|个体|Private|创业|Owned|投资).*", "民营"),
                     (r".*(机关|事业).*", "机关"),
                     (r".*(非盈利|Non-profit).*", "非盈利"),
                     (r".*(外企|外商|欧美|foreign|Institution|Australia|港资).*", "外企"),
                     (r".*国有.*", "国企"),
                     (r"[ ()\(\)人/·0-9-]+", ""),
                     (r".*(元|规模|于|=|北京|上海|至今|中国|工资|州|shanghai|强|餐饮|融资|职).*", "")]:
            cv["corporation_type"] = re.sub(p, r, cv["corporation_type"], count=1000, flags=re.IGNORECASE)
        if len(cv["corporation_type"]) < 2:
            del cv["corporation_type"]

    # Canonicalize political status to 党员/群众/团员; drop anything else.
    if cv.get("political_status"):
        for p, r in [
            (r".*党员.*", "党员"),
            (r".*(无党派|公民).*", "群众"),
            (r".*团员.*", "团员")]:
            cv["political_status"] = re.sub(p, r, cv["political_status"])
        if not re.search(r"[党团群]", cv["political_status"]):
            del cv["political_status"]

    # Strip non-digits and a leading +86/0086 country prefix.
    if cv.get("phone"):
        cv["phone"] = re.sub(r"^0*86([0-9]{11})", r"\1", re.sub(r"[^0-9]+", "", cv["phone"]))

    keys = list(cv.keys())
    for k in keys:
        # deal with json objects: "*_obj" fields hold serialized dicts of
        # named sub-records; extract their names into *_kwd / *_tks fields.
        if k.find("_obj") > 0:
            try:
                cv[k] = json_loads(cv[k])
                cv[k] = [a for _, a in cv[k].items()]
                nms = []
                for n in cv[k]:
                    if not isinstance(n, dict) or "name" not in n or not n.get("name"):
                        continue
                    n["name"] = re.sub(r"((442)|\t )", "", n["name"]).strip().lower()
                    if not n["name"]:
                        continue
                    nms.append(n["name"])
                if nms:
                    t = k[:-4]
                    cv[f"{t}_kwd"] = nms
                    cv[f"{t}_tks"] = rag_tokenizer.tokenize(" ".join(nms))
            except Exception:
                logging.exception("parse {} {}".format(str(traceback.format_exc()), cv[k]))
                cv[k] = []

        # tokenize fields
        if k in tks_fld:
            cv[f"{k}_tks"] = rag_tokenizer.tokenize(cv[k])
        if k in small_tks_fld:
            cv[f"{k}_sm_tks"] = rag_tokenizer.tokenize(cv[f"{k}_tks"])

        # keyword fields: split on separators after inserting commas between
        # non-ASCII tokens joined only by spaces.
        if k in kwd_fld:
            cv[f"{k}_kwd"] = [n.lower()
                              for n in re.split(r"[\t,,;;. ]",
                                                re.sub(r"([^a-zA-Z])[ ]+([^a-zA-Z ])", r"\1,\2", cv[k])
                                                ) if n]
        if k in num_fld and cv.get(k):
            cv[f"{k}_int"] = cv[k]

    cv["email_kwd"] = cv.get("email_tks", "").replace(" ", "")

    # for name field: keep either a multi-word Latin name or a CJK name whose
    # first one/two characters are a known surname; otherwise blank it.
    if cv.get("name"):
        nm = re.sub(r"[\n——\-\((\+].*", "", cv["name"].strip())
        nm = re.sub(r"[ \t ]+", " ", nm)
        if re.match(r"[a-zA-Z ]+$", nm):
            if len(nm.split()) > 1:
                cv["name"] = nm
            else:
                nm = ""
        elif nm and (surname.isit(nm[0]) or surname.isit(nm[:2])):
            # CJK name: cap at 5 chars and drop any trailing Latin tail.
            nm = re.sub(r"[a-zA-Z]+.*", "", nm[:5])
        else:
            nm = ""
        cv["name"] = nm.strip()
        name = cv["name"]

        # name pingyin and its prefix (joined and per-syllable prefix forms
        # to support prefix search on romanized names)
        cv["name_py_tks"] = " ".join(PY.get_pinyins(nm[:20], '')) + " " + " ".join(PY.get_pinyins(nm[:20], ' '))
        cv["name_py_pref0_tks"] = ""
        cv["name_py_pref_tks"] = ""
        for py in PY.get_pinyins(nm[:20], ''):
            for i in range(2, len(py) + 1):
                cv["name_py_pref_tks"] += " " + py[:i]
        for py in PY.get_pinyins(nm[:20], ' '):
            py = py.split()
            for i in range(1, len(py) + 1):
                cv["name_py_pref0_tks"] += " " + "".join(py[:i])

        cv["name_kwd"] = name
        cv["name_pinyin_kwd"] = PY.get_pinyins(nm[:20], ' ')[:3]
        # For CJK names also index the individual characters.
        cv["name_tks"] = (
            rag_tokenizer.tokenize(name) + " " + (" ".join(list(name)) if not re.match(r"[a-zA-Z ]+$", name) else "")
        ) if name else ""
    else:
        cv["integerity_flt"] /= 2.

    # Keep only a valid 11-digit mainland mobile number, if any.
    if cv.get("phone"):
        r = re.search(r"(1[3456789][0-9]{9})", cv["phone"])
        if not r:
            cv["phone"] = ""
        else:
            cv["phone"] = r.group(1)

    # deal with date fields: default to 2012-01-01 when nothing parseable.
    if cv.get("updated_at") and isinstance(cv["updated_at"], datetime.datetime):
        cv["updated_at_dt"] = cv["updated_at"].strftime('%Y-%m-%d %H:%M:%S')
    else:
        y, m, d = getYMD(str(cv.get("updated_at", "")))
        if not y:
            y = "2012"
        if not m:
            m = "01"
        if not d:
            d = "01"
        cv["updated_at_dt"] = "%s-%02d-%02d 00:00:00" % (y, int(m), int(d))

    # long text tokenize
    if cv.get("responsibilities"):
        cv["responsibilities_ltks"] = rag_tokenizer.tokenize(rmHtmlTag(cv["responsibilities"]))

    # for yes or no field: map 是/否 answers to their tag strings.
    fea = []
    for f, y, n in is_fld:
        if f not in cv:
            continue
        if cv[f] == '是':
            fea.append(y)
        if cv[f] == '否':
            fea.append(n)
    if fea:
        cv["tag_kwd"] = fea

    cv = forEdu(cv)
    cv = forProj(cv)
    cv = forWork(cv)
    cv = birth(cv)

    # Cross-product keyword: corp tag (+ school rank) (+ highest degree).
    cv["corp_proj_sch_deg_kwd"] = [c for c in cv.get("corp_tag_kwd", [])]
    for i in range(len(cv["corp_proj_sch_deg_kwd"])):
        for j in cv.get("sch_rank_kwd", []):
            cv["corp_proj_sch_deg_kwd"][i] += "+" + j
    for i in range(len(cv["corp_proj_sch_deg_kwd"])):
        if cv.get("highest_degree_kwd"):
            cv["corp_proj_sch_deg_kwd"][i] += "+" + cv["highest_degree_kwd"]

    # Fallback work-experience estimate from work_start_time when the
    # sub-parsers produced none (epoch-ms timestamp or YYYY-... string).
    try:
        if not cv.get("work_exp_flt") and cv.get("work_start_time"):
            if re.match(r"[0-9]{9,}", str(cv["work_start_time"])):
                cv["work_start_dt"] = turnTm2Dt(cv["work_start_time"])
                cv["work_exp_flt"] = (time.time() - int(int(cv["work_start_time"]) / 1000)) / 3600. / 24. / 365.
            elif re.match(r"[0-9]{4}[^0-9]", str(cv["work_start_time"])):
                y, m, d = getYMD(str(cv["work_start_time"]))
                cv["work_start_dt"] = "%s-%02d-%02d 00:00:00" % (y, int(m), int(d))
                cv["work_exp_flt"] = int(str(datetime.date.today())[0:4]) - int(y)
    except Exception as e:
        logging.exception("parse {} ==> {}".format(e, cv.get("work_start_time")))
    # Last resort: months of experience declared directly on the resume.
    if "work_exp_flt" not in cv and cv.get("work_experience", 0):
        cv["work_exp_flt"] = int(cv["work_experience"]) / 12.

    # Prune everything that is not a typed output field (suffix whitelist).
    keys = list(cv.keys())
    for k in keys:
        if not re.search(r"_(fea|tks|nst|dt|int|flt|ltks|kwd|id)$", k):
            del cv[k]
    # Dedup keyword/id lists; drop placeholder values and a trailing 市 (city).
    for k in cv.keys():
        if not re.search("_(kwd|id)$", k) or not isinstance(cv[k], list):
            continue
        cv[k] = list(set([re.sub("(市)$", "", str(n)) for n in cv[k] if n not in ['中国', '0']]))

    # Drop non-positive feature scores.
    keys = [k for k in cv.keys() if re.search(r"_feas*$", k)]
    for k in keys:
        if cv[k] <= 0:
            del cv[k]

    cv["tob_resume_id"] = str(cv["tob_resume_id"])
    cv["id"] = cv["tob_resume_id"]
    logging.debug("CCCCCCCCCCCCCCC")

    return dealWithInt64(cv)
def dealWithInt64(d):
    """Recursively coerce numpy integer scalars to plain Python ints.

    Walks dicts (in place) and lists (rebuilt) so the result is safely
    JSON-serializable; every other value passes through unchanged.
    """
    if isinstance(d, dict):
        for key in d:
            d[key] = dealWithInt64(d[key])
        return d
    if isinstance(d, list):
        return [dealWithInt64(item) for item in d]
    if isinstance(d, np.integer):
        return int(d)
    return d
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/resume/entities/regions.py | deepdoc/parser/resume/entities/regions.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
TBL = {
"2": {"name": "北京", "parent": "1"},
"3": {"name": "天津", "parent": "1"},
"4": {"name": "河北", "parent": "1"},
"5": {"name": "山西", "parent": "1"},
"6": {"name": "内蒙古", "parent": "1"},
"7": {"name": "辽宁", "parent": "1"},
"8": {"name": "吉林", "parent": "1"},
"9": {"name": "黑龙江", "parent": "1"},
"10": {"name": "上海", "parent": "1"},
"11": {"name": "江苏", "parent": "1"},
"12": {"name": "浙江", "parent": "1"},
"13": {"name": "安徽", "parent": "1"},
"14": {"name": "福建", "parent": "1"},
"15": {"name": "江西", "parent": "1"},
"16": {"name": "山东", "parent": "1"},
"17": {"name": "河南", "parent": "1"},
"18": {"name": "湖北", "parent": "1"},
"19": {"name": "湖南", "parent": "1"},
"20": {"name": "广东", "parent": "1"},
"21": {"name": "广西", "parent": "1"},
"22": {"name": "海南", "parent": "1"},
"23": {"name": "重庆", "parent": "1"},
"24": {"name": "四川", "parent": "1"},
"25": {"name": "贵州", "parent": "1"},
"26": {"name": "云南", "parent": "1"},
"27": {"name": "西藏", "parent": "1"},
"28": {"name": "陕西", "parent": "1"},
"29": {"name": "甘肃", "parent": "1"},
"30": {"name": "青海", "parent": "1"},
"31": {"name": "宁夏", "parent": "1"},
"32": {"name": "新疆", "parent": "1"},
"33": {"name": "北京市", "parent": "2"},
"34": {"name": "天津市", "parent": "3"},
"35": {"name": "石家庄市", "parent": "4"},
"36": {"name": "唐山市", "parent": "4"},
"37": {"name": "秦皇岛市", "parent": "4"},
"38": {"name": "邯郸市", "parent": "4"},
"39": {"name": "邢台市", "parent": "4"},
"40": {"name": "保定市", "parent": "4"},
"41": {"name": "张家口市", "parent": "4"},
"42": {"name": "承德市", "parent": "4"},
"43": {"name": "沧州市", "parent": "4"},
"44": {"name": "廊坊市", "parent": "4"},
"45": {"name": "衡水市", "parent": "4"},
"46": {"name": "太原市", "parent": "5"},
"47": {"name": "大同市", "parent": "5"},
"48": {"name": "阳泉市", "parent": "5"},
"49": {"name": "长治市", "parent": "5"},
"50": {"name": "晋城市", "parent": "5"},
"51": {"name": "朔州市", "parent": "5"},
"52": {"name": "晋中市", "parent": "5"},
"53": {"name": "运城市", "parent": "5"},
"54": {"name": "忻州市", "parent": "5"},
"55": {"name": "临汾市", "parent": "5"},
"56": {"name": "吕梁市", "parent": "5"},
"57": {"name": "呼和浩特市", "parent": "6"},
"58": {"name": "包头市", "parent": "6"},
"59": {"name": "乌海市", "parent": "6"},
"60": {"name": "赤峰市", "parent": "6"},
"61": {"name": "通辽市", "parent": "6"},
"62": {"name": "鄂尔多斯市", "parent": "6"},
"63": {"name": "呼伦贝尔市", "parent": "6"},
"64": {"name": "巴彦淖尔市", "parent": "6"},
"65": {"name": "乌兰察布市", "parent": "6"},
"66": {"name": "兴安盟", "parent": "6"},
"67": {"name": "锡林郭勒盟", "parent": "6"},
"68": {"name": "阿拉善盟", "parent": "6"},
"69": {"name": "沈阳市", "parent": "7"},
"70": {"name": "大连市", "parent": "7"},
"71": {"name": "鞍山市", "parent": "7"},
"72": {"name": "抚顺市", "parent": "7"},
"73": {"name": "本溪市", "parent": "7"},
"74": {"name": "丹东市", "parent": "7"},
"75": {"name": "锦州市", "parent": "7"},
"76": {"name": "营口市", "parent": "7"},
"77": {"name": "阜新市", "parent": "7"},
"78": {"name": "辽阳市", "parent": "7"},
"79": {"name": "盘锦市", "parent": "7"},
"80": {"name": "铁岭市", "parent": "7"},
"81": {"name": "朝阳市", "parent": "7"},
"82": {"name": "葫芦岛市", "parent": "7"},
"83": {"name": "长春市", "parent": "8"},
"84": {"name": "吉林市", "parent": "8"},
"85": {"name": "四平市", "parent": "8"},
"86": {"name": "辽源市", "parent": "8"},
"87": {"name": "通化市", "parent": "8"},
"88": {"name": "白山市", "parent": "8"},
"89": {"name": "松原市", "parent": "8"},
"90": {"name": "白城市", "parent": "8"},
"91": {"name": "延边朝鲜族自治州", "parent": "8"},
"92": {"name": "哈尔滨市", "parent": "9"},
"93": {"name": "齐齐哈尔市", "parent": "9"},
"94": {"name": "鸡西市", "parent": "9"},
"95": {"name": "鹤岗市", "parent": "9"},
"96": {"name": "双鸭山市", "parent": "9"},
"97": {"name": "大庆市", "parent": "9"},
"98": {"name": "伊春市", "parent": "9"},
"99": {"name": "佳木斯市", "parent": "9"},
"100": {"name": "七台河市", "parent": "9"},
"101": {"name": "牡丹江市", "parent": "9"},
"102": {"name": "黑河市", "parent": "9"},
"103": {"name": "绥化市", "parent": "9"},
"104": {"name": "大兴安岭地区", "parent": "9"},
"105": {"name": "上海市", "parent": "10"},
"106": {"name": "南京市", "parent": "11"},
"107": {"name": "无锡市", "parent": "11"},
"108": {"name": "徐州市", "parent": "11"},
"109": {"name": "常州市", "parent": "11"},
"110": {"name": "苏州市", "parent": "11"},
"111": {"name": "南通市", "parent": "11"},
"112": {"name": "连云港市", "parent": "11"},
"113": {"name": "淮安市", "parent": "11"},
"114": {"name": "盐城市", "parent": "11"},
"115": {"name": "扬州市", "parent": "11"},
"116": {"name": "镇江市", "parent": "11"},
"117": {"name": "泰州市", "parent": "11"},
"118": {"name": "宿迁市", "parent": "11"},
"119": {"name": "杭州市", "parent": "12"},
"120": {"name": "宁波市", "parent": "12"},
"121": {"name": "温州市", "parent": "12"},
"122": {"name": "嘉兴市", "parent": "12"},
"123": {"name": "湖州市", "parent": "12"},
"124": {"name": "绍兴市", "parent": "12"},
"125": {"name": "金华市", "parent": "12"},
"126": {"name": "衢州市", "parent": "12"},
"127": {"name": "舟山市", "parent": "12"},
"128": {"name": "台州市", "parent": "12"},
"129": {"name": "丽水市", "parent": "12"},
"130": {"name": "合肥市", "parent": "13"},
"131": {"name": "芜湖市", "parent": "13"},
"132": {"name": "蚌埠市", "parent": "13"},
"133": {"name": "淮南市", "parent": "13"},
"134": {"name": "马鞍山市", "parent": "13"},
"135": {"name": "淮北市", "parent": "13"},
"136": {"name": "铜陵市", "parent": "13"},
"137": {"name": "安庆市", "parent": "13"},
"138": {"name": "黄山市", "parent": "13"},
"139": {"name": "滁州市", "parent": "13"},
"140": {"name": "阜阳市", "parent": "13"},
"141": {"name": "宿州市", "parent": "13"},
"143": {"name": "六安市", "parent": "13"},
"144": {"name": "亳州市", "parent": "13"},
"145": {"name": "池州市", "parent": "13"},
"146": {"name": "宣城市", "parent": "13"},
"147": {"name": "福州市", "parent": "14"},
"148": {"name": "厦门市", "parent": "14"},
"149": {"name": "莆田市", "parent": "14"},
"150": {"name": "三明市", "parent": "14"},
"151": {"name": "泉州市", "parent": "14"},
"152": {"name": "漳州市", "parent": "14"},
"153": {"name": "南平市", "parent": "14"},
"154": {"name": "龙岩市", "parent": "14"},
"155": {"name": "宁德市", "parent": "14"},
"156": {"name": "南昌市", "parent": "15"},
"157": {"name": "景德镇市", "parent": "15"},
"158": {"name": "萍乡市", "parent": "15"},
"159": {"name": "九江市", "parent": "15"},
"160": {"name": "新余市", "parent": "15"},
"161": {"name": "鹰潭市", "parent": "15"},
"162": {"name": "赣州市", "parent": "15"},
"163": {"name": "吉安市", "parent": "15"},
"164": {"name": "宜春市", "parent": "15"},
"165": {"name": "抚州市", "parent": "15"},
"166": {"name": "上饶市", "parent": "15"},
"167": {"name": "济南市", "parent": "16"},
"168": {"name": "青岛市", "parent": "16"},
"169": {"name": "淄博市", "parent": "16"},
"170": {"name": "枣庄市", "parent": "16"},
"171": {"name": "东营市", "parent": "16"},
"172": {"name": "烟台市", "parent": "16"},
"173": {"name": "潍坊市", "parent": "16"},
"174": {"name": "济宁市", "parent": "16"},
"175": {"name": "泰安市", "parent": "16"},
"176": {"name": "威海市", "parent": "16"},
"177": {"name": "日照市", "parent": "16"},
"179": {"name": "临沂市", "parent": "16"},
"180": {"name": "德州市", "parent": "16"},
"181": {"name": "聊城市", "parent": "16"},
"182": {"name": "滨州市", "parent": "16"},
"183": {"name": "菏泽市", "parent": "16"},
"184": {"name": "郑州市", "parent": "17"},
"185": {"name": "开封市", "parent": "17"},
"186": {"name": "洛阳市", "parent": "17"},
"187": {"name": "平顶山市", "parent": "17"},
"188": {"name": "安阳市", "parent": "17"},
"189": {"name": "鹤壁市", "parent": "17"},
"190": {"name": "新乡市", "parent": "17"},
"191": {"name": "焦作市", "parent": "17"},
"192": {"name": "濮阳市", "parent": "17"},
"193": {"name": "许昌市", "parent": "17"},
"194": {"name": "漯河市", "parent": "17"},
"195": {"name": "三门峡市", "parent": "17"},
"196": {"name": "南阳市", "parent": "17"},
"197": {"name": "商丘市", "parent": "17"},
"198": {"name": "信阳市", "parent": "17"},
"199": {"name": "周口市", "parent": "17"},
"200": {"name": "驻马店市", "parent": "17"},
"201": {"name": "武汉市", "parent": "18"},
"202": {"name": "黄石市", "parent": "18"},
"203": {"name": "十堰市", "parent": "18"},
"204": {"name": "宜昌市", "parent": "18"},
"205": {"name": "襄阳市", "parent": "18"},
"206": {"name": "鄂州市", "parent": "18"},
"207": {"name": "荆门市", "parent": "18"},
"208": {"name": "孝感市", "parent": "18"},
"209": {"name": "荆州市", "parent": "18"},
"210": {"name": "黄冈市", "parent": "18"},
"211": {"name": "咸宁市", "parent": "18"},
"212": {"name": "随州市", "parent": "18"},
"213": {"name": "恩施土家族苗族自治州", "parent": "18"},
"215": {"name": "长沙市", "parent": "19"},
"216": {"name": "株洲市", "parent": "19"},
"217": {"name": "湘潭市", "parent": "19"},
"218": {"name": "衡阳市", "parent": "19"},
"219": {"name": "邵阳市", "parent": "19"},
"220": {"name": "岳阳市", "parent": "19"},
"221": {"name": "常德市", "parent": "19"},
"222": {"name": "张家界市", "parent": "19"},
"223": {"name": "益阳市", "parent": "19"},
"224": {"name": "郴州市", "parent": "19"},
"225": {"name": "永州市", "parent": "19"},
"226": {"name": "怀化市", "parent": "19"},
"227": {"name": "娄底市", "parent": "19"},
"228": {"name": "湘西土家族苗族自治州", "parent": "19"},
"229": {"name": "广州市", "parent": "20"},
"230": {"name": "韶关市", "parent": "20"},
"231": {"name": "深圳市", "parent": "20"},
"232": {"name": "珠海市", "parent": "20"},
"233": {"name": "汕头市", "parent": "20"},
"234": {"name": "佛山市", "parent": "20"},
"235": {"name": "江门市", "parent": "20"},
"236": {"name": "湛江市", "parent": "20"},
"237": {"name": "茂名市", "parent": "20"},
"238": {"name": "肇庆市", "parent": "20"},
"239": {"name": "惠州市", "parent": "20"},
"240": {"name": "梅州市", "parent": "20"},
"241": {"name": "汕尾市", "parent": "20"},
"242": {"name": "河源市", "parent": "20"},
"243": {"name": "阳江市", "parent": "20"},
"244": {"name": "清远市", "parent": "20"},
"245": {"name": "东莞市", "parent": "20"},
"246": {"name": "中山市", "parent": "20"},
"247": {"name": "潮州市", "parent": "20"},
"248": {"name": "揭阳市", "parent": "20"},
"249": {"name": "云浮市", "parent": "20"},
"250": {"name": "南宁市", "parent": "21"},
"251": {"name": "柳州市", "parent": "21"},
"252": {"name": "桂林市", "parent": "21"},
"253": {"name": "梧州市", "parent": "21"},
"254": {"name": "北海市", "parent": "21"},
"255": {"name": "防城港市", "parent": "21"},
"256": {"name": "钦州市", "parent": "21"},
"257": {"name": "贵港市", "parent": "21"},
"258": {"name": "玉林市", "parent": "21"},
"259": {"name": "百色市", "parent": "21"},
"260": {"name": "贺州市", "parent": "21"},
"261": {"name": "河池市", "parent": "21"},
"262": {"name": "来宾市", "parent": "21"},
"263": {"name": "崇左市", "parent": "21"},
"264": {"name": "海口市", "parent": "22"},
"265": {"name": "三亚市", "parent": "22"},
"267": {"name": "重庆市", "parent": "23"},
"268": {"name": "成都市", "parent": "24"},
"269": {"name": "自贡市", "parent": "24"},
"270": {"name": "攀枝花市", "parent": "24"},
"271": {"name": "泸州市", "parent": "24"},
"272": {"name": "德阳市", "parent": "24"},
"273": {"name": "绵阳市", "parent": "24"},
"274": {"name": "广元市", "parent": "24"},
"275": {"name": "遂宁市", "parent": "24"},
"276": {"name": "内江市", "parent": "24"},
"277": {"name": "乐山市", "parent": "24"},
"278": {"name": "南充市", "parent": "24"},
"279": {"name": "眉山市", "parent": "24"},
"280": {"name": "宜宾市", "parent": "24"},
"281": {"name": "广安市", "parent": "24"},
"282": {"name": "达州市", "parent": "24"},
"283": {"name": "雅安市", "parent": "24"},
"284": {"name": "巴中市", "parent": "24"},
"285": {"name": "资阳市", "parent": "24"},
"286": {"name": "阿坝藏族羌族自治州", "parent": "24"},
"287": {"name": "甘孜藏族自治州", "parent": "24"},
"288": {"name": "凉山彝族自治州", "parent": "24"},
"289": {"name": "贵阳市", "parent": "25"},
"290": {"name": "六盘水市", "parent": "25"},
"291": {"name": "遵义市", "parent": "25"},
"292": {"name": "安顺市", "parent": "25"},
"293": {"name": "铜仁市", "parent": "25"},
"294": {"name": "黔西南布依族苗族自治州", "parent": "25"},
"295": {"name": "毕节市", "parent": "25"},
"296": {"name": "黔东南苗族侗族自治州", "parent": "25"},
"297": {"name": "黔南布依族苗族自治州", "parent": "25"},
"298": {"name": "昆明市", "parent": "26"},
"299": {"name": "曲靖市", "parent": "26"},
"300": {"name": "玉溪市", "parent": "26"},
"301": {"name": "保山市", "parent": "26"},
"302": {"name": "昭通市", "parent": "26"},
"303": {"name": "丽江市", "parent": "26"},
"304": {"name": "普洱市", "parent": "26"},
"305": {"name": "临沧市", "parent": "26"},
"306": {"name": "楚雄彝族自治州", "parent": "26"},
"307": {"name": "红河哈尼族彝族自治州", "parent": "26"},
"308": {"name": "文山壮族苗族自治州", "parent": "26"},
"309": {"name": "西双版纳傣族自治州", "parent": "26"},
"310": {"name": "大理白族自治州", "parent": "26"},
"311": {"name": "德宏傣族景颇族自治州", "parent": "26"},
"312": {"name": "怒江傈僳族自治州", "parent": "26"},
"313": {"name": "迪庆藏族自治州", "parent": "26"},
"314": {"name": "拉萨市", "parent": "27"},
"315": {"name": "昌都市", "parent": "27"},
"316": {"name": "山南市", "parent": "27"},
"317": {"name": "日喀则市", "parent": "27"},
"318": {"name": "那曲市", "parent": "27"},
"319": {"name": "阿里地区", "parent": "27"},
"320": {"name": "林芝市", "parent": "27"},
"321": {"name": "西安市", "parent": "28"},
"322": {"name": "铜川市", "parent": "28"},
"323": {"name": "宝鸡市", "parent": "28"},
"324": {"name": "咸阳市", "parent": "28"},
"325": {"name": "渭南市", "parent": "28"},
"326": {"name": "延安市", "parent": "28"},
"327": {"name": "汉中市", "parent": "28"},
"328": {"name": "榆林市", "parent": "28"},
"329": {"name": "安康市", "parent": "28"},
"330": {"name": "商洛市", "parent": "28"},
"331": {"name": "兰州市", "parent": "29"},
"332": {"name": "嘉峪关市", "parent": "29"},
"333": {"name": "金昌市", "parent": "29"},
"334": {"name": "白银市", "parent": "29"},
"335": {"name": "天水市", "parent": "29"},
"336": {"name": "武威市", "parent": "29"},
"337": {"name": "张掖市", "parent": "29"},
"338": {"name": "平凉市", "parent": "29"},
"339": {"name": "酒泉市", "parent": "29"},
"340": {"name": "庆阳市", "parent": "29"},
"341": {"name": "定西市", "parent": "29"},
"342": {"name": "陇南市", "parent": "29"},
"343": {"name": "临夏回族自治州", "parent": "29"},
"344": {"name": "甘南藏族自治州", "parent": "29"},
"345": {"name": "西宁市", "parent": "30"},
"346": {"name": "海东市", "parent": "30"},
"347": {"name": "海北藏族自治州", "parent": "30"},
"348": {"name": "黄南藏族自治州", "parent": "30"},
"349": {"name": "海南藏族自治州", "parent": "30"},
"350": {"name": "果洛藏族自治州", "parent": "30"},
"351": {"name": "玉树藏族自治州", "parent": "30"},
"352": {"name": "海西蒙古族藏族自治州", "parent": "30"},
"353": {"name": "银川市", "parent": "31"},
"354": {"name": "石嘴山市", "parent": "31"},
"355": {"name": "吴忠市", "parent": "31"},
"356": {"name": "固原市", "parent": "31"},
"357": {"name": "中卫市", "parent": "31"},
"358": {"name": "乌鲁木齐市", "parent": "32"},
"359": {"name": "克拉玛依市", "parent": "32"},
"360": {"name": "吐鲁番市", "parent": "32"},
"361": {"name": "哈密市", "parent": "32"},
"362": {"name": "昌吉回族自治州", "parent": "32"},
"363": {"name": "博尔塔拉蒙古自治州", "parent": "32"},
"364": {"name": "巴音郭楞蒙古自治州", "parent": "32"},
"365": {"name": "阿克苏地区", "parent": "32"},
"366": {"name": "克孜勒苏柯尔克孜自治州", "parent": "32"},
"367": {"name": "喀什地区", "parent": "32"},
"368": {"name": "和田地区", "parent": "32"},
"369": {"name": "伊犁哈萨克自治州", "parent": "32"},
"370": {"name": "塔城地区", "parent": "32"},
"371": {"name": "阿勒泰地区", "parent": "32"},
"372": {"name": "新疆省直辖行政单位", "parent": "32"},
"373": {"name": "可克达拉市", "parent": "32"},
"374": {"name": "昆玉市", "parent": "32"},
"375": {"name": "胡杨河市", "parent": "32"},
"376": {"name": "双河市", "parent": "32"},
"3560": {"name": "北票市", "parent": "7"},
"3615": {"name": "高州市", "parent": "20"},
"3651": {"name": "济源市", "parent": "17"},
"3662": {"name": "胶南市", "parent": "16"},
"3683": {"name": "老河口市", "parent": "18"},
"3758": {"name": "沙河市", "parent": "4"},
"3822": {"name": "宜城市", "parent": "18"},
"3842": {"name": "枣阳市", "parent": "18"},
"3850": {"name": "肇东市", "parent": "9"},
"3905": {"name": "澳门", "parent": "1"},
"3906": {"name": "澳门", "parent": "3905"},
"3907": {"name": "香港", "parent": "1"},
"3908": {"name": "香港", "parent": "3907"},
"3947": {"name": "仙桃市", "parent": "18"},
"3954": {"name": "台湾", "parent": "1"},
"3955": {"name": "台湾", "parent": "3954"},
"3956": {"name": "海外", "parent": "1"},
"3957": {"name": "海外", "parent": "3956"},
"3958": {"name": "美国", "parent": "3956"},
"3959": {"name": "加拿大", "parent": "3956"},
"3961": {"name": "日本", "parent": "3956"},
"3962": {"name": "韩国", "parent": "3956"},
"3963": {"name": "德国", "parent": "3956"},
"3964": {"name": "英国", "parent": "3956"},
"3965": {"name": "意大利", "parent": "3956"},
"3966": {"name": "西班牙", "parent": "3956"},
"3967": {"name": "法国", "parent": "3956"},
"3968": {"name": "澳大利亚", "parent": "3956"},
"3969": {"name": "东城区", "parent": "2"},
"3970": {"name": "西城区", "parent": "2"},
"3971": {"name": "崇文区", "parent": "2"},
"3972": {"name": "宣武区", "parent": "2"},
"3973": {"name": "朝阳区", "parent": "2"},
"3974": {"name": "海淀区", "parent": "2"},
"3975": {"name": "丰台区", "parent": "2"},
"3976": {"name": "石景山区", "parent": "2"},
"3977": {"name": "门头沟区", "parent": "2"},
"3978": {"name": "房山区", "parent": "2"},
"3979": {"name": "通州区", "parent": "2"},
"3980": {"name": "顺义区", "parent": "2"},
"3981": {"name": "昌平区", "parent": "2"},
"3982": {"name": "大兴区", "parent": "2"},
"3983": {"name": "平谷区", "parent": "2"},
"3984": {"name": "怀柔区", "parent": "2"},
"3985": {"name": "密云区", "parent": "2"},
"3986": {"name": "延庆区", "parent": "2"},
"3987": {"name": "黄浦区", "parent": "10"},
"3988": {"name": "徐汇区", "parent": "10"},
"3989": {"name": "长宁区", "parent": "10"},
"3990": {"name": "静安区", "parent": "10"},
"3991": {"name": "普陀区", "parent": "10"},
"3992": {"name": "闸北区", "parent": "10"},
"3993": {"name": "虹口区", "parent": "10"},
"3994": {"name": "杨浦区", "parent": "10"},
"3995": {"name": "宝山区", "parent": "10"},
"3996": {"name": "闵行区", "parent": "10"},
"3997": {"name": "嘉定区", "parent": "10"},
"3998": {"name": "浦东新区", "parent": "10"},
"3999": {"name": "松江区", "parent": "10"},
"4000": {"name": "金山区", "parent": "10"},
"4001": {"name": "青浦区", "parent": "10"},
"4002": {"name": "奉贤区", "parent": "10"},
"4003": {"name": "崇明区", "parent": "10"},
"4004": {"name": "和平区", "parent": "3"},
"4005": {"name": "河东区", "parent": "3"},
"4006": {"name": "河西区", "parent": "3"},
"4007": {"name": "南开区", "parent": "3"},
"4008": {"name": "红桥区", "parent": "3"},
"4009": {"name": "河北区", "parent": "3"},
"4010": {"name": "滨海新区", "parent": "3"},
"4011": {"name": "东丽区", "parent": "3"},
"4012": {"name": "西青区", "parent": "3"},
"4013": {"name": "北辰区", "parent": "3"},
"4014": {"name": "津南区", "parent": "3"},
"4015": {"name": "武清区", "parent": "3"},
"4016": {"name": "宝坻区", "parent": "3"},
"4017": {"name": "静海区", "parent": "3"},
"4018": {"name": "宁河区", "parent": "3"},
"4019": {"name": "蓟州区", "parent": "3"},
"4020": {"name": "渝中区", "parent": "23"},
"4021": {"name": "江北区", "parent": "23"},
"4022": {"name": "南岸区", "parent": "23"},
"4023": {"name": "沙坪坝区", "parent": "23"},
"4024": {"name": "九龙坡区", "parent": "23"},
"4025": {"name": "大渡口区", "parent": "23"},
"4026": {"name": "渝北区", "parent": "23"},
"4027": {"name": "巴南区", "parent": "23"},
"4028": {"name": "北碚区", "parent": "23"},
"4029": {"name": "万州区", "parent": "23"},
"4030": {"name": "黔江区", "parent": "23"},
"4031": {"name": "永川区", "parent": "23"},
"4032": {"name": "涪陵区", "parent": "23"},
"4033": {"name": "江津区", "parent": "23"},
"4034": {"name": "合川区", "parent": "23"},
"4035": {"name": "双桥区", "parent": "23"},
"4036": {"name": "万盛区", "parent": "23"},
"4037": {"name": "荣昌区", "parent": "23"},
"4038": {"name": "大足区", "parent": "23"},
"4039": {"name": "璧山区", "parent": "23"},
"4040": {"name": "铜梁区", "parent": "23"},
"4041": {"name": "潼南区", "parent": "23"},
"4042": {"name": "綦江区", "parent": "23"},
"4043": {"name": "忠县", "parent": "23"},
"4044": {"name": "开州区", "parent": "23"},
"4045": {"name": "云阳县", "parent": "23"},
"4046": {"name": "梁平区", "parent": "23"},
"4047": {"name": "垫江县", "parent": "23"},
"4048": {"name": "丰都县", "parent": "23"},
"4049": {"name": "奉节县", "parent": "23"},
"4050": {"name": "巫山县", "parent": "23"},
"4051": {"name": "巫溪县", "parent": "23"},
"4052": {"name": "城口县", "parent": "23"},
"4053": {"name": "武隆区", "parent": "23"},
"4054": {"name": "石柱土家族自治县", "parent": "23"},
"4055": {"name": "秀山土家族苗族自治县", "parent": "23"},
"4056": {"name": "酉阳土家族苗族自治县", "parent": "23"},
"4057": {"name": "彭水苗族土家族自治县", "parent": "23"},
"4058": {"name": "潜江市", "parent": "18"},
"4059": {"name": "三沙市", "parent": "22"},
"4060": {"name": "石河子市", "parent": "32"},
"4061": {"name": "阿拉尔市", "parent": "32"},
"4062": {"name": "图木舒克市", "parent": "32"},
"4063": {"name": "五家渠市", "parent": "32"},
"4064": {"name": "北屯市", "parent": "32"},
"4065": {"name": "铁门关市", "parent": "32"},
"4066": {"name": "儋州市", "parent": "22"},
"4067": {"name": "五指山市", "parent": "22"},
"4068": {"name": "文昌市", "parent": "22"},
"4069": {"name": "琼海市", "parent": "22"},
"4070": {"name": "万宁市", "parent": "22"},
"4072": {"name": "定安县", "parent": "22"},
"4073": {"name": "屯昌县", "parent": "22"},
"4074": {"name": "澄迈县", "parent": "22"},
"4075": {"name": "临高县", "parent": "22"},
"4076": {"name": "琼中黎族苗族自治县", "parent": "22"},
"4077": {"name": "保亭黎族苗族自治县", "parent": "22"},
"4078": {"name": "白沙黎族自治县", "parent": "22"},
"4079": {"name": "昌江黎族自治县", "parent": "22"},
"4080": {"name": "乐东黎族自治县", "parent": "22"},
"4081": {"name": "陵水黎族自治县", "parent": "22"},
"4082": {"name": "马来西亚", "parent": "3956"},
"6047": {"name": "长寿区", "parent": "23"},
"6857": {"name": "阿富汗", "parent": "3956"},
"6858": {"name": "阿尔巴尼亚", "parent": "3956"},
"6859": {"name": "阿尔及利亚", "parent": "3956"},
"6860": {"name": "美属萨摩亚", "parent": "3956"},
"6861": {"name": "安道尔", "parent": "3956"},
"6862": {"name": "安哥拉", "parent": "3956"},
"6863": {"name": "安圭拉", "parent": "3956"},
"6864": {"name": "南极洲", "parent": "3956"},
"6865": {"name": "安提瓜和巴布达", "parent": "3956"},
"6866": {"name": "阿根廷", "parent": "3956"},
"6867": {"name": "亚美尼亚", "parent": "3956"},
"6869": {"name": "奥地利", "parent": "3956"},
"6870": {"name": "阿塞拜疆", "parent": "3956"},
"6871": {"name": "巴哈马", "parent": "3956"},
"6872": {"name": "巴林", "parent": "3956"},
"6873": {"name": "孟加拉国", "parent": "3956"},
"6874": {"name": "巴巴多斯", "parent": "3956"},
"6875": {"name": "白俄罗斯", "parent": "3956"},
"6876": {"name": "比利时", "parent": "3956"},
"6877": {"name": "伯利兹", "parent": "3956"},
"6878": {"name": "贝宁", "parent": "3956"},
"6879": {"name": "百慕大", "parent": "3956"},
"6880": {"name": "不丹", "parent": "3956"},
"6881": {"name": "玻利维亚", "parent": "3956"},
"6882": {"name": "波黑", "parent": "3956"},
"6883": {"name": "博茨瓦纳", "parent": "3956"},
"6884": {"name": "布维岛", "parent": "3956"},
"6885": {"name": "巴西", "parent": "3956"},
"6886": {"name": "英属印度洋领土", "parent": "3956"},
"6887": {"name": "文莱", "parent": "3956"},
"6888": {"name": "保加利亚", "parent": "3956"},
"6889": {"name": "布基纳法索", "parent": "3956"},
"6890": {"name": "布隆迪", "parent": "3956"},
"6891": {"name": "柬埔寨", "parent": "3956"},
"6892": {"name": "喀麦隆", "parent": "3956"},
"6893": {"name": "佛得角", "parent": "3956"},
"6894": {"name": "开曼群岛", "parent": "3956"},
"6895": {"name": "中非", "parent": "3956"},
"6896": {"name": "乍得", "parent": "3956"},
"6897": {"name": "智利", "parent": "3956"},
"6898": {"name": "圣诞岛", "parent": "3956"},
"6899": {"name": "科科斯(基林)群岛", "parent": "3956"},
"6900": {"name": "哥伦比亚", "parent": "3956"},
"6901": {"name": "科摩罗", "parent": "3956"},
"6902": {"name": "刚果(布)", "parent": "3956"},
"6903": {"name": "刚果(金)", "parent": "3956"},
"6904": {"name": "库克群岛", "parent": "3956"},
"6905": {"name": "哥斯达黎加", "parent": "3956"},
"6906": {"name": "科特迪瓦", "parent": "3956"},
"6907": {"name": "克罗地亚", "parent": "3956"},
"6908": {"name": "古巴", "parent": "3956"},
"6909": {"name": "塞浦路斯", "parent": "3956"},
"6910": {"name": "捷克", "parent": "3956"},
"6911": {"name": "丹麦", "parent": "3956"},
"6912": {"name": "吉布提", "parent": "3956"},
"6913": {"name": "多米尼克", "parent": "3956"},
"6914": {"name": "多米尼加共和国", "parent": "3956"},
"6915": {"name": "东帝汶", "parent": "3956"},
"6916": {"name": "厄瓜多尔", "parent": "3956"},
"6917": {"name": "埃及", "parent": "3956"},
"6918": {"name": "萨尔瓦多", "parent": "3956"},
"6919": {"name": "赤道几内亚", "parent": "3956"},
"6920": {"name": "厄立特里亚", "parent": "3956"},
"6921": {"name": "爱沙尼亚", "parent": "3956"},
"6922": {"name": "埃塞俄比亚", "parent": "3956"},
"6923": {"name": "福克兰群岛(马尔维纳斯)", "parent": "3956"},
"6924": {"name": "法罗群岛", "parent": "3956"},
"6925": {"name": "斐济", "parent": "3956"},
"6926": {"name": "芬兰", "parent": "3956"},
"6927": {"name": "法属圭亚那", "parent": "3956"},
"6928": {"name": "法属波利尼西亚", "parent": "3956"},
"6929": {"name": "法属南部领土", "parent": "3956"},
"6930": {"name": "加蓬", "parent": "3956"},
"6931": {"name": "冈比亚", "parent": "3956"},
"6932": {"name": "格鲁吉亚", "parent": "3956"},
"6933": {"name": "加纳", "parent": "3956"},
"6934": {"name": "直布罗陀", "parent": "3956"},
"6935": {"name": "希腊", "parent": "3956"},
"6936": {"name": "格陵兰", "parent": "3956"},
"6937": {"name": "格林纳达", "parent": "3956"},
"6938": {"name": "瓜德罗普", "parent": "3956"},
"6939": {"name": "关岛", "parent": "3956"},
"6940": {"name": "危地马拉", "parent": "3956"},
"6941": {"name": "几内亚", "parent": "3956"},
"6942": {"name": "几内亚比绍", "parent": "3956"},
"6943": {"name": "圭亚那", "parent": "3956"},
"6944": {"name": "海地", "parent": "3956"},
"6945": {"name": "赫德岛和麦克唐纳岛", "parent": "3956"},
"6946": {"name": "洪都拉斯", "parent": "3956"},
"6947": {"name": "匈牙利", "parent": "3956"},
"6948": {"name": "冰岛", "parent": "3956"},
"6949": {"name": "印度", "parent": "3956"},
"6950": {"name": "印度尼西亚", "parent": "3956"},
"6951": {"name": "伊朗", "parent": "3956"},
"6952": {"name": "伊拉克", "parent": "3956"},
"6953": {"name": "爱尔兰", "parent": "3956"},
"6954": {"name": "以色列", "parent": "3956"},
"6955": {"name": "牙买加", "parent": "3956"},
"6956": {"name": "约旦", "parent": "3956"},
"6957": {"name": "哈萨克斯坦", "parent": "3956"},
"6958": {"name": "肯尼亚", "parent": "3956"},
"6959": {"name": "基里巴斯", "parent": "3956"},
"6960": {"name": "朝鲜", "parent": "3956"},
"6961": {"name": "科威特", "parent": "3956"},
"6962": {"name": "吉尔吉斯斯坦", "parent": "3956"},
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | true |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/resume/entities/schools.py | deepdoc/parser/resume/entities/schools.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import re
import copy
import pandas as pd
# Directory containing this module; resource files are resolved under res/.
current_file_path = os.path.dirname(os.path.abspath(__file__))
# School master table (tab-separated); missing cells become "" so string
# comparisons below never see NaN. Columns used later: name_cn, name_en, alias.
TBL = pd.read_csv(
    os.path.join(current_file_path, "res/schools.csv"), sep="\t", header=0
).fillna("")
# Lower-case/trim English names once so lookups can be case-insensitive.
TBL["name_en"] = TBL["name_en"].map(lambda x: x.lower().strip())
# Whitelist of notable schools; punctuation and spaces are stripped so the
# entries match the normalization applied in is_good().
GOOD_SCH = json.load(open(os.path.join(current_file_path, "res/good_sch.json"), "r",encoding="utf-8"))
GOOD_SCH = set([re.sub(r"[,. &()()]+", "", c) for c in GOOD_SCH])
def loadRank(fnm):
    """Populate the global TBL's "rank" column from a CSV rank file.

    Each line of *fnm* is "name,rank". Rows whose Chinese or English name
    matches get that rank; everything else keeps the sentinel 1000000.
    Malformed lines are skipped silently (best-effort load).
    """
    global TBL
    TBL["rank"] = 1000000
    with open(fnm, "r", encoding="utf-8") as fin:
        for raw in fin:
            fields = raw.strip("\n").split(",")
            try:
                name, rank = fields[0].strip(), int(fields[1])
                TBL.loc[((TBL.name_cn == name) | (TBL.name_en == name)), "rank"] = rank
            except Exception:
                # Header lines / short rows / non-integer ranks are ignored.
                pass

loadRank(os.path.join(current_file_path, "res/school.rank.csv"))
def split(txt):
    """Tokenize *txt* on whitespace, merging runs of ASCII-alphabetic tokens.

    Consecutive tokens are joined with a single space when the previous token
    ends with an ASCII letter and the next one starts with an ASCII letter,
    so multi-word English school names stay one token while Chinese tokens
    and numbers remain separate.

    Fix: the original condition tested ``tks`` twice (a redundant trailing
    ``and tks``); the duplicate check is removed with no behavior change.
    """
    tks = []
    for t in re.sub(r"[ \t]+", " ", txt).split():
        # Glue "New" + "York" + "University" style sequences back together.
        if tks and re.match(r".*[a-zA-Z]$", tks[-1]) and re.match(r"[a-zA-Z]", t):
            tks[-1] = tks[-1] + " " + t
        else:
            tks.append(t)
    return tks
def select(nm):
    """Look up one school record by (possibly noisy) name.

    Returns the first matching row of the global TBL as a plain dict
    (including the transient "hit_alias" flag), or None when *nm* is
    empty or nothing matches.
    """
    global TBL
    if not nm:
        return
    if isinstance(nm, list):
        nm = str(nm[0])
    # Only the first whitespace-merged token of the query is considered.
    nm = split(nm)[0]
    nm = str(nm).lower().strip()
    # Normalization pipeline: drop parenthesised qualifiers, then leading
    # article/country prefixes and punctuation, then collapse a
    # "<university>...<college>" suffix down to the university name.
    for pattern, repl in (
        (r"[((][^()()]+[))]", ""),
        (r"(^the |[,.&()();;·]+|^(英国|美国|瑞士))", ""),
        (r"大学.*学院", "大学"),
    ):
        nm = re.sub(pattern, repl, nm)
    # Work on a deep copy so the extra match column never touches TBL.
    candidates = copy.deepcopy(TBL)
    candidates["hit_alias"] = candidates["alias"].map(lambda a: nm in set(a.split("+")))
    matched = candidates[
        (candidates.name_cn == nm) | (candidates.name_en == nm) | candidates.hit_alias
    ]
    if matched.empty:
        return
    return json.loads(matched.to_json(orient="records"))[0]
def is_good(nm):
    """Return True when *nm* normalizes to an entry of the GOOD_SCH whitelist."""
    global GOOD_SCH
    # Mirror the whitelist normalization: strip parenthesised qualifiers,
    # then quotes, punctuation and spaces, comparing lower-cased.
    cleaned = re.sub(r"[((][^()()]+[))]", "", nm.lower())
    cleaned = re.sub(r"[''`‘’“”,. &()();;]+", "", cleaned)
    return cleaned in GOOD_SCH
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/resume/entities/industries.py | deepdoc/parser/resume/entities/industries.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
TBL = {
"1": {"name": "IT/通信/电子", "parent": "0"},
"2": {"name": "互联网", "parent": "0"},
"3": {"name": "电子商务", "parent": "2"},
"4": {"name": "互联网金融", "parent": "2"},
"5": {"name": "网络游戏", "parent": "2"},
"6": {"name": "社交网络平台", "parent": "2"},
"7": {"name": "视频音乐", "parent": "2"},
"9": {"name": "安全", "parent": "2"},
"10": {"name": "云计算", "parent": "2"},
"12": {"name": "工具类客户端应用", "parent": "2"},
"13": {"name": "互联网广告", "parent": "2"},
"14": {"name": "企业互联网服务", "parent": "2"},
"16": {"name": "在线教育", "parent": "2"},
"17": {"name": "在线医疗", "parent": "2"},
"19": {"name": "B2B", "parent": "3"},
"20": {"name": "B2C", "parent": "3"},
"21": {"name": "C2C", "parent": "3"},
"22": {"name": "生活信息本地化", "parent": "3"},
"23": {"name": "在线旅游", "parent": "2"},
"24": {"name": "第三方支付", "parent": "4"},
"26": {"name": "客户端游戏", "parent": "5"},
"27": {"name": "网页游戏", "parent": "5"},
"28": {"name": "手机游戏", "parent": "5"},
"29": {"name": "微博", "parent": "6"},
"30": {"name": "社交网站", "parent": "6"},
"31": {"name": "在线视频", "parent": "7"},
"32": {"name": "在线音乐", "parent": "7"},
"35": {"name": "企业安全", "parent": "9"},
"36": {"name": "个人安全", "parent": "9"},
"37": {"name": "企业级云服务", "parent": "10"},
"38": {"name": "个人级云服务", "parent": "10"},
"43": {"name": "输入法", "parent": "12"},
"44": {"name": "浏览器", "parent": "12"},
"45": {"name": "词典", "parent": "12"},
"46": {"name": "播放器", "parent": "12"},
"47": {"name": "下载器", "parent": "12"},
"48": {"name": "IM", "parent": "12"},
"49": {"name": "广告服务", "parent": "13"},
"50": {"name": "第三方广告网络平台", "parent": "13"},
"51": {"name": "媒体代理", "parent": "13"},
"52": {"name": "创意代理", "parent": "13"},
"53": {"name": "IT-综合", "parent": "1"},
"71": {"name": "团购", "parent": "3"},
"72": {"name": "地图", "parent": "2"},
"73": {"name": "数据存储", "parent": "2"},
"414": {"name": "计算机软件", "parent": "1"},
"415": {"name": "计算机硬件", "parent": "1"},
"416": {"name": "计算机服务(系统、数据服务、维修)", "parent": "1"},
"417": {"name": "通信/电信/网络设备", "parent": "1"},
"418": {"name": "通信/电信运营、增值服务", "parent": "1"},
"419": {"name": "电子技术/半导体/集成电路", "parent": "1"},
"472": {"name": "P2P网贷", "parent": "4"},
"473": {"name": "互联网理财", "parent": "4"},
"474": {"name": "婚恋", "parent": "6"},
"476": {"name": "虚拟化", "parent": "10"},
"477": {"name": "邮箱", "parent": "12"},
"478": {"name": "商业智能", "parent": "14"},
"479": {"name": "企业建站", "parent": "14"},
"480": {"name": "安防", "parent": "14"},
"481": {"name": "网络营销", "parent": "2"},
"487": {"name": "智能终端", "parent": "2"},
"488": {"name": "移动互联网", "parent": "2"},
"489": {"name": "数字城市", "parent": "2"},
"490": {"name": "大数据", "parent": "2"},
"491": {"name": "互联网人力资源", "parent": "2"},
"492": {"name": "舆情监控", "parent": "2"},
"493": {"name": "移动营销", "parent": "481"},
"494": {"name": "微博营销", "parent": "481"},
"495": {"name": "精准营销", "parent": "481"},
"496": {"name": "海外营销", "parent": "481"},
"497": {"name": "微信营销", "parent": "481"},
"498": {"name": "智能手机", "parent": "487"},
"499": {"name": "可穿戴设备", "parent": "487"},
"500": {"name": "智能电视", "parent": "487"},
"501": {"name": "WAP", "parent": "488"},
"502": {"name": "物联网", "parent": "489"},
"503": {"name": "O2O", "parent": "489"},
"504": {"name": "数字出版", "parent": "489"},
"505": {"name": "搜索", "parent": "2"},
"506": {"name": "垂直搜索", "parent": "505"},
"507": {"name": "无线搜索", "parent": "505"},
"508": {"name": "网页搜索", "parent": "505"},
"509": {"name": "网址导航", "parent": "2"},
"510": {"name": "门户", "parent": "2"},
"511": {"name": "网络文学", "parent": "2"},
"512": {"name": "自媒体", "parent": "2"},
"513": {"name": "金融", "parent": "0"},
"514": {"name": "建筑与房地产", "parent": "0"},
"515": {"name": "专业服务", "parent": "0"},
"516": {"name": "教育培训", "parent": "0"},
"517": {"name": "文化传媒", "parent": "0"},
"518": {"name": "消费品", "parent": "0"},
"519": {"name": "工业", "parent": "0"},
"520": {"name": "交通物流", "parent": "0"},
"521": {"name": "贸易", "parent": "0"},
"522": {"name": "医药", "parent": "0"},
"523": {"name": "医疗器械", "parent": "522"},
"524": {"name": "保健品", "parent": "518"},
"525": {"name": "服务业", "parent": "0"},
"526": {"name": "能源/矿产/环保", "parent": "0"},
"527": {"name": "化工", "parent": "0"},
"528": {"name": "政府", "parent": "0"},
"529": {"name": "公共事业", "parent": "0"},
"530": {"name": "非盈利机构", "parent": "0"},
"531": {"name": "农业", "parent": "1131"},
"532": {"name": "林业", "parent": "1131"},
"533": {"name": "畜牧业", "parent": "1131"},
"534": {"name": "渔业", "parent": "1131"},
"535": {"name": "学术科研", "parent": "0"},
"536": {"name": "零售", "parent": "0"},
"537": {"name": "银行", "parent": "513"},
"538": {"name": "保险", "parent": "513"},
"539": {"name": "证券", "parent": "513"},
"540": {"name": "基金", "parent": "513"},
"541": {"name": "信托", "parent": "513"},
"542": {"name": "担保", "parent": "513"},
"543": {"name": "典当", "parent": "513"},
"544": {"name": "拍卖", "parent": "513"},
"545": {"name": "投资/融资", "parent": "513"},
"546": {"name": "期货", "parent": "513"},
"547": {"name": "房地产开发", "parent": "514"},
"548": {"name": "工程施工", "parent": "514"},
"549": {"name": "建筑设计", "parent": "514"},
"550": {"name": "房地产代理", "parent": "514"},
"551": {"name": "物业管理", "parent": "514"},
"552": {"name": "室内设计", "parent": "514"},
"553": {"name": "装修装潢", "parent": "514"},
"554": {"name": "市政工程", "parent": "514"},
"555": {"name": "工程造价", "parent": "514"},
"556": {"name": "工程监理", "parent": "514"},
"557": {"name": "环境工程", "parent": "514"},
"558": {"name": "园林景观", "parent": "514"},
"559": {"name": "法律", "parent": "515"},
"560": {"name": "人力资源", "parent": "515"},
"561": {"name": "会计", "parent": "1125"},
"562": {"name": "审计", "parent": "515"},
"563": {"name": "检测认证", "parent": "515"},
"565": {"name": "翻译", "parent": "515"},
"566": {"name": "中介", "parent": "515"},
"567": {"name": "咨询", "parent": "515"},
"568": {"name": "外包服务", "parent": "515"},
"569": {"name": "家教", "parent": "516"},
"570": {"name": "早教", "parent": "516"},
"571": {"name": "职业技能培训", "parent": "516"},
"572": {"name": "外语培训", "parent": "516"},
"573": {"name": "设计培训", "parent": "516"},
"574": {"name": "IT培训", "parent": "516"},
"575": {"name": "文艺体育培训", "parent": "516"},
"576": {"name": "学历教育", "parent": "516"},
"577": {"name": "管理培训", "parent": "516"},
"578": {"name": "民办基础教育", "parent": "516"},
"579": {"name": "广告", "parent": "517"},
"580": {"name": "媒体", "parent": "517"},
"581": {"name": "会展", "parent": "517"},
"582": {"name": "公关", "parent": "517"},
"583": {"name": "影视", "parent": "517"},
"584": {"name": "艺术", "parent": "517"},
"585": {"name": "文化传播", "parent": "517"},
"586": {"name": "娱乐", "parent": "517"},
"587": {"name": "体育", "parent": "517"},
"588": {"name": "出版", "parent": "517"},
"589": {"name": "休闲", "parent": "517"},
"590": {"name": "动漫", "parent": "517"},
"591": {"name": "市场推广", "parent": "517"},
"592": {"name": "市场研究", "parent": "517"},
"593": {"name": "食品", "parent": "1129"},
"594": {"name": "饮料", "parent": "1129"},
"595": {"name": "烟草", "parent": "1129"},
"596": {"name": "酒品", "parent": "518"},
"597": {"name": "服饰", "parent": "518"},
"598": {"name": "纺织", "parent": "518"},
"599": {"name": "化妆品", "parent": "1129"},
"600": {"name": "日用品", "parent": "1129"},
"601": {"name": "家电", "parent": "518"},
"602": {"name": "家具", "parent": "518"},
"603": {"name": "办公用品", "parent": "518"},
"604": {"name": "奢侈品", "parent": "518"},
"605": {"name": "珠宝", "parent": "518"},
"606": {"name": "数码产品", "parent": "518"},
"607": {"name": "玩具", "parent": "518"},
"608": {"name": "图书", "parent": "518"},
"609": {"name": "音像", "parent": "518"},
"610": {"name": "钟表", "parent": "518"},
"611": {"name": "箱包", "parent": "518"},
"612": {"name": "母婴", "parent": "518"},
"613": {"name": "营养保健", "parent": "518"},
"614": {"name": "户外用品", "parent": "518"},
"615": {"name": "健身器材", "parent": "518"},
"616": {"name": "乐器", "parent": "518"},
"617": {"name": "汽车用品", "parent": "518"},
"619": {"name": "厨具", "parent": "518"},
"620": {"name": "机械制造", "parent": "519"},
"621": {"name": "流体控制", "parent": "519"},
"622": {"name": "自动化控制", "parent": "519"},
"623": {"name": "仪器仪表", "parent": "519"},
"624": {"name": "航空/航天", "parent": "519"},
"625": {"name": "交通设施", "parent": "519"},
"626": {"name": "工业电子", "parent": "519"},
"627": {"name": "建材", "parent": "519"},
"628": {"name": "五金材料", "parent": "519"},
"629": {"name": "汽车", "parent": "519"},
"630": {"name": "印刷", "parent": "519"},
"631": {"name": "造纸", "parent": "519"},
"632": {"name": "包装", "parent": "519"},
"633": {"name": "原材料及加工", "parent": "519"},
"634": {"name": "物流", "parent": "520"},
"635": {"name": "仓储", "parent": "520"},
"636": {"name": "客运", "parent": "520"},
"637": {"name": "快递", "parent": "520"},
"638": {"name": "化学药", "parent": "522"},
"639": {"name": "中药", "parent": "522"},
"640": {"name": "生物制药", "parent": "522"},
"641": {"name": "兽药", "parent": "522"},
"642": {"name": "农药", "parent": "522"},
"643": {"name": "CRO", "parent": "522"},
"644": {"name": "消毒", "parent": "522"},
"645": {"name": "医药商业", "parent": "522"},
"646": {"name": "医疗服务", "parent": "522"},
"647": {"name": "医疗器械", "parent": "523"},
"648": {"name": "制药设备", "parent": "523"},
"649": {"name": "医用耗材", "parent": "523"},
"650": {"name": "手术器械", "parent": "523"},
"651": {"name": "保健器材", "parent": "524"},
"652": {"name": "性保健品", "parent": "524"},
"653": {"name": "医药保养", "parent": "524"},
"654": {"name": "医用保健", "parent": "524"},
"655": {"name": "酒店", "parent": "525"},
"656": {"name": "餐饮", "parent": "525"},
"657": {"name": "旅游", "parent": "525"},
"658": {"name": "生活服务", "parent": "525"},
"659": {"name": "保健服务", "parent": "525"},
"660": {"name": "运动健身", "parent": "525"},
"661": {"name": "家政服务", "parent": "525"},
"662": {"name": "婚庆服务", "parent": "525"},
"663": {"name": "租赁服务", "parent": "525"},
"664": {"name": "维修服务", "parent": "525"},
"665": {"name": "石油天然气", "parent": "526"},
"666": {"name": "电力", "parent": "526"},
"667": {"name": "新能源", "parent": "526"},
"668": {"name": "水利", "parent": "526"},
"669": {"name": "矿产", "parent": "526"},
"670": {"name": "采掘业", "parent": "526"},
"671": {"name": "冶炼", "parent": "526"},
"672": {"name": "环保", "parent": "526"},
"673": {"name": "无机化工原料", "parent": "527"},
"674": {"name": "有机化工原料", "parent": "527"},
"675": {"name": "精细化学品", "parent": "527"},
"676": {"name": "化工设备", "parent": "527"},
"677": {"name": "化工工程", "parent": "527"},
"678": {"name": "资产管理", "parent": "513"},
"679": {"name": "金融租赁", "parent": "513"},
"680": {"name": "征信及信评机构", "parent": "513"},
"681": {"name": "资产评估机构", "parent": "513"},
"683": {"name": "金融监管机构", "parent": "513"},
"684": {"name": "国际贸易", "parent": "521"},
"685": {"name": "海关", "parent": "521"},
"686": {"name": "购物中心", "parent": "536"},
"687": {"name": "超市", "parent": "536"},
"688": {"name": "便利店", "parent": "536"},
"689": {"name": "专卖店", "parent": "536"},
"690": {"name": "专业店", "parent": "536"},
"691": {"name": "百货店", "parent": "536"},
"692": {"name": "杂货店", "parent": "536"},
"693": {"name": "个人银行", "parent": "537"},
"695": {"name": "私人银行", "parent": "537"},
"696": {"name": "公司银行", "parent": "537"},
"697": {"name": "投资银行", "parent": "537"},
"698": {"name": "政策性银行", "parent": "537"},
"699": {"name": "中央银行", "parent": "537"},
"700": {"name": "人寿险", "parent": "538"},
"701": {"name": "财产险", "parent": "538"},
"702": {"name": "再保险", "parent": "538"},
"703": {"name": "养老险", "parent": "538"},
"704": {"name": "保险代理公司", "parent": "538"},
"705": {"name": "公募基金", "parent": "540"},
"707": {"name": "私募基金", "parent": "540"},
"708": {"name": "第三方理财", "parent": "679"},
"709": {"name": "资产管理公司", "parent": "679"},
"711": {"name": "房产中介", "parent": "566"},
"712": {"name": "职业中介", "parent": "566"},
"713": {"name": "婚姻中介", "parent": "566"},
"714": {"name": "战略咨询", "parent": "567"},
"715": {"name": "投资咨询", "parent": "567"},
"716": {"name": "心理咨询", "parent": "567"},
"717": {"name": "留学移民咨询", "parent": "567"},
"718": {"name": "工商注册代理", "parent": "568"},
"719": {"name": "商标专利代理", "parent": "568"},
"720": {"name": "财务代理", "parent": "568"},
"721": {"name": "工程机械", "parent": "620"},
"722": {"name": "农业机械", "parent": "620"},
"723": {"name": "海工设备", "parent": "620"},
"724": {"name": "包装机械", "parent": "620"},
"725": {"name": "印刷机械", "parent": "620"},
"726": {"name": "数控机床", "parent": "620"},
"727": {"name": "矿山机械", "parent": "620"},
"728": {"name": "水泵", "parent": "621"},
"729": {"name": "管道", "parent": "621"},
"730": {"name": "阀门", "parent": "621"},
"732": {"name": "压缩机", "parent": "621"},
"733": {"name": "集散控制系统", "parent": "622"},
"734": {"name": "远程控制", "parent": "622"},
"735": {"name": "液压系统", "parent": "622"},
"736": {"name": "楼宇智能化", "parent": "622"},
"737": {"name": "飞机制造", "parent": "624"},
"738": {"name": "航空公司", "parent": "624"},
"739": {"name": "发动机", "parent": "624"},
"740": {"name": "复合材料", "parent": "624"},
"741": {"name": "高铁", "parent": "625"},
"742": {"name": "地铁", "parent": "625"},
"743": {"name": "信号传输", "parent": "625"},
"745": {"name": "结构材料", "parent": "627"},
"746": {"name": "装饰材料", "parent": "627"},
"747": {"name": "专用材料", "parent": "627"},
"749": {"name": "经销商集团", "parent": "629"},
"750": {"name": "整车制造", "parent": "629"},
"751": {"name": "汽车零配件", "parent": "629"},
"752": {"name": "外型设计", "parent": "629"},
"753": {"name": "平版印刷", "parent": "630"},
"754": {"name": "凸版印刷", "parent": "630"},
"755": {"name": "凹版印刷", "parent": "630"},
"756": {"name": "孔版印刷", "parent": "630"},
"757": {"name": "印刷用纸", "parent": "631"},
"758": {"name": "书写、制图及复制用纸", "parent": "631"},
"759": {"name": "包装用纸", "parent": "631"},
"760": {"name": "生活、卫生及装饰用纸", "parent": "631"},
"761": {"name": "技术用纸", "parent": "631"},
"762": {"name": "加工纸原纸", "parent": "631"},
"763": {"name": "食品包装", "parent": "632"},
"764": {"name": "医药包装", "parent": "632"},
"765": {"name": "日化包装", "parent": "632"},
"766": {"name": "物流包装", "parent": "632"},
"767": {"name": "礼品包装", "parent": "632"},
"768": {"name": "电子五金包装", "parent": "632"},
"769": {"name": "汽车服务", "parent": "525"},
"770": {"name": "汽车保养", "parent": "769"},
"771": {"name": "租车", "parent": "769"},
"773": {"name": "出租车", "parent": "769"},
"774": {"name": "代驾", "parent": "769"},
"775": {"name": "发电", "parent": "666"},
"777": {"name": "输配电", "parent": "666"},
"779": {"name": "风电", "parent": "667"},
"780": {"name": "光伏/太阳能", "parent": "667"},
"781": {"name": "生物质发电", "parent": "667"},
"782": {"name": "煤化工", "parent": "667"},
"783": {"name": "垃圾发电", "parent": "667"},
"784": {"name": "核电", "parent": "667"},
"785": {"name": "能源矿产", "parent": "669"},
"786": {"name": "金属矿产", "parent": "669"},
"787": {"name": "非金属矿产", "parent": "669"},
"788": {"name": "水气矿产", "parent": "669"},
"789": {"name": "锅炉", "parent": "775"},
"790": {"name": "发电机", "parent": "775"},
"791": {"name": "汽轮机", "parent": "775"},
"792": {"name": "燃机", "parent": "775"},
"793": {"name": "冷却", "parent": "775"},
"794": {"name": "电力设计院", "parent": "775"},
"795": {"name": "高压输配电", "parent": "777"},
"796": {"name": "中压输配电", "parent": "777"},
"797": {"name": "低压输配电", "parent": "777"},
"798": {"name": "继电保护", "parent": "777"},
"799": {"name": "智能电网", "parent": "777"},
"800": {"name": "小学", "parent": "516"},
"801": {"name": "电动车", "parent": "519"},
"802": {"name": "皮具箱包", "parent": "518"},
"803": {"name": "医药制造", "parent": "522"},
"804": {"name": "电器销售", "parent": "536"},
"805": {"name": "塑料制品", "parent": "527"},
"806": {"name": "公益基金会", "parent": "530"},
"807": {"name": "美发服务", "parent": "525"},
"808": {"name": "农业养殖", "parent": "531"},
"809": {"name": "金融服务", "parent": "513"},
"810": {"name": "商业地产综合体", "parent": "514"},
"811": {"name": "美容服务", "parent": "525"},
"812": {"name": "灯饰", "parent": "518"},
"813": {"name": "油墨颜料产品", "parent": "527"},
"814": {"name": "眼镜制造", "parent": "518"},
"815": {"name": "农业生物技术", "parent": "531"},
"816": {"name": "体育用品", "parent": "518"},
"817": {"name": "保健用品", "parent": "524"},
"818": {"name": "化学化工产品", "parent": "527"},
"819": {"name": "饲料", "parent": "531"},
"821": {"name": "保安服务", "parent": "525"},
"822": {"name": "干细胞技术", "parent": "522"},
"824": {"name": "农药化肥", "parent": "527"},
"825": {"name": "卫生洁具", "parent": "518"},
"826": {"name": "体育器材、场馆", "parent": "518"},
"827": {"name": "饲料加工", "parent": "531"},
"828": {"name": "测绘服务", "parent": "529"},
"830": {"name": "金属船舶制造", "parent": "519"},
"831": {"name": "基因工程", "parent": "522"},
"832": {"name": "花卉服务", "parent": "536"},
"833": {"name": "农业种植", "parent": "531"},
"834": {"name": "皮革制品", "parent": "518"},
"835": {"name": "地理信息加工服务", "parent": "529"},
"836": {"name": "机器人", "parent": "519"},
"837": {"name": "礼品", "parent": "518"},
"838": {"name": "理发及美容服务", "parent": "525"},
"839": {"name": "其他清洁服务", "parent": "525"},
"840": {"name": "硅胶材料", "parent": "527"},
"841": {"name": "茶叶销售", "parent": "518"},
"842": {"name": "彩票活动", "parent": "529"},
"843": {"name": "化妆培训", "parent": "516"},
"844": {"name": "鞋业", "parent": "518"},
"845": {"name": "酒店用品", "parent": "518"},
"846": {"name": "复合材料", "parent": "527"},
"847": {"name": "房地产工程建设", "parent": "548"},
"848": {"name": "知识产权服务", "parent": "559"},
"849": {"name": "新型建材", "parent": "627"},
"850": {"name": "企业投资咨询", "parent": "567"},
"851": {"name": "含乳饮料和植物蛋白饮料制造", "parent": "594"},
"852": {"name": "汽车检测设备", "parent": "629"},
"853": {"name": "手机通讯器材", "parent": "417"},
"854": {"name": "环保材料", "parent": "672"},
"855": {"name": "交通设施", "parent": "554"},
"856": {"name": "电子器件", "parent": "419"},
"857": {"name": "啤酒", "parent": "594"},
"858": {"name": "生态旅游", "parent": "657"},
"859": {"name": "自动化设备", "parent": "626"},
"860": {"name": "软件开发", "parent": "414"},
"861": {"name": "葡萄酒销售", "parent": "594"},
"862": {"name": "钢材", "parent": "633"},
"863": {"name": "餐饮培训", "parent": "656"},
"864": {"name": "速冻食品", "parent": "593"},
"865": {"name": "空气环保", "parent": "672"},
"866": {"name": "互联网房地产经纪服务", "parent": "550"},
"867": {"name": "食品添加剂", "parent": "593"},
"868": {"name": "演艺传播", "parent": "585"},
"869": {"name": "信用卡", "parent": "537"},
"870": {"name": "报纸期刊广告", "parent": "579"},
"871": {"name": "摄影", "parent": "525"},
"872": {"name": "手机软件", "parent": "414"},
"873": {"name": "地坪建材", "parent": "627"},
"874": {"name": "企业管理咨询", "parent": "567"},
"875": {"name": "幼儿教育", "parent": "570"},
"876": {"name": "系统集成", "parent": "416"},
"877": {"name": "皮革服饰", "parent": "597"},
"878": {"name": "保健食品", "parent": "593"},
"879": {"name": "叉车", "parent": "620"},
"880": {"name": "厨卫电器", "parent": "601"},
"882": {"name": "地暖设备", "parent": "627"},
"883": {"name": "钢结构制造", "parent": "548"},
"884": {"name": "投影机", "parent": "606"},
"885": {"name": "啤酒销售", "parent": "594"},
"886": {"name": "度假村旅游", "parent": "657"},
"887": {"name": "电力元件设备", "parent": "626"},
"888": {"name": "管理软件", "parent": "414"},
"889": {"name": "轴承", "parent": "628"},
"890": {"name": "餐饮设备", "parent": "656"},
"891": {"name": "肉制品及副产品加工", "parent": "593"},
"892": {"name": "艺术收藏品投资交易", "parent": "584"},
"893": {"name": "净水器", "parent": "601"},
"894": {"name": "进口食品", "parent": "593"},
"895": {"name": "娱乐文化传播", "parent": "585"},
"896": {"name": "文化传播", "parent": "585"},
"897": {"name": "商旅传媒", "parent": "580"},
"898": {"name": "广告设计制作", "parent": "579"},
"899": {"name": "金属丝绳及其制品制造", "parent": "627"},
"900": {"name": "建筑涂料", "parent": "627"},
"901": {"name": "抵押贷款", "parent": "543"},
"902": {"name": "早教", "parent": "570"},
"903": {"name": "电影放映", "parent": "583"},
"904": {"name": "内衣服饰", "parent": "597"},
"905": {"name": "无线网络通信", "parent": "418"},
"906": {"name": "记忆卡", "parent": "415"},
"907": {"name": "女装服饰", "parent": "597"},
"908": {"name": "建筑机械", "parent": "620"},
"909": {"name": "制冷电器", "parent": "601"},
"910": {"name": "通信设备", "parent": "417"},
"911": {"name": "空调设备", "parent": "601"},
"912": {"name": "建筑装饰", "parent": "553"},
"913": {"name": "办公设备", "parent": "603"},
"916": {"name": "数据处理软件", "parent": "414"},
"917": {"name": "葡萄酒贸易", "parent": "594"},
"918": {"name": "通讯器材", "parent": "417"},
"919": {"name": "铜业", "parent": "633"},
"920": {"name": "食堂", "parent": "656"},
"921": {"name": "糖果零食", "parent": "593"},
"922": {"name": "文化艺术传播", "parent": "584"},
"923": {"name": "太阳能电器", "parent": "601"},
"924": {"name": "药品零售", "parent": "645"},
"925": {"name": "果蔬食品", "parent": "593"},
"926": {"name": "文化活动策划", "parent": "585"},
"928": {"name": "汽车广告", "parent": "657"},
"929": {"name": "条码设备", "parent": "630"},
"930": {"name": "建筑石材", "parent": "627"},
"931": {"name": "贵金属", "parent": "545"},
"932": {"name": "体育", "parent": "660"},
"933": {"name": "金融信息服务", "parent": "414"},
"934": {"name": "玻璃建材", "parent": "627"},
"935": {"name": "家教", "parent": "569"},
"936": {"name": "歌舞厅娱乐活动", "parent": "586"},
"937": {"name": "计算机服务器", "parent": "415"},
"938": {"name": "管道", "parent": "627"},
"939": {"name": "婴幼儿服饰", "parent": "597"},
"940": {"name": "热水器", "parent": "601"},
"941": {"name": "计算机及零部件制造", "parent": "415"},
"942": {"name": "钢铁贸易", "parent": "633"},
"944": {"name": "包装材料", "parent": "632"},
"945": {"name": "计算机办公设备", "parent": "603"},
"946": {"name": "白酒", "parent": "594"},
"948": {"name": "发动机", "parent": "620"},
"949": {"name": "快餐服务", "parent": "656"},
"950": {"name": "酒类销售", "parent": "594"},
"951": {"name": "电子产品、机电设备", "parent": "626"},
"952": {"name": "激光设备", "parent": "626"},
"953": {"name": "餐饮策划", "parent": "656"},
"954": {"name": "饮料、食品", "parent": "594"},
"955": {"name": "文化娱乐经纪", "parent": "585"},
"956": {"name": "天然气", "parent": "665"},
"957": {"name": "农副食品", "parent": "593"},
"958": {"name": "艺术表演", "parent": "585"},
"959": {"name": "石膏、水泥制品及类似制品制造", "parent": "627"},
"960": {"name": "橱柜", "parent": "602"},
"961": {"name": "管理培训", "parent": "577"},
"962": {"name": "男装服饰", "parent": "597"},
"963": {"name": "化肥制造", "parent": "675"},
"964": {"name": "童装服饰", "parent": "597"},
"965": {"name": "电源电池", "parent": "626"},
"966": {"name": "家电维修", "parent": "664"},
"967": {"name": "光电子器件", "parent": "419"},
"968": {"name": "旅行社服务", "parent": "657"},
"969": {"name": "电线、电缆制造", "parent": "626"},
"970": {"name": "软件开发、信息系统集成", "parent": "419"},
"971": {"name": "白酒制造", "parent": "594"},
"973": {"name": "甜品服务", "parent": "656"},
"974": {"name": "糕点、面包制造", "parent": "593"},
"975": {"name": "木工机械", "parent": "620"},
"976": {"name": "酒吧服务", "parent": "656"},
"977": {"name": "火腿肠", "parent": "593"},
"978": {"name": "广告策划推广", "parent": "579"},
"979": {"name": "新能源产品和生产装备制造", "parent": "667"},
"980": {"name": "调味品", "parent": "593"},
"981": {"name": "礼仪表演", "parent": "585"},
"982": {"name": "劳务派遣", "parent": "560"},
"983": {"name": "建材零售", "parent": "627"},
"984": {"name": "商品交易中心", "parent": "545"},
"985": {"name": "体育推广", "parent": "585"},
"986": {"name": "茶饮料及其他饮料制造", "parent": "594"},
"987": {"name": "金属建材", "parent": "627"},
"988": {"name": "职业技能培训", "parent": "571"},
"989": {"name": "网吧活动", "parent": "586"},
"990": {"name": "洗衣服务", "parent": "658"},
"991": {"name": "管道工程", "parent": "554"},
"992": {"name": "通信工程", "parent": "417"},
"993": {"name": "电子元器件", "parent": "626"},
"994": {"name": "电子设备", "parent": "419"},
"995": {"name": "茶馆服务", "parent": "656"},
"996": {"name": "旅游开发", "parent": "657"},
"997": {"name": "视频通讯", "parent": "417"},
"998": {"name": "白酒销售", "parent": "594"},
"1000": {"name": "咖啡馆服务", "parent": "656"},
"1001": {"name": "食品零售", "parent": "593"},
"1002": {"name": "健康疗养旅游", "parent": "655"},
"1003": {"name": "粮油食品", "parent": "593"},
"1004": {"name": "儿童教育影视", "parent": "583"},
"1005": {"name": "新能源发电", "parent": "667"},
"1006": {"name": "旅游策划", "parent": "657"},
"1007": {"name": "绘画", "parent": "575"},
"1008": {"name": "方便面及其他方便食品", "parent": "593"},
"1009": {"name": "房地产经纪", "parent": "550"},
"1010": {"name": "母婴家政", "parent": "661"},
"1011": {"name": "居家养老健康服务", "parent": "661"},
"1012": {"name": "文化艺术投资", "parent": "545"},
"1013": {"name": "运动健身", "parent": "660"},
"1014": {"name": "瓶(罐)装饮用水制造", "parent": "594"},
"1015": {"name": "金属门窗", "parent": "627"},
"1016": {"name": "机动车检测", "parent": "563"},
"1017": {"name": "货物运输", "parent": "634"},
"1018": {"name": "服饰专卖", "parent": "690"},
"1019": {"name": "酒店服装", "parent": "597"},
"1020": {"name": "通讯软件", "parent": "417"},
"1021": {"name": "消防工程", "parent": "554"},
"1022": {"name": "嵌入式电子系统", "parent": "419"},
"1023": {"name": "航空票务", "parent": "636"},
"1024": {"name": "电气设备", "parent": "626"},
"1025": {"name": "酒业贸易", "parent": "594"},
"1027": {"name": "其他饮料及冷饮服务", "parent": "656"},
"1028": {"name": "乳制品", "parent": "593"},
"1029": {"name": "新闻期刊出版", "parent": "588"},
"1030": {"name": "水污染治理", "parent": "672"},
"1031": {"name": "谷物食品", "parent": "593"},
"1032": {"name": "数字动漫设计制造服务", "parent": "590"},
"1033": {"name": "医院", "parent": "646"},
"1034": {"name": "旅游广告", "parent": "657"},
"1035": {"name": "办公家具", "parent": "602"},
"1036": {"name": "房地产营销策划", "parent": "550"},
"1037": {"name": "保洁家政", "parent": "661"},
"1038": {"name": "水泥制造", "parent": "627"},
"1039": {"name": "市场研究咨询", "parent": "567"},
"1040": {"name": "驾校", "parent": "571"},
"1041": {"name": "正餐服务", "parent": "656"},
"1043": {"name": "机动车燃油", "parent": "665"},
"1044": {"name": "食品", "parent": "593"},
"1045": {"name": "新能源汽车", "parent": "629"},
"1046": {"name": "手机无线网络推广", "parent": "417"},
"1047": {"name": "环保设备", "parent": "672"},
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | true |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/resume/entities/corporations.py | deepdoc/parser/resume/entities/corporations.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import re
import json
import os
import pandas as pd
from rag.nlp import rag_tokenizer
from . import regions
current_file_path = os.path.dirname(os.path.abspath(__file__))
GOODS = pd.read_csv(
os.path.join(current_file_path, "res/corp_baike_len.csv"), sep="\t", header=0
).fillna(0)
GOODS["cid"] = GOODS["cid"].astype(str)
GOODS = GOODS.set_index(["cid"])
CORP_TKS = json.load(
open(os.path.join(current_file_path, "res/corp.tks.freq.json"), "r",encoding="utf-8")
)
GOOD_CORP = json.load(open(os.path.join(current_file_path, "res/good_corp.json"), "r",encoding="utf-8"))
CORP_TAG = json.load(open(os.path.join(current_file_path, "res/corp_tag.json"), "r",encoding="utf-8"))
def baike(cid, default_v=0):
global GOODS
try:
return GOODS.loc[str(cid), "len"]
except Exception:
pass
return default_v
def corpNorm(nm, add_region=True):
global CORP_TKS
if not nm or not isinstance(nm, str):
return ""
nm = rag_tokenizer.tradi2simp(rag_tokenizer.strQ2B(nm)).lower()
nm = re.sub(r"&", "&", nm)
nm = re.sub(r"[\(\)()\+'\"\t \*\\【】-]+", " ", nm)
nm = re.sub(
r"([—-]+.*| +co\..*|corp\..*| +inc\..*| +ltd.*)", "", nm, count=10000, flags=re.IGNORECASE
)
nm = re.sub(
r"(计算机|技术|(技术|科技|网络)*有限公司|公司|有限|研发中心|中国|总部)$",
"",
nm,
count=10000,
flags=re.IGNORECASE,
)
if not nm or (len(nm) < 5 and not regions.isName(nm[0:2])):
return nm
tks = rag_tokenizer.tokenize(nm).split()
reg = [t for i, t in enumerate(tks) if regions.isName(t) and (t != "中国" or i > 0)]
nm = ""
for t in tks:
if regions.isName(t) or t in CORP_TKS:
continue
if re.match(r"[0-9a-zA-Z\\,.]+", t) and re.match(r".*[0-9a-zA-Z\,.]+$", nm):
nm += " "
nm += t
r = re.search(r"^([^a-z0-9 \(\)&]{2,})[a-z ]{4,}$", nm.strip())
if r:
nm = r.group(1)
r = re.search(r"^([a-z ]{3,})[^a-z0-9 \(\)&]{2,}$", nm.strip())
if r:
nm = r.group(1)
return nm.strip() + (("" if not reg else "(%s)" % reg[0]) if add_region else "")
def rmNoise(n):
n = re.sub(r"[\((][^()()]+[))]", "", n)
n = re.sub(r"[,. &()()]+", "", n)
return n
GOOD_CORP = set([corpNorm(rmNoise(c), False) for c in GOOD_CORP])
for c, v in CORP_TAG.items():
cc = corpNorm(rmNoise(c), False)
if not cc:
logging.debug(c)
CORP_TAG = {corpNorm(rmNoise(c), False): v for c, v in CORP_TAG.items()}
def is_good(nm):
global GOOD_CORP
if nm.find("外派") >= 0:
return False
nm = rmNoise(nm)
nm = corpNorm(nm, False)
for n in GOOD_CORP:
if re.match(r"[0-9a-zA-Z]+$", n):
if n == nm:
return True
elif nm.find(n) >= 0:
return True
return False
def corp_tag(nm):
global CORP_TAG
nm = rmNoise(nm)
nm = corpNorm(nm, False)
for n in CORP_TAG.keys():
if re.match(r"[0-9a-zA-Z., ]+$", n):
if n == nm:
return CORP_TAG[n]
elif nm.find(n) >= 0:
if len(n) < 3 and len(nm) / len(n) >= 2:
continue
return CORP_TAG[n]
return []
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/resume/entities/degrees.py | deepdoc/parser/resume/entities/degrees.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
TBL = {
"94": "EMBA",
"6": "MBA",
"95": "MPA",
"92": "专升本",
"4": "专科",
"90": "中专",
"91": "中技",
"86": "初中",
"3": "博士",
"10": "博士后",
"1": "本科",
"2": "硕士",
"87": "职高",
"89": "高中",
}
TBL_ = {v: k for k, v in TBL.items()}
def get_name(id):
return TBL.get(str(id), "")
def get_id(nm):
if not nm:
return ""
return TBL_.get(nm.upper().strip(), "")
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/deepdoc/parser/resume/entities/__init__.py | deepdoc/parser/resume/entities/__init__.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/admin/server/services.py | admin/server/services.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import logging
import re
from werkzeug.security import check_password_hash
from common.constants import ActiveEnum
from api.db.services import UserService
from api.db.joint_services.user_account_service import create_new_user, delete_user_data
from api.db.services.canvas_service import UserCanvasService
from api.db.services.user_service import TenantService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.system_settings_service import SystemSettingsService
from api.utils.crypt import decrypt
from api.utils import health_utils
from api.common.exceptions import AdminException, UserAlreadyExistsError, UserNotFoundError
from config import SERVICE_CONFIGS
class UserMgr:
@staticmethod
def get_all_users():
users = UserService.get_all_users()
result = []
for user in users:
result.append({
'email': user.email,
'nickname': user.nickname,
'create_date': user.create_date,
'is_active': user.is_active,
'is_superuser': user.is_superuser,
})
return result
@staticmethod
def get_user_details(username):
# use email to query
users = UserService.query_user_by_email(username)
result = []
for user in users:
result.append({
'avatar': user.avatar,
'email': user.email,
'language': user.language,
'last_login_time': user.last_login_time,
'is_active': user.is_active,
'is_anonymous': user.is_anonymous,
'login_channel': user.login_channel,
'status': user.status,
'is_superuser': user.is_superuser,
'create_date': user.create_date,
'update_date': user.update_date
})
return result
@staticmethod
def create_user(username, password, role="user") -> dict:
# Validate the email address
if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,}$", username):
raise AdminException(f"Invalid email address: {username}!")
# Check if the email address is already used
if UserService.query(email=username):
raise UserAlreadyExistsError(username)
# Construct user info data
user_info_dict = {
"email": username,
"nickname": "", # ask user to edit it manually in settings.
"password": decrypt(password),
"login_channel": "password",
"is_superuser": role == "admin",
}
return create_new_user(user_info_dict)
@staticmethod
def delete_user(username):
# use email to delete
user_list = UserService.query_user_by_email(username)
if not user_list:
raise UserNotFoundError(username)
if len(user_list) > 1:
raise AdminException(f"Exist more than 1 user: {username}!")
usr = user_list[0]
return delete_user_data(usr.id)
@staticmethod
def update_user_password(username, new_password) -> str:
# use email to find user. check exist and unique.
user_list = UserService.query_user_by_email(username)
if not user_list:
raise UserNotFoundError(username)
elif len(user_list) > 1:
raise AdminException(f"Exist more than 1 user: {username}!")
# check new_password different from old.
usr = user_list[0]
psw = decrypt(new_password)
if check_password_hash(usr.password, psw):
return "Same password, no need to update!"
# update password
UserService.update_user_password(usr.id, psw)
return "Password updated successfully!"
@staticmethod
def update_user_activate_status(username, activate_status: str):
# use email to find user. check exist and unique.
user_list = UserService.query_user_by_email(username)
if not user_list:
raise UserNotFoundError(username)
elif len(user_list) > 1:
raise AdminException(f"Exist more than 1 user: {username}!")
# check activate status different from new
usr = user_list[0]
# format activate_status before handle
_activate_status = activate_status.lower()
target_status = {
'on': ActiveEnum.ACTIVE.value,
'off': ActiveEnum.INACTIVE.value,
}.get(_activate_status)
if not target_status:
raise AdminException(f"Invalid activate_status: {activate_status}")
if target_status == usr.is_active:
return f"User activate status is already {_activate_status}!"
# update is_active
UserService.update_user(usr.id, {"is_active": target_status})
return f"Turn {_activate_status} user activate status successfully!"
@staticmethod
def grant_admin(username: str):
# use email to find user. check exist and unique.
user_list = UserService.query_user_by_email(username)
if not user_list:
raise UserNotFoundError(username)
elif len(user_list) > 1:
raise AdminException(f"Exist more than 1 user: {username}!")
# check activate status different from new
usr = user_list[0]
if usr.is_superuser:
return f"{usr} is already superuser!"
# update is_active
UserService.update_user(usr.id, {"is_superuser": True})
return "Grant successfully!"
@staticmethod
def revoke_admin(username: str):
# use email to find user. check exist and unique.
user_list = UserService.query_user_by_email(username)
if not user_list:
raise UserNotFoundError(username)
elif len(user_list) > 1:
raise AdminException(f"Exist more than 1 user: {username}!")
# check activate status different from new
usr = user_list[0]
if not usr.is_superuser:
return f"{usr} isn't superuser, yet!"
# update is_active
UserService.update_user(usr.id, {"is_superuser": False})
return "Revoke successfully!"
class UserServiceMgr:
@staticmethod
def get_user_datasets(username):
# use email to find user.
user_list = UserService.query_user_by_email(username)
if not user_list:
raise UserNotFoundError(username)
elif len(user_list) > 1:
raise AdminException(f"Exist more than 1 user: {username}!")
# find tenants
usr = user_list[0]
tenants = TenantService.get_joined_tenants_by_user_id(usr.id)
tenant_ids = [m["tenant_id"] for m in tenants]
# filter permitted kb and owned kb
return KnowledgebaseService.get_all_kb_by_tenant_ids(tenant_ids, usr.id)
@staticmethod
def get_user_agents(username):
# use email to find user.
user_list = UserService.query_user_by_email(username)
if not user_list:
raise UserNotFoundError(username)
elif len(user_list) > 1:
raise AdminException(f"Exist more than 1 user: {username}!")
# find tenants
usr = user_list[0]
tenants = TenantService.get_joined_tenants_by_user_id(usr.id)
tenant_ids = [m["tenant_id"] for m in tenants]
# filter permitted agents and owned agents
res = UserCanvasService.get_all_agents_by_tenant_ids(tenant_ids, usr.id)
return [{
'title': r['title'],
'permission': r['permission'],
'canvas_category': r['canvas_category'].split('_')[0],
'avatar': r['avatar']
} for r in res]
class ServiceMgr:
@staticmethod
def get_all_services():
doc_engine = os.getenv('DOC_ENGINE', 'elasticsearch')
result = []
configs = SERVICE_CONFIGS.configs
for service_id, config in enumerate(configs):
config_dict = config.to_dict()
if config_dict['service_type'] == 'retrieval':
if config_dict['extra']['retrieval_type'] != doc_engine:
continue
try:
service_detail = ServiceMgr.get_service_details(service_id)
if "status" in service_detail:
config_dict['status'] = service_detail['status']
else:
config_dict['status'] = 'timeout'
except Exception as e:
logging.warning(f"Can't get service details, error: {e}")
config_dict['status'] = 'timeout'
if not config_dict['host']:
config_dict['host'] = '-'
if not config_dict['port']:
config_dict['port'] = '-'
result.append(config_dict)
return result
@staticmethod
def get_services_by_type(service_type_str: str):
raise AdminException("get_services_by_type: not implemented")
@staticmethod
def get_service_details(service_id: int):
service_idx = int(service_id)
configs = SERVICE_CONFIGS.configs
if service_idx < 0 or service_idx >= len(configs):
raise AdminException(f"invalid service_index: {service_idx}")
service_config = configs[service_idx]
service_info = {'name': service_config.name, 'detail_func_name': service_config.detail_func_name}
detail_func = getattr(health_utils, service_info.get('detail_func_name'))
res = detail_func()
res.update({'service_name': service_info.get('name')})
return res
@staticmethod
def shutdown_service(service_id: int):
raise AdminException("shutdown_service: not implemented")
@staticmethod
def restart_service(service_id: int):
raise AdminException("restart_service: not implemented")
class SettingsMgr:
@staticmethod
def get_all():
settings = SystemSettingsService.get_all()
result = []
for setting in settings:
result.append({
'name': setting.name,
'source': setting.source,
'data_type': setting.data_type,
'value': setting.value,
})
return result
@staticmethod
def get_by_name(name: str):
settings = SystemSettingsService.get_by_name(name)
if len(settings) == 0:
raise AdminException(f"Can't get setting: {name}")
result = []
for setting in settings:
result.append({
'name': setting.name,
'source': setting.source,
'data_type': setting.data_type,
'value': setting.value,
})
return result
@staticmethod
def update_by_name(name: str, value: str):
settings = SystemSettingsService.get_by_name(name)
if len(settings) == 1:
setting = settings[0]
setting.value = value
setting_dict = setting.to_dict()
SystemSettingsService.update_by_name(name, setting_dict)
elif len(settings) > 1:
raise AdminException(f"Can't update more than 1 setting: {name}")
else:
raise AdminException(f"No sett"
f"ing: {name}") | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/admin/server/models.py | admin/server/models.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/admin/server/exceptions.py | admin/server/exceptions.py | class AdminException(Exception):
def __init__(self, message, code=400):
super().__init__(message)
self.code = code
self.message = message
class UserNotFoundError(AdminException):
def __init__(self, username):
super().__init__(f"User '{username}' not found", 404)
class UserAlreadyExistsError(AdminException):
def __init__(self, username):
super().__init__(f"User '{username}' already exists", 409)
class CannotDeleteAdminError(AdminException):
def __init__(self):
super().__init__("Cannot delete admin account", 403) | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/admin/server/config.py | admin/server/config.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import threading
from enum import Enum
from pydantic import BaseModel
from typing import Any
from common.config_utils import read_config
from urllib.parse import urlparse
class BaseConfig(BaseModel):
id: int
name: str
host: str
port: int
service_type: str
detail_func_name: str
def to_dict(self) -> dict[str, Any]:
return {'id': self.id, 'name': self.name, 'host': self.host, 'port': self.port,
'service_type': self.service_type}
class ServiceConfigs:
configs = list[BaseConfig]
def __init__(self):
self.configs = []
self.lock = threading.Lock()
SERVICE_CONFIGS = ServiceConfigs
class ServiceType(Enum):
METADATA = "metadata"
RETRIEVAL = "retrieval"
MESSAGE_QUEUE = "message_queue"
RAGFLOW_SERVER = "ragflow_server"
TASK_EXECUTOR = "task_executor"
FILE_STORE = "file_store"
class MetaConfig(BaseConfig):
meta_type: str
def to_dict(self) -> dict[str, Any]:
result = super().to_dict()
if 'extra' not in result:
result['extra'] = dict()
extra_dict = result['extra'].copy()
extra_dict['meta_type'] = self.meta_type
result['extra'] = extra_dict
return result
class MySQLConfig(MetaConfig):
username: str
password: str
def to_dict(self) -> dict[str, Any]:
result = super().to_dict()
if 'extra' not in result:
result['extra'] = dict()
extra_dict = result['extra'].copy()
extra_dict['username'] = self.username
extra_dict['password'] = self.password
result['extra'] = extra_dict
return result
class PostgresConfig(MetaConfig):
def to_dict(self) -> dict[str, Any]:
result = super().to_dict()
if 'extra' not in result:
result['extra'] = dict()
return result
class RetrievalConfig(BaseConfig):
retrieval_type: str
def to_dict(self) -> dict[str, Any]:
result = super().to_dict()
if 'extra' not in result:
result['extra'] = dict()
extra_dict = result['extra'].copy()
extra_dict['retrieval_type'] = self.retrieval_type
result['extra'] = extra_dict
return result
class InfinityConfig(RetrievalConfig):
db_name: str
def to_dict(self) -> dict[str, Any]:
result = super().to_dict()
if 'extra' not in result:
result['extra'] = dict()
extra_dict = result['extra'].copy()
extra_dict['db_name'] = self.db_name
result['extra'] = extra_dict
return result
class ElasticsearchConfig(RetrievalConfig):
username: str
password: str
def to_dict(self) -> dict[str, Any]:
result = super().to_dict()
if 'extra' not in result:
result['extra'] = dict()
extra_dict = result['extra'].copy()
extra_dict['username'] = self.username
extra_dict['password'] = self.password
result['extra'] = extra_dict
return result
class MessageQueueConfig(BaseConfig):
mq_type: str
def to_dict(self) -> dict[str, Any]:
result = super().to_dict()
if 'extra' not in result:
result['extra'] = dict()
extra_dict = result['extra'].copy()
extra_dict['mq_type'] = self.mq_type
result['extra'] = extra_dict
return result
class RedisConfig(MessageQueueConfig):
database: int
password: str
def to_dict(self) -> dict[str, Any]:
result = super().to_dict()
if 'extra' not in result:
result['extra'] = dict()
extra_dict = result['extra'].copy()
extra_dict['database'] = self.database
extra_dict['password'] = self.password
result['extra'] = extra_dict
return result
class RabbitMQConfig(MessageQueueConfig):
def to_dict(self) -> dict[str, Any]:
result = super().to_dict()
if 'extra' not in result:
result['extra'] = dict()
return result
class RAGFlowServerConfig(BaseConfig):
def to_dict(self) -> dict[str, Any]:
result = super().to_dict()
if 'extra' not in result:
result['extra'] = dict()
return result
class TaskExecutorConfig(BaseConfig):
message_queue_type: str
def to_dict(self) -> dict[str, Any]:
result = super().to_dict()
if 'extra' not in result:
result['extra'] = dict()
result['extra']['message_queue_type'] = self.message_queue_type
return result
class FileStoreConfig(BaseConfig):
store_type: str
def to_dict(self) -> dict[str, Any]:
result = super().to_dict()
if 'extra' not in result:
result['extra'] = dict()
extra_dict = result['extra'].copy()
extra_dict['store_type'] = self.store_type
result['extra'] = extra_dict
return result
class MinioConfig(FileStoreConfig):
user: str
password: str
def to_dict(self) -> dict[str, Any]:
result = super().to_dict()
if 'extra' not in result:
result['extra'] = dict()
extra_dict = result['extra'].copy()
extra_dict['user'] = self.user
extra_dict['password'] = self.password
result['extra'] = extra_dict
return result
def load_configurations(config_path: str) -> list[BaseConfig]:
raw_configs = read_config(config_path)
configurations = []
ragflow_count = 0
id_count = 0
for k, v in raw_configs.items():
match k:
case "ragflow":
name: str = f'ragflow_{ragflow_count}'
host: str = v['host']
http_port: int = v['http_port']
config = RAGFlowServerConfig(id=id_count, name=name, host=host, port=http_port,
service_type="ragflow_server",
detail_func_name="check_ragflow_server_alive")
configurations.append(config)
id_count += 1
case "es":
name: str = 'elasticsearch'
url = v['hosts']
parsed = urlparse(url)
host: str = parsed.hostname
port: int = parsed.port
username: str = v.get('username')
password: str = v.get('password')
config = ElasticsearchConfig(id=id_count, name=name, host=host, port=port, service_type="retrieval",
retrieval_type="elasticsearch",
username=username, password=password,
detail_func_name="get_es_cluster_stats")
configurations.append(config)
id_count += 1
case "infinity":
name: str = 'infinity'
url = v['uri']
parts = url.split(':', 1)
host = parts[0]
port = int(parts[1])
database: str = v.get('db_name', 'default_db')
config = InfinityConfig(id=id_count, name=name, host=host, port=port, service_type="retrieval",
retrieval_type="infinity",
db_name=database, detail_func_name="get_infinity_status")
configurations.append(config)
id_count += 1
case "minio":
name: str = 'minio'
url = v['host']
parts = url.split(':', 1)
host = parts[0]
port = int(parts[1])
user = v.get('user')
password = v.get('password')
config = MinioConfig(id=id_count, name=name, host=host, port=port, user=user, password=password,
service_type="file_store",
store_type="minio", detail_func_name="check_minio_alive")
configurations.append(config)
id_count += 1
case "redis":
name: str = 'redis'
url = v['host']
parts = url.split(':', 1)
host = parts[0]
port = int(parts[1])
password = v.get('password')
db: int = v.get('db')
config = RedisConfig(id=id_count, name=name, host=host, port=port, password=password, database=db,
service_type="message_queue", mq_type="redis", detail_func_name="get_redis_info")
configurations.append(config)
id_count += 1
case "mysql":
name: str = 'mysql'
host: str = v.get('host')
port: int = v.get('port')
username = v.get('user')
password = v.get('password')
config = MySQLConfig(id=id_count, name=name, host=host, port=port, username=username, password=password,
service_type="meta_data", meta_type="mysql", detail_func_name="get_mysql_status")
configurations.append(config)
id_count += 1
case "admin":
pass
case "task_executor":
name: str = 'task_executor'
host: str = v.get('host', '')
port: int = v.get('port', 0)
message_queue_type: str = v.get('message_queue_type')
config = TaskExecutorConfig(id=id_count, name=name, host=host, port=port, message_queue_type=message_queue_type,
service_type="task_executor", detail_func_name="check_task_executor_alive")
configurations.append(config)
id_count += 1
case _:
logging.warning(f"Unknown configuration key: {k}")
continue
return configurations
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/admin/server/responses.py | admin/server/responses.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import jsonify
def success_response(data=None, message="Success", code=0):
return jsonify({
"code": code,
"message": message,
"data": data
}), 200
def error_response(message="Error", code=-1, data=None):
return jsonify({
"code": code,
"message": message,
"data": data
}), 400
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/admin/server/routes.py | admin/server/routes.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import secrets
from flask import Blueprint, request
from flask_login import current_user, login_required, logout_user
from auth import login_verify, login_admin, check_admin_auth
from responses import success_response, error_response
from services import UserMgr, ServiceMgr, UserServiceMgr, SettingsMgr
from roles import RoleMgr
from api.common.exceptions import AdminException
from common.versions import get_ragflow_version
admin_bp = Blueprint('admin', __name__, url_prefix='/api/v1/admin')
@admin_bp.route('/ping', methods=['GET'])
def ping():
return success_response('PONG')
@admin_bp.route('/login', methods=['POST'])
def login():
    """Authenticate an admin.

    Expects a JSON body {"email": ..., "password": ...}; the password is
    RSA-encrypted by the client and decrypted inside login_admin.
    """
    # Bug fix: request.json raises UnsupportedMediaType (415) in Flask >= 2.1
    # when the body is not JSON; get_json(silent=True) returns None instead,
    # so a malformed request yields the intended 400 response.
    payload = request.get_json(silent=True)
    if not payload:
        return error_response('Authorize admin failed.', 400)
    try:
        email = payload.get("email", "")
        password = payload.get("password", "")
        return login_admin(email, password)
    except Exception as e:
        return error_response(str(e), 500)
@admin_bp.route('/logout', methods=['GET'])
@login_required
def logout():
    """End the admin session and invalidate the stored access token."""
    try:
        # Overwrite the persisted token so it can never authenticate again.
        current_user.access_token = f"INVALID_{secrets.token_hex(16)}"
        current_user.save()
        logout_user()
        return success_response(True)
    except Exception as exc:
        return error_response(str(exc), 500)
@admin_bp.route('/auth', methods=['GET'])
@login_verify
def auth_admin():
    """Confirm that basic-auth admin verification (login_verify) succeeded."""
    try:
        return success_response(None, "Admin is authorized", 0)
    except Exception as exc:
        return error_response(str(exc), 500)
@admin_bp.route('/users', methods=['GET'])
@login_required
@check_admin_auth
def list_users():
    """Return every user account."""
    try:
        return success_response(UserMgr.get_all_users(), "Get all users", 0)
    except Exception as exc:
        return error_response(str(exc), 500)
@admin_bp.route('/users', methods=['POST'])
@login_required
@check_admin_auth
def create_user():
    """Create a user account.

    JSON body: username (required), password (required),
    role (optional, defaults to "user").
    """
    try:
        data = request.get_json()
        if not data or 'username' not in data or 'password' not in data:
            return error_response("Username and password are required", 400)
        username = data['username']
        password = data['password']
        role = data.get('role', 'user')
        res = UserMgr.create_user(username, password, role)
        if res["success"]:
            user_info = res["user_info"]
            # Never echo credentials back. Bug fix: pop with a default so a
            # missing "password" key cannot raise KeyError here.
            user_info.pop("password", None)
            return success_response(user_info, "User created successfully")
        else:
            return error_response("create user failed")
    except AdminException as e:
        return error_response(e.message, e.code)
    except Exception as e:
        return error_response(str(e))
@admin_bp.route('/users/<username>', methods=['DELETE'])
@login_required
@check_admin_auth
def delete_user(username):
    """Delete the given user account."""
    try:
        outcome = UserMgr.delete_user(username)
        if outcome["success"]:
            return success_response(None, outcome["message"])
        return error_response(outcome["message"])
    except AdminException as exc:
        return error_response(exc.message, exc.code)
    except Exception as exc:
        return error_response(str(exc), 500)
@admin_bp.route('/users/<username>/password', methods=['PUT'])
@login_required
@check_admin_auth
def change_password(username):
    """Set a new password for a user. JSON body: new_password."""
    try:
        body = request.get_json()
        if not body or 'new_password' not in body:
            return error_response("New password is required", 400)
        msg = UserMgr.update_user_password(username, body['new_password'])
        return success_response(None, msg)
    except AdminException as exc:
        return error_response(exc.message, exc.code)
    except Exception as exc:
        return error_response(str(exc), 500)
@admin_bp.route('/users/<username>/activate', methods=['PUT'])
@login_required
@check_admin_auth
def alter_user_activate_status(username):
    """Enable or disable a user. JSON body: activate_status."""
    try:
        body = request.get_json()
        if not body or 'activate_status' not in body:
            return error_response("Activation status is required", 400)
        msg = UserMgr.update_user_activate_status(username, body['activate_status'])
        return success_response(None, msg)
    except AdminException as exc:
        return error_response(exc.message, exc.code)
    except Exception as exc:
        return error_response(str(exc), 500)
@admin_bp.route('/users/<username>/admin', methods=['PUT'])
@login_required
@check_admin_auth
def grant_admin(username):
    """Promote a user to superuser; self-promotion is rejected with 409."""
    try:
        if current_user.email == username:
            return error_response(f"can't grant current user: {username}", 409)
        msg = UserMgr.grant_admin(username)
        return success_response(None, msg)
    except AdminException as exc:
        return error_response(exc.message, exc.code)
    except Exception as exc:
        return error_response(str(exc), 500)
@admin_bp.route('/users/<username>/admin', methods=['DELETE'])
@login_required
@check_admin_auth
def revoke_admin(username):
    """Remove superuser status from a user; self-demotion is rejected."""
    try:
        if current_user.email == username:
            # Bug fix: the message previously said "grant" on the revoke path.
            return error_response(f"can't revoke current user: {username}", 409)
        msg = UserMgr.revoke_admin(username)
        return success_response(None, msg)
    except AdminException as e:
        return error_response(e.message, e.code)
    except Exception as e:
        return error_response(str(e), 500)
@admin_bp.route('/users/<username>', methods=['GET'])
@login_required
@check_admin_auth
def get_user_details(username):
    """Return profile details for one user."""
    try:
        return success_response(UserMgr.get_user_details(username))
    except AdminException as exc:
        return error_response(exc.message, exc.code)
    except Exception as exc:
        return error_response(str(exc), 500)


@admin_bp.route('/users/<username>/datasets', methods=['GET'])
@login_required
@check_admin_auth
def get_user_datasets(username):
    """Return the datasets owned by one user."""
    try:
        return success_response(UserServiceMgr.get_user_datasets(username))
    except AdminException as exc:
        return error_response(exc.message, exc.code)
    except Exception as exc:
        return error_response(str(exc), 500)


@admin_bp.route('/users/<username>/agents', methods=['GET'])
@login_required
@check_admin_auth
def get_user_agents(username):
    """Return the agents owned by one user."""
    try:
        return success_response(UserServiceMgr.get_user_agents(username))
    except AdminException as exc:
        return error_response(exc.message, exc.code)
    except Exception as exc:
        return error_response(str(exc), 500)
@admin_bp.route('/services', methods=['GET'])
@login_required
@check_admin_auth
def get_services():
    """List every registered service."""
    try:
        return success_response(ServiceMgr.get_all_services(), "Get all services", 0)
    except Exception as exc:
        return error_response(str(exc), 500)
@admin_bp.route('/service_types/<service_type>', methods=['GET'])
@login_required
@check_admin_auth
def get_services_by_type(service_type):
    """List services of the given type.

    Bug fix: the view parameter must match the URL variable <service_type>.
    Flask passes URL variables by keyword, so the old parameter name
    ``service_type_str`` made every request fail with a TypeError.
    """
    try:
        services = ServiceMgr.get_services_by_type(service_type)
        return success_response(services)
    except Exception as e:
        return error_response(str(e), 500)
@admin_bp.route('/services/<service_id>', methods=['GET'])
@login_required
@check_admin_auth
def get_service(service_id):
    """Return details for one service."""
    try:
        return success_response(ServiceMgr.get_service_details(service_id))
    except Exception as exc:
        return error_response(str(exc), 500)


@admin_bp.route('/services/<service_id>', methods=['DELETE'])
@login_required
@check_admin_auth
def shutdown_service(service_id):
    """Shut a service down."""
    try:
        return success_response(ServiceMgr.shutdown_service(service_id))
    except Exception as exc:
        return error_response(str(exc), 500)


@admin_bp.route('/services/<service_id>', methods=['PUT'])
@login_required
@check_admin_auth
def restart_service(service_id):
    """Restart a service."""
    try:
        return success_response(ServiceMgr.restart_service(service_id))
    except Exception as exc:
        return error_response(str(exc), 500)
@admin_bp.route('/roles', methods=['POST'])
@login_required
@check_admin_auth
def create_role():
    """Create a role.

    JSON body: role_name (required), description (optional).
    """
    try:
        data = request.get_json()
        if not data or 'role_name' not in data:
            return error_response("Role name is required", 400)
        role_name: str = data['role_name']
        # Bug fix: description is optional (the CLI grammar allows omitting
        # it); data['description'] raised KeyError -> 500 when absent.
        description: str = data.get('description', '')
        res = RoleMgr.create_role(role_name, description)
        return success_response(res)
    except Exception as e:
        return error_response(str(e), 500)
@admin_bp.route('/roles/<role_name>', methods=['PUT'])
@login_required
@check_admin_auth
def update_role(role_name: str):
    """Change a role's description. JSON body: description."""
    try:
        body = request.get_json()
        if not body or 'description' not in body:
            return error_response("Role description is required", 400)
        return success_response(RoleMgr.update_role_description(role_name, body['description']))
    except Exception as exc:
        return error_response(str(exc), 500)


@admin_bp.route('/roles/<role_name>', methods=['DELETE'])
@login_required
@check_admin_auth
def delete_role(role_name: str):
    """Delete a role."""
    try:
        return success_response(RoleMgr.delete_role(role_name))
    except Exception as exc:
        return error_response(str(exc), 500)
@admin_bp.route('/roles', methods=['GET'])
@login_required
@check_admin_auth
def list_roles():
    """List all roles."""
    try:
        return success_response(RoleMgr.list_roles())
    except Exception as exc:
        return error_response(str(exc), 500)


@admin_bp.route('/roles/<role_name>/permission', methods=['GET'])
@login_required
@check_admin_auth
def get_role_permission(role_name: str):
    """Return the permissions attached to a role."""
    try:
        return success_response(RoleMgr.get_role_permission(role_name))
    except Exception as exc:
        return error_response(str(exc), 500)
@admin_bp.route('/roles/<role_name>/permission', methods=['POST'])
@login_required
@check_admin_auth
def grant_role_permission(role_name: str):
    """Grant actions on a resource to a role. JSON body: actions, resource."""
    try:
        body = request.get_json()
        if not body or 'actions' not in body or 'resource' not in body:
            return error_response("Permission is required", 400)
        return success_response(
            RoleMgr.grant_role_permission(role_name, body['actions'], body['resource'])
        )
    except Exception as exc:
        return error_response(str(exc), 500)


@admin_bp.route('/roles/<role_name>/permission', methods=['DELETE'])
@login_required
@check_admin_auth
def revoke_role_permission(role_name: str):
    """Revoke actions on a resource from a role. JSON body: actions, resource."""
    try:
        body = request.get_json()
        if not body or 'actions' not in body or 'resource' not in body:
            return error_response("Permission is required", 400)
        return success_response(
            RoleMgr.revoke_role_permission(role_name, body['actions'], body['resource'])
        )
    except Exception as exc:
        return error_response(str(exc), 500)
@admin_bp.route('/users/<user_name>/role', methods=['PUT'])
@login_required
@check_admin_auth
def update_user_role(user_name: str):
    """Assign a role to a user. JSON body: role_name."""
    try:
        body = request.get_json()
        if not body or 'role_name' not in body:
            return error_response("Role name is required", 400)
        return success_response(RoleMgr.update_user_role(user_name, body['role_name']))
    except Exception as exc:
        return error_response(str(exc), 500)


@admin_bp.route('/users/<user_name>/permission', methods=['GET'])
@login_required
@check_admin_auth
def get_user_permission(user_name: str):
    """Return the effective permissions of a user."""
    try:
        return success_response(RoleMgr.get_user_permission(user_name))
    except Exception as exc:
        return error_response(str(exc), 500)
@admin_bp.route('/variables', methods=['PUT'])
@login_required
@check_admin_auth
def set_variable():
    """Set a server variable. JSON body: var_name, var_value."""
    try:
        data = request.get_json()
        # Bug fix: the old guard was "not data and 'var_name' not in data",
        # which skipped the check for non-empty bodies and crashed with a
        # TypeError ('in' on None) for empty ones; 'or' short-circuits first.
        if not data or 'var_name' not in data:
            return error_response("Var name is required", 400)
        if 'var_value' not in data:
            return error_response("Var value is required", 400)
        var_name: str = data['var_name']
        var_value: str = data['var_value']
        SettingsMgr.update_by_name(var_name, var_value)
        return success_response(None, "Set variable successfully")
    except AdminException as e:
        return error_response(str(e), 400)
    except Exception as e:
        return error_response(str(e), 500)
@admin_bp.route('/variables', methods=['GET'])
@login_required
@check_admin_auth
def get_variable():
    """List all variables (empty request body) or fetch one (JSON: var_name)."""
    try:
        if request.content_length is None or request.content_length == 0:
            # No body: list every variable.
            res = list(SettingsMgr.get_all())
            return success_response(res)
        # Body present: fetch a single variable by name.
        data = request.get_json()
        # Bug fix: 'and' -> 'or'; the old condition never validated non-empty
        # bodies and raised TypeError on a None body.
        if not data or 'var_name' not in data:
            return error_response("Var name is required", 400)
        var_name: str = data['var_name']
        res = SettingsMgr.get_by_name(var_name)
        return success_response(res)
    except AdminException as e:
        return error_response(str(e), 400)
    except Exception as e:
        return error_response(str(e), 500)
@admin_bp.route('/version', methods=['GET'])
@login_required
@check_admin_auth
def show_version():
    """Report the running RAGFlow version string."""
    try:
        return success_response({"version": get_ragflow_version()})
    except Exception as exc:
        return error_response(str(exc), 500)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/admin/server/roles.py | admin/server/roles.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from typing import Dict, Any
from api.common.exceptions import AdminException
class RoleMgr:
    """Role-management facade.

    Every operation is currently unimplemented: each call logs the attempt
    and raises AdminException so callers get a clear "not implement" error.
    """

    @staticmethod
    def _not_implemented(error_msg: str):
        # Single exit path shared by all stub operations: log, then raise.
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def create_role(role_name: str, description: str):
        RoleMgr._not_implemented(f"not implement: create role: {role_name}, description: {description}")

    @staticmethod
    def update_role_description(role_name: str, description: str) -> Dict[str, Any]:
        RoleMgr._not_implemented(f"not implement: update role: {role_name} with description: {description}")

    @staticmethod
    def delete_role(role_name: str) -> Dict[str, Any]:
        RoleMgr._not_implemented(f"not implement: drop role: {role_name}")

    @staticmethod
    def list_roles() -> Dict[str, Any]:
        RoleMgr._not_implemented("not implement: list roles")

    @staticmethod
    def get_role_permission(role_name: str) -> Dict[str, Any]:
        RoleMgr._not_implemented(f"not implement: show role {role_name}")

    @staticmethod
    def grant_role_permission(role_name: str, actions: list, resource: str) -> Dict[str, Any]:
        RoleMgr._not_implemented(f"not implement: grant role {role_name} actions: {actions} on {resource}")

    @staticmethod
    def revoke_role_permission(role_name: str, actions: list, resource: str) -> Dict[str, Any]:
        RoleMgr._not_implemented(f"not implement: revoke role {role_name} actions: {actions} on {resource}")

    @staticmethod
    def update_user_role(user_name: str, role_name: str) -> Dict[str, Any]:
        RoleMgr._not_implemented(f"not implement: update user role: {user_name} to role {role_name}")

    @staticmethod
    def get_user_permission(user_name: str) -> Dict[str, Any]:
        RoleMgr._not_implemented(f"not implement: get user permission: {user_name}")
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/admin/server/auth.py | admin/server/auth.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import uuid
from functools import wraps
from datetime import datetime
from flask import jsonify, request
from flask_login import current_user, login_user
from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer
from api.common.exceptions import AdminException, UserNotFoundError
from api.common.base64 import encode_to_base64
from api.db.services import UserService
from common.constants import ActiveEnum, StatusEnum
from api.utils.crypt import decrypt
from common.misc_utils import get_uuid
from common.time_utils import current_timestamp, datetime_format, get_format_time
from common.connection_utils import sync_construct_response
from common import settings
def setup_auth(login_manager):
    """Register the Flask-Login request_loader used by the admin API.

    Requests authenticate with an Authorization header carrying the user's
    access token, serialized/signed with the app secret key.
    """
    @login_manager.request_loader
    def load_user(web_request):
        # Deserializer for the signed token in the Authorization header.
        jwt = Serializer(secret_key=settings.SECRET_KEY)
        authorization = web_request.headers.get("Authorization")
        if authorization:
            try:
                access_token = str(jwt.loads(authorization))
                # Reject empty/blank tokens outright.
                if not access_token or not access_token.strip():
                    logging.warning("Authentication attempt with empty access token")
                    return None
                # Access tokens should be UUIDs (32 hex characters)
                if len(access_token.strip()) < 32:
                    logging.warning(f"Authentication attempt with invalid token format: {len(access_token)} chars")
                    return None
                user = UserService.query(
                    access_token=access_token, status=StatusEnum.VALID.value
                )
                if user:
                    # Defensive: a user whose stored token was cleared (e.g.
                    # after logout) must not authenticate.
                    if not user[0].access_token or not user[0].access_token.strip():
                        logging.warning(f"User {user[0].email} has empty access_token in database")
                        return None
                    return user[0]
                else:
                    return None
            except Exception as e:
                # Any signature/parse failure is treated as unauthenticated.
                logging.warning(f"load_user got exception {e}")
                return None
        else:
            # No Authorization header: anonymous request.
            return None
def init_default_admin():
    """Ensure an active superuser exists.

    Creates the stock admin@ragflow.io account when no superuser is found;
    raises when superusers exist but none is active.
    """
    admins = UserService.query(is_superuser=True)
    if not admins:
        # NOTE(review): these default credentials (password "admin") are well
        # known; operators should change them immediately after first login.
        default_admin = {
            "id": uuid.uuid1().hex,
            "password": encode_to_base64("admin"),
            "nickname": "admin",
            "is_superuser": True,
            "email": "admin@ragflow.io",
            "creator": "system",
            "status": "1",
        }
        if not UserService.save(**default_admin):
            raise AdminException("Can't init admin.", 500)
    elif not any(u.is_active == ActiveEnum.ACTIVE.value for u in admins):
        raise AdminException("No active admin. Please update 'is_active' in db manually.", 500)
def check_admin_auth(view_func):
    """Decorator: allow only active superusers past this point."""
    @wraps(view_func)
    def guarded(*args, **kwargs):
        # Re-fetch the user so revocations apply immediately, not at login.
        user = UserService.filter_by_id(current_user.id)
        if not user:
            raise UserNotFoundError(current_user.email)
        if not user.is_superuser:
            raise AdminException("Not admin", 403)
        if user.is_active == ActiveEnum.INACTIVE.value:
            raise AdminException(f"User {current_user.email} inactive", 403)
        return view_func(*args, **kwargs)
    return guarded
def login_admin(email: str, password: str):
    """Validate admin credentials, open the session, and issue a token.

    :param email: admin email
    :param password: RSA-encrypted password (decrypted here)
    :raises UserNotFoundError: unknown email
    :raises AdminException: wrong password, not a superuser, or inactive
    """
    users = UserService.query(email=email)
    if not users:
        raise UserNotFoundError(email)
    psw = decrypt(password)
    user = UserService.query_user(email, psw)
    if not user:
        raise AdminException("Email and password do not match!")
    if not user.is_superuser:
        raise AdminException("Not admin", 403)
    if user.is_active == ActiveEnum.INACTIVE.value:
        raise AdminException(f"User {email} inactive", 403)
    # Snapshot the profile before mutating the record.
    resp = user.to_json()
    user.access_token = get_uuid()
    login_user(user)
    # Bug fix: trailing commas previously turned these assignments into
    # 1-tuples, storing (timestamp,) instead of the scalar values.
    user.update_time = current_timestamp()
    user.update_date = datetime_format(datetime.now())
    user.last_login_time = get_format_time()
    user.save()
    msg = "Welcome back!"
    return sync_construct_response(data=resp, auth=user.get_id(), message=msg)
def check_admin(username: str, password: str):
    """Return True when the credentials match a known user.

    When the username is not registered, the stock admin account is created
    first, then the credentials are checked as usual.
    """
    registered = UserService.query(email=username)
    if not registered:
        logging.info(f"Username: {username} is not registered!")
        # NOTE(review): this bootstrap always creates admin@ragflow.io with
        # the default password, regardless of the probed username — confirm
        # this is the intended behavior.
        user_info = {
            "id": uuid.uuid1().hex,
            "password": encode_to_base64("admin"),
            "nickname": "admin",
            "is_superuser": True,
            "email": "admin@ragflow.io",
            "creator": "system",
            "status": "1",
        }
        if not UserService.save(**user_info):
            raise AdminException("Can't init admin.", 500)
    return bool(UserService.query_user(username, password))
def login_verify(f):
    """Decorator enforcing HTTP basic auth against the admin account.

    Note: this endpoint family always answers HTTP 200; failures are
    signalled through the "code" field of the JSON body.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        auth = request.authorization
        params = auth.parameters if auth else {}
        if 'username' not in params or 'password' not in params:
            return jsonify({
                "code": 401,
                "message": "Authentication required",
                "data": None
            }), 200
        try:
            if not check_admin(params['username'], params['password']):
                return jsonify({
                    "code": 500,
                    "message": "Access denied",
                    "data": None
                }), 200
        except Exception:
            logging.exception("An error occurred during admin login verification.")
            return jsonify({
                "code": 500,
                "message": "An internal server error occurred."
            }), 200
        return f(*args, **kwargs)
    return decorated
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/admin/server/admin_server.py | admin/server/admin_server.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import signal
import logging
import time
import threading
import traceback
import faulthandler
from flask import Flask
from flask_login import LoginManager
from werkzeug.serving import run_simple
from routes import admin_bp
from common.log_utils import init_root_logger
from common.constants import SERVICE_CONF
from common.config_utils import show_configs
from common import settings
from config import load_configurations, SERVICE_CONFIGS
from auth import init_default_admin, setup_auth
from flask_session import Session
from common.versions import get_ragflow_version
# Signals background work (if any) to stop before the hard exit below.
stop_event = threading.Event()

if __name__ == '__main__':
    faulthandler.enable()
    init_root_logger("admin_service")
    logging.info(r"""
     ____  ___   ______________                 ___       __          _
    / __ \/   | / ____/ ____/ /___ _      __   /   | ____/ /___ ___  (_)___
   / /_/ / /| |/ / __/ /_  / / __ \ | /| / /  / /| |/ __  / __ `__ \/ / __ \
  / _, _/ ___ / /_/ / __/ / / /_/ / |/ |/ /  / ___ / /_/ / / / / / / / / / /
 /_/ |_/_/  |_\____/_/   /_/\____/|__/|__/  /_/  |_\__,_/_/ /_/ /_/_/_/ /_/
""")
    app = Flask(__name__)
    app.register_blueprint(admin_bp)
    app.config["SESSION_PERMANENT"] = False
    app.config["SESSION_TYPE"] = "filesystem"
    # Upload cap; defaults to 1 GiB, overridable via MAX_CONTENT_LENGTH.
    app.config["MAX_CONTENT_LENGTH"] = int(
        os.environ.get("MAX_CONTENT_LENGTH", 1024 * 1024 * 1024)
    )
    Session(app)
    logging.info(f'RAGFlow version: {get_ragflow_version()}')
    show_configs()
    login_manager = LoginManager()
    login_manager.init_app(app)
    settings.init_settings()
    setup_auth(login_manager)
    init_default_admin()
    SERVICE_CONFIGS.configs = load_configurations(SERVICE_CONF)
    try:
        logging.info("RAGFlow Admin service start...")
        run_simple(
            hostname="0.0.0.0",
            port=9381,
            application=app,
            threaded=True,
            use_reloader=False,
            # Security fix: the interactive Werkzeug debugger permits remote
            # code execution and must never be enabled on a network-exposed
            # service (it was previously True).
            use_debugger=False,
        )
    except Exception:
        traceback.print_exc()
        stop_event.set()
        time.sleep(1)
        # Hard-kill: non-daemon worker threads would otherwise block exit.
        os.kill(os.getpid(), signal.SIGKILL)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/admin/client/admin_client.py | admin/client/admin_client.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import base64
import getpass
from cmd import Cmd
from typing import Any, Dict, List
import requests
from Cryptodome.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
from Cryptodome.PublicKey import RSA
from lark import Lark, Transformer, Tree
GRAMMAR = r"""
start: command
command: sql_command | meta_command
sql_command: list_services
| show_service
| startup_service
| shutdown_service
| restart_service
| list_users
| show_user
| drop_user
| alter_user
| create_user
| activate_user
| list_datasets
| list_agents
| create_role
| drop_role
| alter_role
| list_roles
| show_role
| grant_permission
| revoke_permission
| alter_user_role
| show_user_permission
| show_version
| grant_admin
| revoke_admin
| set_variable
| show_variable
| list_variables
// meta command definition
meta_command: "\\" meta_command_name [meta_args]
meta_command_name: /[a-zA-Z?]+/
meta_args: (meta_arg)+
meta_arg: /[^\\s"']+/ | quoted_string
// command definition
LIST: "LIST"i
SERVICES: "SERVICES"i
SHOW: "SHOW"i
CREATE: "CREATE"i
SERVICE: "SERVICE"i
SHUTDOWN: "SHUTDOWN"i
STARTUP: "STARTUP"i
RESTART: "RESTART"i
USERS: "USERS"i
DROP: "DROP"i
USER: "USER"i
ALTER: "ALTER"i
ACTIVE: "ACTIVE"i
ADMIN: "ADMIN"i
PASSWORD: "PASSWORD"i
DATASETS: "DATASETS"i
OF: "OF"i
AGENTS: "AGENTS"i
ROLE: "ROLE"i
ROLES: "ROLES"i
DESCRIPTION: "DESCRIPTION"i
GRANT: "GRANT"i
REVOKE: "REVOKE"i
ALL: "ALL"i
PERMISSION: "PERMISSION"i
TO: "TO"i
FROM: "FROM"i
FOR: "FOR"i
RESOURCES: "RESOURCES"i
ON: "ON"i
SET: "SET"i
VERSION: "VERSION"i
VAR: "VAR"i
VARS: "VARS"i
list_services: LIST SERVICES ";"
show_service: SHOW SERVICE NUMBER ";"
startup_service: STARTUP SERVICE NUMBER ";"
shutdown_service: SHUTDOWN SERVICE NUMBER ";"
restart_service: RESTART SERVICE NUMBER ";"
list_users: LIST USERS ";"
drop_user: DROP USER quoted_string ";"
alter_user: ALTER USER PASSWORD quoted_string quoted_string ";"
show_user: SHOW USER quoted_string ";"
create_user: CREATE USER quoted_string quoted_string ";"
activate_user: ALTER USER ACTIVE quoted_string status ";"
list_datasets: LIST DATASETS OF quoted_string ";"
list_agents: LIST AGENTS OF quoted_string ";"
create_role: CREATE ROLE identifier [DESCRIPTION quoted_string] ";"
drop_role: DROP ROLE identifier ";"
alter_role: ALTER ROLE identifier SET DESCRIPTION quoted_string ";"
list_roles: LIST ROLES ";"
show_role: SHOW ROLE identifier ";"
grant_permission: GRANT action_list ON identifier TO ROLE identifier ";"
revoke_permission: REVOKE action_list ON identifier FROM ROLE identifier ";"
alter_user_role: ALTER USER quoted_string SET ROLE identifier ";"
show_user_permission: SHOW USER PERMISSION quoted_string ";"
grant_admin: GRANT ADMIN quoted_string ";"
revoke_admin: REVOKE ADMIN quoted_string ";"
set_variable: SET VAR identifier identifier ";"
show_variable: SHOW VAR identifier ";"
list_variables: LIST VARS ";"
show_version: SHOW VERSION ";"
action_list: identifier ("," identifier)*
identifier: WORD
quoted_string: QUOTED_STRING
status: WORD
QUOTED_STRING: /'[^']+'/ | /"[^"]+"/
WORD: /[a-zA-Z0-9_\-\.]+/
NUMBER: /[0-9]+/
%import common.WS
%ignore WS
"""
class AdminTransformer(Transformer):
    """Lowers CLI parse trees into plain command dicts.

    Each handler receives the matched children *including* the keyword
    tokens, so payload values are picked out by position after the
    keywords (e.g. items[2] for "SHOW SERVICE <n>").
    """

    def start(self, items):
        return items[0]

    def command(self, items):
        return items[0]

    # --- service commands -------------------------------------------------

    def list_services(self, items):
        result = {"type": "list_services"}
        return result

    def show_service(self, items):
        # items = [SHOW, SERVICE, NUMBER]
        service_id = int(items[2])
        return {"type": "show_service", "number": service_id}

    def startup_service(self, items):
        service_id = int(items[2])
        return {"type": "startup_service", "number": service_id}

    def shutdown_service(self, items):
        service_id = int(items[2])
        return {"type": "shutdown_service", "number": service_id}

    def restart_service(self, items):
        service_id = int(items[2])
        return {"type": "restart_service", "number": service_id}

    # --- user commands ----------------------------------------------------

    def list_users(self, items):
        return {"type": "list_users"}

    def show_user(self, items):
        user_name = items[2]
        return {"type": "show_user", "user_name": user_name}

    def drop_user(self, items):
        user_name = items[2]
        return {"type": "drop_user", "user_name": user_name}

    def alter_user(self, items):
        # items = [ALTER, USER, PASSWORD, user, new_password]
        user_name = items[3]
        new_password = items[4]
        return {"type": "alter_user", "user_name": user_name, "password": new_password}

    def create_user(self, items):
        user_name = items[2]
        password = items[3]
        # New accounts always start with the plain "user" role.
        return {"type": "create_user", "user_name": user_name, "password": password, "role": "user"}

    def activate_user(self, items):
        # items = [ALTER, USER, ACTIVE, user, status]
        user_name = items[3]
        activate_status = items[4]
        return {"type": "activate_user", "activate_status": activate_status, "user_name": user_name}

    def list_datasets(self, items):
        user_name = items[3]
        return {"type": "list_datasets", "user_name": user_name}

    def list_agents(self, items):
        user_name = items[3]
        return {"type": "list_agents", "user_name": user_name}

    # --- role commands ----------------------------------------------------

    def create_role(self, items):
        role_name = items[2]
        # DESCRIPTION clause is optional in the grammar.
        if len(items) > 4:
            description = items[4]
            return {"type": "create_role", "role_name": role_name, "description": description}
        else:
            return {"type": "create_role", "role_name": role_name}

    def drop_role(self, items):
        role_name = items[2]
        return {"type": "drop_role", "role_name": role_name}

    def alter_role(self, items):
        # items = [ALTER, ROLE, role, SET, DESCRIPTION, description]
        role_name = items[2]
        description = items[5]
        return {"type": "alter_role", "role_name": role_name, "description": description}

    def list_roles(self, items):
        return {"type": "list_roles"}

    def show_role(self, items):
        role_name = items[2]
        return {"type": "show_role", "role_name": role_name}

    def grant_permission(self, items):
        # items = [GRANT, action_list, ON, resource, TO, ROLE, role]
        action_list = items[1]
        resource = items[3]
        role_name = items[6]
        return {"type": "grant_permission", "role_name": role_name, "resource": resource, "actions": action_list}

    def revoke_permission(self, items):
        # items = [REVOKE, action_list, ON, resource, FROM, ROLE, role]
        action_list = items[1]
        resource = items[3]
        role_name = items[6]
        return {"type": "revoke_permission", "role_name": role_name, "resource": resource, "actions": action_list}

    def alter_user_role(self, items):
        user_name = items[2]
        role_name = items[5]
        return {"type": "alter_user_role", "user_name": user_name, "role_name": role_name}

    def show_user_permission(self, items):
        user_name = items[3]
        return {"type": "show_user_permission", "user_name": user_name}

    # --- misc commands ----------------------------------------------------

    def show_version(self, items):
        return {"type": "show_version"}

    def grant_admin(self, items):
        user_name = items[2]
        return {"type": "grant_admin", "user_name": user_name}

    def revoke_admin(self, items):
        user_name = items[2]
        return {"type": "revoke_admin", "user_name": user_name}

    def set_variable(self, items):
        var_name = items[2]
        var_value = items[3]
        return {"type": "set_variable", "var_name": var_name, "var_value": var_value}

    def show_variable(self, items):
        var_name = items[2]
        return {"type": "show_variable", "var_name": var_name}

    def list_variables(self, items):
        return {"type": "list_variables"}

    def action_list(self, items):
        return items

    # --- meta commands (\h, \q, ...) --------------------------------------

    def meta_command(self, items):
        command_name = str(items[0]).lower()
        args = items[1:] if len(items) > 1 else []
        # handle quoted parameter
        parsed_args = []
        for arg in args:
            if hasattr(arg, "value"):
                parsed_args.append(arg.value)
            else:
                parsed_args.append(str(arg))
        return {"type": "meta", "command": command_name, "args": parsed_args}

    def meta_command_name(self, items):
        return items[0]

    def meta_args(self, items):
        return items
def encrypt(input_string):
    """RSA-encrypt (PKCS#1 v1.5) the base64 form of *input_string* using the
    server's fixed public key; return the ciphertext base64-encoded.

    The input is base64-encoded *before* encryption to mirror what the
    server-side decrypt routine expects.
    """
    pub = "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArq9XTUSeYr2+N1h3Afl/z8Dse/2yD0ZGrKwx+EEEcdsBLca9Ynmx3nIB5obmLlSfmskLpBo0UACBmB5rEjBp2Q2f3AG3Hjd4B+gNCG6BDaawuDlgANIhGnaTLrIqWrrcm4EMzJOnAOI1fgzJRsOOUEfaS318Eq9OVO3apEyCCt0lOQK6PuksduOjVxtltDav+guVAA068NrPYmRNabVKRNLJpL8w4D44sfth5RvZ3q9t+6RTArpEtc5sh5ChzvqPOzKGMXW83C95TxmXqpbK6olN4RevSfVjEAgCydH6HN6OhtOQEcnrU97r9H0iZOWwbw3pVrZiUkuRD1R56Wzs2wIDAQAB\n-----END PUBLIC KEY-----"
    rsa_key = RSA.importKey(pub)
    encryptor = Cipher_pkcs1_v1_5.new(rsa_key)
    ciphertext = encryptor.encrypt(base64.b64encode(input_string.encode("utf-8")))
    return base64.b64encode(ciphertext).decode("utf-8")
def encode_to_base64(input_string):
    """Return *input_string* UTF-8 encoded and then base64-encoded, as str."""
    return base64.b64encode(input_string.encode("utf-8")).decode("utf-8")
def show_help():
    """Print the admin CLI command reference to stdout.

    Bug fix: the REVOKE line previously read "TO ROLE", but the grammar
    only accepts "REVOKE ... FROM ROLE ...". Also documents the variable
    commands (SET VAR / SHOW VAR / LIST VARS) that the grammar supports
    but the help omitted.
    """
    help_text = """
    Commands:
      LIST SERVICES
      SHOW SERVICE <service>
      STARTUP SERVICE <service>
      SHUTDOWN SERVICE <service>
      RESTART SERVICE <service>
      LIST USERS
      SHOW USER <user>
      DROP USER <user>
      CREATE USER <user> <password>
      ALTER USER PASSWORD <user> <new_password>
      ALTER USER ACTIVE <user> <on/off>
      LIST DATASETS OF <user>
      LIST AGENTS OF <user>
      CREATE ROLE <role>
      DROP ROLE <role>
      ALTER ROLE <role> SET DESCRIPTION <description>
      LIST ROLES
      SHOW ROLE <role>
      GRANT <action_list> ON <function> TO ROLE <role>
      REVOKE <action_list> ON <function> FROM ROLE <role>
      ALTER USER <user> SET ROLE <role>
      SHOW USER PERMISSION <user>
      SHOW VERSION
      GRANT ADMIN <user>
      REVOKE ADMIN <user>
      SET VAR <name> <value>
      SHOW VAR <name>
      LIST VARS

    Meta Commands:
      \\?, \\h, \\help     Show this help
      \\q, \\quit, \\exit  Quit the CLI
    """
    print(help_text)
class AdminCLI(Cmd):
def __init__(self):
    """Build the CLI: a LALR parser for the admin grammar plus connection state."""
    super().__init__()
    # Parse trees are transformed into command dicts by AdminTransformer.
    self.parser = Lark(GRAMMAR, start="start", parser="lalr", transformer=AdminTransformer())
    self.command_history = []
    self.is_interactive = False
    # Fixed superuser account; the password is replaced by its RSA-encrypted
    # form during verify_admin().
    self.admin_account = "admin@ragflow.io"
    self.admin_password: str = "admin"
    # Shared HTTP session; the Authorization header is installed after login.
    self.session = requests.Session()
    self.access_token: str = ""
    self.host: str = ""
    self.port: int = 0
# cmd.Cmd UI attributes: startup banner and input prompt.
intro = r"""Type "\h" for help."""
prompt = "admin> "
def onecmd(self, command: str) -> bool:
    """Parse and execute one input line (cmd.Cmd hook).

    Returns True to stop the command loop (quit meta-commands or EOF),
    False otherwise.
    """
    try:
        result = self.parse_command(command)
        if isinstance(result, Tree):
            # Bug fix: grammar commands parse to a lark Tree; previously only
            # dict results were executed here, so real commands silently
            # no-oped through this path — unlike run_interactive(), which
            # executes every parse result.
            self.execute_command(result)
            return False
        if isinstance(result, dict):
            if result.get("type") == "empty":
                # Blank line: nothing to do, keep looping.
                return False
            self.execute_command(result)
            if result.get("type") == "meta" and result.get("command") in ["q", "quit", "exit"]:
                return True
    except KeyboardInterrupt:
        print("\nUse '\\q' to quit")
    except EOFError:
        print("\nGoodbye!")
        return True
    return False
def emptyline(self) -> bool:
    """cmd.Cmd hook: ignore blank lines instead of repeating the last command."""
    return False
def default(self, line: str) -> bool:
    """cmd.Cmd hook: all input goes through the grammar, not do_* methods."""
    return self.onecmd(line)
def parse_command(self, command_str: str) -> dict[str, str]:
    """Parse one raw input line.

    Blank input short-circuits to {"type": "empty"} without touching the
    history. Otherwise the line is recorded and fed to the Lark parser;
    parse failures are reported as {"type": "error", ...} rather than raised.
    """
    if not command_str.strip():
        return {"type": "empty"}
    self.command_history.append(command_str)
    try:
        return self.parser.parse(command_str)
    except Exception as e:
        return {"type": "error", "message": f"Parse error: {str(e)}"}
def verify_admin(self, arguments: dict, single_command: bool):
    """Authenticate the admin account against the server.

    In single-command mode the password comes from *arguments* and only one
    attempt is made; interactively the password is prompted (up to three
    attempts). On success the session gains the server's Authorization
    header and True is returned; after exhausting attempts, False.
    """
    self.host = arguments["host"]
    self.port = arguments["port"]
    print("Attempt to access server for admin login")
    url = f"http://{self.host}:{self.port}/api/v1/admin/login"
    attempt_count = 3
    if single_command:
        attempt_count = 1
    try_count = 0
    while True:
        try_count += 1
        if try_count > attempt_count:
            return False
        if single_command:
            admin_passwd = arguments["password"]
        else:
            admin_passwd = getpass.getpass(f"password for {self.admin_account}: ").strip()
        try:
            # Server expects the RSA-over-base64 encrypted form (see encrypt()).
            self.admin_password = encrypt(admin_passwd)
            response = self.session.post(url, json={"email": self.admin_account, "password": self.admin_password})
            if response.status_code == 200:
                res_json = response.json()
                error_code = res_json.get("code", -1)
                if error_code == 0:
                    # NOTE(review): User-Agent hard-codes CLI version 0.23.1 —
                    # confirm it tracks releases.
                    self.session.headers.update({"Content-Type": "application/json", "Authorization": response.headers["Authorization"], "User-Agent": "RAGFlow-CLI/0.23.1"})
                    print("Authentication successful.")
                    return True
                else:
                    error_message = res_json.get("message", "Unknown error")
                    print(f"Authentication failed: {error_message}, try again")
                    continue
            else:
                print(f"Bad response,status: {response.status_code}, password is wrong")
        except Exception as e:
            # Connection-level failure: report and retry until attempts run out.
            print(str(e))
            print("Can't access server for admin login (connection failed)")
def _format_service_detail_table(self, data):
if isinstance(data, list):
return data
if not all([isinstance(v, list) for v in data.values()]):
# normal table
return data
# handle task_executor heartbeats map, for example {'name': [{'done': 2, 'now': timestamp1}, {'done': 3, 'now': timestamp2}]
task_executor_list = []
for k, v in data.items():
# display latest status
heartbeats = sorted(v, key=lambda x: x["now"], reverse=True)
task_executor_list.append(
{
"task_executor_name": k,
**heartbeats[0],
}
if heartbeats
else {"task_executor_name": k}
)
return task_executor_list
def _print_table_simple(self, data):
    """Render a list of row-dicts (or a single dict) as an ASCII grid.

    Columns are the sorted union of all row keys; column widths account
    for non-ASCII characters rendering two terminal columns wide.
    """
    if not data:
        print("No data to print")
        return
    if isinstance(data, dict):
        # handle single row data
        data = [data]
    columns = list(set().union(*(d.keys() for d in data)))
    columns.sort()
    col_widths = {}
    def get_string_width(text):
        # Printable ASCII (and whitespace controls) count as one terminal
        # column; everything else (e.g. CJK) is assumed to occupy two.
        half_width_chars = " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\t\n\r"
        width = 0
        for char in text:
            if char in half_width_chars:
                width += 1
            else:
                width += 2
        return width
    # Column width = widest of header and all cell values, minimum 2.
    for col in columns:
        max_width = get_string_width(str(col))
        for item in data:
            value_len = get_string_width(str(item.get(col, "")))
            if value_len > max_width:
                max_width = value_len
        col_widths[col] = max(2, max_width)
    # Generate delimiter
    separator = "+" + "+".join(["-" * (col_widths[col] + 2) for col in columns]) + "+"
    # Print header
    print(separator)
    header = "|" + "|".join([f" {col:<{col_widths[col]}} " for col in columns]) + "|"
    print(header)
    print(separator)
    # Print data
    for item in data:
        row = "|"
        for col in columns:
            value = str(item.get(col, ""))
            if get_string_width(value) > col_widths[col]:
                # NOTE(review): defensive truncation — widths are maxima over
                # this same data, and the slice counts characters while the
                # comparison counts display width; confirm wide-char cells
                # cannot overflow here.
                value = value[: col_widths[col] - 3] + "..."
            # Pad by display width: subtract the extra columns wide chars occupy.
            row += f" {value:<{col_widths[col] - (get_string_width(value) - len(value))}} |"
        print(row)
    print(separator)
def run_interactive(self):
    """REPL loop: read a line, parse, execute; a quit meta-command exits."""
    self.is_interactive = True
    print("RAGFlow Admin command line interface - Type '\\?' for help, '\\q' to quit")
    while True:
        try:
            command = input("admin> ").strip()
            if not command:
                continue
            print(f"command: {command}")
            result = self.parse_command(command)
            self.execute_command(result)
            if isinstance(result, Tree):
                # Grammar commands parse to lark Trees; no meta handling needed.
                continue
            if result.get("type") == "meta" and result.get("command") in ["q", "quit", "exit"]:
                break
        except KeyboardInterrupt:
            # Ctrl-C does not exit; only an explicit quit (or EOF) does.
            print("\nUse '\\q' to quit")
        except EOFError:
            print("\nGoodbye!")
            break
def run_single_command(self, command: str):
    """Parse then execute exactly one command line (non-interactive mode)."""
    self.execute_command(self.parse_command(command))
def parse_connection_args(self, args: List[str]) -> Dict[str, Any]:
    """Parse connection options (-h/--host, -p/--port, -w/--password).

    add_help=False frees -h for the host option. When unparsed tokens
    remain, the first leftover token is returned as the single command to
    run (together with the password); otherwise only host/port are returned.
    On bad arguments, returns an {"error": ...} dict instead of exiting.

    NOTE(review): the declared positional "command" is consumed but its
    value is never returned — only *leftover* tokens reach the "command"
    key; confirm callers rely on this.
    """
    parser = argparse.ArgumentParser(description="Admin CLI Client", add_help=False)
    parser.add_argument("-h", "--host", default="localhost", help="Admin service host")
    parser.add_argument("-p", "--port", type=int, default=9381, help="Admin service port")
    parser.add_argument("-w", "--password", default="admin", type=str, help="Superuser password")
    parser.add_argument("command", nargs="?", help="Single command")
    try:
        known, leftover = parser.parse_known_args(args)
    except SystemExit:
        return {"error": "Invalid connection arguments"}
    if leftover:
        return {"host": known.host, "port": known.port, "password": known.password, "command": leftover[0]}
    return {
        "host": known.host,
        "port": known.port,
    }
def execute_command(self, parsed_command: Dict[str, Any]):
    """Dispatch a parsed command to its handler method.

    Accepts either a lark Tree (grammar command; the payload dict is its
    first child) or a dict produced by parse_command (meta/error/empty).
    Parse errors are printed and swallowed; unknown command types fall
    through to a placeholder message.
    """
    if isinstance(parsed_command, Tree):
        command_dict = parsed_command.children[0]
    elif parsed_command["type"] == "error":
        print(f"Error: {parsed_command['message']}")
        return
    else:
        command_dict = parsed_command
    # print(f"Parsed command: {command_dict}")
    command_type = command_dict["type"]
    # Table-driven dispatch: command type -> handler method name. Names are
    # resolved lazily via getattr so only the invoked handler must exist.
    dispatch = {
        "list_services": "_handle_list_services",
        "show_service": "_handle_show_service",
        "restart_service": "_handle_restart_service",
        "shutdown_service": "_handle_shutdown_service",
        "startup_service": "_handle_startup_service",
        "list_users": "_handle_list_users",
        "show_user": "_handle_show_user",
        "drop_user": "_handle_drop_user",
        "alter_user": "_handle_alter_user",
        "create_user": "_handle_create_user",
        "activate_user": "_handle_activate_user",
        "list_datasets": "_handle_list_datasets",
        "list_agents": "_handle_list_agents",
        "create_role": "_create_role",
        "drop_role": "_drop_role",
        "alter_role": "_alter_role",
        "list_roles": "_list_roles",
        "show_role": "_show_role",
        "grant_permission": "_grant_permission",
        "revoke_permission": "_revoke_permission",
        "alter_user_role": "_alter_user_role",
        "show_user_permission": "_show_user_permission",
        "show_version": "_show_version",
        "grant_admin": "_grant_admin",
        "revoke_admin": "_revoke_admin",
        "set_variable": "_set_variable",
        "show_variable": "_show_variable",
        "list_variables": "_list_variables",
        "meta": "_handle_meta_command",
    }
    handler_name = dispatch.get(command_type)
    if handler_name is None:
        print(f"Command '{command_type}' would be executed with API")
    else:
        getattr(self, handler_name)(command_dict)
def _handle_list_services(self, command):
print("Listing all services")
url = f"http://{self.host}:{self.port}/api/v1/admin/services"
response = self.session.get(url)
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to get all services, code: {res_json['code']}, message: {res_json['message']}")
def _handle_show_service(self, command):
service_id: int = command["number"]
print(f"Showing service: {service_id}")
url = f"http://{self.host}:{self.port}/api/v1/admin/services/{service_id}"
response = self.session.get(url)
res_json = response.json()
if response.status_code == 200:
res_data = res_json["data"]
if "status" in res_data and res_data["status"] == "alive":
print(f"Service {res_data['service_name']} is alive, ")
if isinstance(res_data["message"], str):
print(res_data["message"])
else:
data = self._format_service_detail_table(res_data["message"])
self._print_table_simple(data)
else:
print(f"Service {res_data['service_name']} is down, {res_data['message']}")
else:
print(f"Fail to show service, code: {res_json['code']}, message: {res_json['message']}")
def _handle_restart_service(self, command):
service_id: int = command["number"]
print(f"Restart service {service_id}")
def _handle_shutdown_service(self, command):
service_id: int = command["number"]
print(f"Shutdown service {service_id}")
def _handle_startup_service(self, command):
service_id: int = command["number"]
print(f"Startup service {service_id}")
def _handle_list_users(self, command):
print("Listing all users")
url = f"http://{self.host}:{self.port}/api/v1/admin/users"
response = self.session.get(url)
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to get all users, code: {res_json['code']}, message: {res_json['message']}")
def _handle_show_user(self, command):
username_tree: Tree = command["user_name"]
user_name: str = username_tree.children[0].strip("'\"")
print(f"Showing user: {user_name}")
url = f"http://{self.host}:{self.port}/api/v1/admin/users/{user_name}"
response = self.session.get(url)
res_json = response.json()
if response.status_code == 200:
table_data = res_json["data"]
table_data.pop("avatar")
self._print_table_simple(table_data)
else:
print(f"Fail to get user {user_name}, code: {res_json['code']}, message: {res_json['message']}")
def _handle_drop_user(self, command):
username_tree: Tree = command["user_name"]
user_name: str = username_tree.children[0].strip("'\"")
print(f"Drop user: {user_name}")
url = f"http://{self.host}:{self.port}/api/v1/admin/users/{user_name}"
response = self.session.delete(url)
res_json = response.json()
if response.status_code == 200:
print(res_json["message"])
else:
print(f"Fail to drop user, code: {res_json['code']}, message: {res_json['message']}")
def _handle_alter_user(self, command):
    """PUT /admin/users/<name>/password with the RSA-encrypted new password."""
    user_name = command["user_name"].children[0].strip("'\"")
    password = command["password"].children[0].strip("'\"")
    # Never echo the plaintext password.
    print(f"Alter user: {user_name}, password: ******")
    url = f"http://{self.host}:{self.port}/api/v1/admin/users/{user_name}/password"
    response = self.session.put(url, json={"new_password": encrypt(password)})
    res_json = response.json()
    if response.status_code == 200:
        print(res_json["message"])
    else:
        print(f"Fail to alter password, code: {res_json['code']}, message: {res_json['message']}")
def _handle_create_user(self, command):
    """POST /admin/users with an encrypted password and the requested role."""
    user_name = command["user_name"].children[0].strip("'\"")
    password = command["password"].children[0].strip("'\"")
    role: str = command["role"]
    # Never echo the plaintext password.
    print(f"Create user: {user_name}, password: ******, role: {role}")
    url = f"http://{self.host}:{self.port}/api/v1/admin/users"
    response = self.session.post(url, json={"user_name": user_name, "password": encrypt(password), "role": role})
    res_json = response.json()
    if response.status_code == 200:
        self._print_table_simple(res_json["data"])
    else:
        print(f"Fail to create user {user_name}, code: {res_json['code']}, message: {res_json['message']}")
def _handle_activate_user(self, command):
user_name_tree: Tree = command["user_name"]
user_name: str = user_name_tree.children[0].strip("'\"")
activate_tree: Tree = command["activate_status"]
activate_status: str = activate_tree.children[0].strip("'\"")
if activate_status.lower() in ["on", "off"]:
print(f"Alter user {user_name} activate status, turn {activate_status.lower()}.")
url = f"http://{self.host}:{self.port}/api/v1/admin/users/{user_name}/activate"
response = self.session.put(url, json={"activate_status": activate_status})
res_json = response.json()
if response.status_code == 200:
print(res_json["message"])
else:
print(f"Fail to alter activate status, code: {res_json['code']}, message: {res_json['message']}")
else:
print(f"Unknown activate status: {activate_status}.")
def _grant_admin(self, command):
user_name_tree: Tree = command["user_name"]
user_name: str = user_name_tree.children[0].strip("'\"")
url = f"http://{self.host}:{self.port}/api/v1/admin/users/{user_name}/admin"
# print(f"Grant admin: {url}")
# return
response = self.session.put(url)
res_json = response.json()
if response.status_code == 200:
print(res_json["message"])
else:
print(f"Fail to grant {user_name} admin authorization, code: {res_json['code']}, message: {res_json['message']}")
def _revoke_admin(self, command):
user_name_tree: Tree = command["user_name"]
user_name: str = user_name_tree.children[0].strip("'\"")
url = f"http://{self.host}:{self.port}/api/v1/admin/users/{user_name}/admin"
# print(f"Revoke admin: {url}")
# return
response = self.session.delete(url)
res_json = response.json()
if response.status_code == 200:
print(res_json["message"])
else:
print(f"Fail to revoke {user_name} admin authorization, code: {res_json['code']}, message: {res_json['message']}")
def _set_variable(self, command):
var_name_tree: Tree = command["var_name"]
var_name = var_name_tree.children[0].strip("'\"")
var_value_tree: Tree = command["var_value"]
var_value = var_value_tree.children[0].strip("'\"")
url = f"http://{self.host}:{self.port}/api/v1/admin/variables"
response = self.session.put(url, json={"var_name": var_name, "var_value": var_value})
res_json = response.json()
if response.status_code == 200:
print(res_json["message"])
else:
print(f"Fail to set variable {var_name} to {var_value}, code: {res_json['code']}, message: {res_json['message']}")
def _show_variable(self, command):
var_name_tree: Tree = command["var_name"]
var_name = var_name_tree.children[0].strip("'\"")
url = f"http://{self.host}:{self.port}/api/v1/admin/variables"
response = self.session.get(url, json={"var_name": var_name})
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to get variable {var_name}, code: {res_json['code']}, message: {res_json['message']}")
def _list_variables(self, command):
url = f"http://{self.host}:{self.port}/api/v1/admin/variables"
response = self.session.get(url)
res_json = response.json()
if response.status_code == 200:
self._print_table_simple(res_json["data"])
else:
print(f"Fail to list variables, code: {res_json['code']}, message: {res_json['message']}")
def _handle_list_datasets(self, command):
username_tree: Tree = command["user_name"]
user_name: str = username_tree.children[0].strip("'\"")
print(f"Listing all datasets of user: {user_name}")
url = f"http://{self.host}:{self.port}/api/v1/admin/users/{user_name}/datasets"
response = self.session.get(url)
res_json = response.json()
if response.status_code == 200:
table_data = res_json["data"]
for t in table_data:
t.pop("avatar")
self._print_table_simple(table_data)
else:
print(f"Fail to get all datasets of {user_name}, code: {res_json['code']}, message: {res_json['message']}")
def _handle_list_agents(self, command):
username_tree: Tree = command["user_name"]
user_name: str = username_tree.children[0].strip("'\"")
print(f"Listing all agents of user: {user_name}")
url = f"http://{self.host}:{self.port}/api/v1/admin/users/{user_name}/agents"
response = self.session.get(url)
res_json = response.json()
if response.status_code == 200:
table_data = res_json["data"]
for t in table_data:
t.pop("avatar")
self._print_table_simple(table_data)
else:
print(f"Fail to get all agents of {user_name}, code: {res_json['code']}, message: {res_json['message']}")
def _create_role(self, command):
role_name_tree: Tree = command["role_name"]
role_name: str = role_name_tree.children[0].strip("'\"")
desc_str: str = ""
if "description" in command:
desc_tree: Tree = command["description"]
desc_str = desc_tree.children[0].strip("'\"")
print(f"create role name: {role_name}, description: {desc_str}")
url = f"http://{self.host}:{self.port}/api/v1/admin/roles"
response = self.session.post(url, json={"role_name": role_name, "description": desc_str})
res_json = response.json()
if response.status_code == 200:
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | true |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/configs.py | test/testcases/configs.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pytest
# Base URL of the RAGFlow server under test; overridable via environment.
HOST_ADDRESS = os.getenv("HOST_ADDRESS", "http://127.0.0.1:9380")
VERSION = "v1"
# LLM credentials are mandatory for the suite; abort collection early without them.
ZHIPU_AI_API_KEY = os.getenv("ZHIPU_AI_API_KEY")
if ZHIPU_AI_API_KEY is None:
    pytest.exit("Error: Environment variable ZHIPU_AI_API_KEY must be set")
EMAIL = "qa@infiniflow.org"
# password is "123"
# NOTE(review): RSA-encrypted password blob; the embedded newlines are part of
# the literal — presumably tolerated server-side. Confirm before reformatting.
PASSWORD = """ctAseGvejiaSWWZ88T/m4FQVOpQyUvP+x7sXtdv3feqZACiQleuewkUi35E16wSd5C5QcnkkcV9cYc8TKPTRZlxappDuirxghxoOvFcJxFU4ixLsD
fN33jCHRoDUW81IH9zjij/vaw8IbVyb6vuwg6MX6inOEBRRzVbRYxXOu1wkWY6SsI8X70oF9aeLFp/PzQpjoe/YbSqpTq8qqrmHzn9vO+yvyYyvmDsphXe
X8f7fp9c7vUsfOCkM+gHY3PadG+QHa7KI7mzTKgUTZImK6BZtfRBATDTthEUbbaTewY4H0MnWiCeeDhcbeQao6cFy1To8pE3RpmxnGnS8BsBn8w=="""
INVALID_API_TOKEN = "invalid_key_123"
# Server-enforced name-length limits exercised by boundary tests.
DATASET_NAME_LIMIT = 128
DOCUMENT_NAME_LIMIT = 255
CHAT_ASSISTANT_NAME_LIMIT = 255
SESSION_WITH_CHAT_NAME_LIMIT = 255
# Canonical parser_config payload used when asserting dataset defaults.
DEFAULT_PARSER_CONFIG = {
    "layout_recognize": "DeepDOC",
    "chunk_token_num": 512,
    "delimiter": "\n",
    "auto_keywords": 0,
    "auto_questions": 0,
    "html4excel": False,
    "image_context_size": 0,
    "table_context_size": 0,
    "topn_tags": 3,
    "llm_id": "glm-4-flash@ZHIPU-AI",
    "raptor": {
        "use_raptor": True,
        "prompt": "Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize.",
        "max_token": 256,
        "threshold": 0.1,
        "max_cluster": 64,
        "random_seed": 0,
    },
    "graphrag": {
        "use_graphrag": True,
        "entity_types": [
            "organization",
            "person",
            "geo",
            "event",
            "category",
        ],
        "method": "light",
    },
}
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/conftest.py | test/testcases/conftest.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import requests
from configs import EMAIL, HOST_ADDRESS, PASSWORD, VERSION, ZHIPU_AI_API_KEY
# --level value -> pytest -m marker expression; each level includes the
# levels below it (p3 runs everything).
MARKER_EXPRESSIONS = {
    "p1": "p1",
    "p2": "p1 or p2",
    "p3": "p1 or p2 or p3",
}
def pytest_addoption(parser: pytest.Parser) -> None:
    """Register suite-specific CLI options: test level and client flavor."""
    level_help = f"Test level ({'/'.join(MARKER_EXPRESSIONS)}): p1=smoke, p2=core, p3=full"
    parser.addoption("--level", action="store", default="p2", choices=list(MARKER_EXPRESSIONS.keys()), help=level_help)
    parser.addoption("--client-type", action="store", default="http", choices=["python_sdk", "http", "web"], help="Test client type: 'python_sdk', 'http', 'web'")
def pytest_configure(config: pytest.Config) -> None:
    """Translate --level into the pytest marker expression for this run."""
    selected = config.getoption("--level")
    config.option.markexpr = MARKER_EXPRESSIONS[selected]
    if config.option.verbose > 0:
        print(f"\n[CONFIG] Active test level: {selected}")
def register():
    """Register the QA account; treat 'already registered' as success.

    Raises on any other non-zero response code.
    """
    url = HOST_ADDRESS + f"/{VERSION}/user/register"
    name = "qa"
    register_data = {"email": EMAIL, "nickname": name, "password": PASSWORD}
    res = requests.post(url=url, json=register_data).json()
    # Bug fix: message may be absent/None in the response, and
    # `"..." not in None` raises TypeError — default to "" before the check.
    if res.get("code") != 0 and "has already registered" not in (res.get("message") or ""):
        raise Exception(res.get("message"))
def login():
    """Log the QA user in and return the Authorization header value."""
    url = HOST_ADDRESS + f"/{VERSION}/user/login"
    response = requests.post(url=url, json={"email": EMAIL, "password": PASSWORD})
    body = response.json()
    if body.get("code") != 0:
        raise Exception(body.get("message"))
    # The session token lives in the response header, not the JSON body.
    return response.headers["Authorization"]
@pytest.fixture(scope="session")
def auth():
    """Session-wide auth header: register the QA user (idempotent), then log in."""
    try:
        register()
    except Exception as e:
        # Registration problems are reported but do not block login attempts.
        print(e)
    return login()
@pytest.fixture(scope="session")
def token(auth):
    """Exchange the login Authorization header for a long-lived API token."""
    url = HOST_ADDRESS + f"/{VERSION}/system/new_token"
    headers = {"Authorization": auth}
    body = requests.post(url=url, headers=headers).json()
    if body.get("code") != 0:
        raise Exception(body.get("message"))
    return body["data"].get("token")
def get_my_llms(auth, name):
    """Return True if LLM factory *name* is already configured for this account."""
    url = HOST_ADDRESS + f"/{VERSION}/llm/my_llms"
    body = requests.get(url=url, headers={"Authorization": auth}).json()
    if body.get("code") != 0:
        raise Exception(body.get("message"))
    return name in body.get("data")
def add_models(auth):
    """Ensure required LLM factories are registered; abort the run on failure."""
    url = HOST_ADDRESS + f"/{VERSION}/llm/set_api_key"
    authorization = {"Authorization": auth}
    models_info = {
        "ZHIPU-AI": {"llm_factory": "ZHIPU-AI", "api_key": ZHIPU_AI_API_KEY},
    }
    for name, model_info in models_info.items():
        if get_my_llms(auth, name):
            continue  # already configured; skip re-registration
        res = requests.post(url=url, headers=authorization, json=model_info).json()
        if res.get("code") != 0:
            pytest.exit(f"Critical error in add_models: {res.get('message')}")
def get_tenant_info(auth):
    """Return the tenant id of the authenticated QA user."""
    url = HOST_ADDRESS + f"/{VERSION}/user/tenant_info"
    body = requests.get(url=url, headers={"Authorization": auth}).json()
    if body.get("code") != 0:
        raise Exception(body.get("message"))
    return body["data"].get("tenant_id")
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info(auth):
    """Autouse session setup: register models, then pin tenant model defaults."""
    tenant_id = None
    try:
        add_models(auth)
        tenant_id = get_tenant_info(auth)
    except Exception as e:
        # Setup is mandatory for every test; fail the whole run early.
        pytest.exit(f"Error in set_tenant_info: {str(e)}")
    url = HOST_ADDRESS + f"/{VERSION}/user/set_tenant_info"
    authorization = {"Authorization": auth}
    # NOTE(review): empty-string vs None mixed below (tts_id=None, others "") —
    # presumably both mean "unset" to the server; confirm.
    tenant_info = {
        "tenant_id": tenant_id,
        "llm_id": "glm-4-flash@ZHIPU-AI",
        "embd_id": "BAAI/bge-small-en-v1.5@Builtin",
        "img2txt_id": "",
        "asr_id": "",
        "tts_id": None,
    }
    response = requests.post(url=url, headers=authorization, json=tenant_info)
    res = response.json()
    if res.get("code") != 0:
        raise Exception(res.get("message"))
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/common.py | test/testcases/test_sdk_api/common.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pathlib import Path
from ragflow_sdk import Chat, Chunk, DataSet, Document, RAGFlow, Session
from utils.file_utils import create_txt_file
# DATASET MANAGEMENT
def batch_create_datasets(client: RAGFlow, num: int) -> list[DataSet]:
    """Create *num* datasets named dataset_0 .. dataset_{num-1}."""
    created = []
    for idx in range(num):
        created.append(client.create_dataset(name=f"dataset_{idx}"))
    return created
# FILE MANAGEMENT WITHIN DATASET
def bulk_upload_documents(dataset: DataSet, num: int, tmp_path: Path) -> list[Document]:
    """Create *num* txt files under *tmp_path* and upload them in one call."""
    document_infos = []
    for idx in range(num):
        fp = create_txt_file(tmp_path / f"ragflow_test_upload_{idx}.txt")
        document_infos.append({"display_name": fp.name, "blob": fp.read_bytes()})
    return dataset.upload_documents(document_infos)
# CHUNK MANAGEMENT WITHIN DATASET
def batch_add_chunks(document: Document, num: int) -> list[Chunk]:
    """Add *num* chunks with predictable contents to *document*."""
    added = []
    for idx in range(num):
        added.append(document.add_chunk(content=f"chunk test {idx}"))
    return added
# CHAT ASSISTANT MANAGEMENT
def batch_create_chat_assistants(client: RAGFlow, num: int) -> list[Chat]:
    """Create *num* chat assistants with sequentially numbered names."""
    created = []
    for idx in range(num):
        created.append(client.create_chat(name=f"test_chat_assistant_{idx}"))
    return created
# SESSION MANAGEMENT
def batch_add_sessions_with_chat_assistant(chat_assistant: Chat, num) -> list[Session]:
    """Open *num* sessions on *chat_assistant*, sequentially named."""
    sessions = []
    for idx in range(num):
        sessions.append(chat_assistant.create_session(name=f"session_with_chat_assistant_{idx}"))
    return sessions
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/conftest.py | test/testcases/test_sdk_api/conftest.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pathlib import Path
from time import sleep
import pytest
from common import (
batch_add_chunks,
batch_create_chat_assistants,
batch_create_datasets,
bulk_upload_documents,
)
from configs import HOST_ADDRESS, VERSION
from pytest import FixtureRequest
from ragflow_sdk import Chat, Chunk, DataSet, Document, RAGFlow
from utils import wait_for
from utils.file_utils import (
create_docx_file,
create_eml_file,
create_excel_file,
create_html_file,
create_image_file,
create_json_file,
create_md_file,
create_pdf_file,
create_ppt_file,
create_txt_file,
)
@wait_for(30, 1, "Document parsing timeout")
def condition(_dataset: DataSet):
    """Poll helper: True once every document in the dataset has run == 'DONE'."""
    documents = _dataset.list_documents(page_size=1000)
    return all(document.run == "DONE" for document in documents)
@pytest.fixture
def generate_test_files(request: FixtureRequest, tmp_path: Path):
    """Create sample file(s) of the type named by the fixture param.

    An empty param ("") creates every supported type; otherwise only the
    matching one. Returns a {file_type: path} mapping of what was created.
    """
    file_creators = {
        "docx": (tmp_path / "ragflow_test.docx", create_docx_file),
        "excel": (tmp_path / "ragflow_test.xlsx", create_excel_file),
        "ppt": (tmp_path / "ragflow_test.pptx", create_ppt_file),
        "image": (tmp_path / "ragflow_test.png", create_image_file),
        "pdf": (tmp_path / "ragflow_test.pdf", create_pdf_file),
        "txt": (tmp_path / "ragflow_test.txt", create_txt_file),
        "md": (tmp_path / "ragflow_test.md", create_md_file),
        "json": (tmp_path / "ragflow_test.json", create_json_file),
        "eml": (tmp_path / "ragflow_test.eml", create_eml_file),
        "html": (tmp_path / "ragflow_test.html", create_html_file),
    }
    files = {}
    for file_type, (file_path, creator_func) in file_creators.items():
        if request.param in ["", file_type]:
            creator_func(file_path)
            files[file_type] = file_path
    return files
@pytest.fixture(scope="class")
def ragflow_tmp_dir(request: FixtureRequest, tmp_path_factory: Path) -> Path:
    """Per-test-class temporary directory, named after the class."""
    return tmp_path_factory.mktemp(request.cls.__name__)
@pytest.fixture(scope="session")
def client(token: str) -> RAGFlow:
    """Session-wide SDK client bound to the QA API token."""
    sdk_client = RAGFlow(api_key=token, base_url=HOST_ADDRESS, version=VERSION)
    return sdk_client
@pytest.fixture(scope="function")
def clear_datasets(request: FixtureRequest, client: RAGFlow):
    """After the test, delete every dataset (ids=None means 'all')."""
    request.addfinalizer(lambda: client.delete_datasets(ids=None))
@pytest.fixture(scope="function")
def clear_chat_assistants(request: FixtureRequest, client: RAGFlow):
    """After the test, delete every chat assistant (ids=None means 'all')."""
    request.addfinalizer(lambda: client.delete_chats(ids=None))
@pytest.fixture(scope="function")
def clear_session_with_chat_assistants(request, add_chat_assistants):
    """After the test, best-effort drop of every assistant's sessions."""
    _, _, chat_assistants = add_chat_assistants

    def cleanup():
        for assistant in chat_assistants:
            try:
                assistant.delete_sessions(ids=None)
            except Exception:
                # Best-effort: the assistant may already have been deleted.
                pass

    request.addfinalizer(cleanup)
@pytest.fixture(scope="class")
def add_dataset(request: FixtureRequest, client: RAGFlow) -> DataSet:
    """One dataset shared by the test class; all datasets removed afterwards."""
    request.addfinalizer(lambda: client.delete_datasets(ids=None))
    return batch_create_datasets(client, 1)[0]
@pytest.fixture(scope="function")
def add_dataset_func(request: FixtureRequest, client: RAGFlow) -> DataSet:
    """Per-test variant of add_dataset; all datasets removed after the test."""
    request.addfinalizer(lambda: client.delete_datasets(ids=None))
    return batch_create_datasets(client, 1)[0]
@pytest.fixture(scope="class")
def add_document(add_dataset: DataSet, ragflow_tmp_dir: Path) -> tuple[DataSet, Document]:
    """The shared dataset plus one freshly uploaded txt document."""
    uploaded = bulk_upload_documents(add_dataset, 1, ragflow_tmp_dir)
    return add_dataset, uploaded[0]
@pytest.fixture(scope="class")
def add_chunks(request: FixtureRequest, add_document: tuple[DataSet, Document]) -> tuple[DataSet, Document, list[Chunk]]:
    """Parse the shared document, add 4 chunks; chunks removed afterwards."""
    def cleanup():
        # Closes over `document`, which is bound below — finalizers only run
        # after the fixture body completes, so this is safe.
        try:
            document.delete_chunks(ids=[])
        except Exception:
            pass
    request.addfinalizer(cleanup)
    dataset, document = add_document
    dataset.async_parse_documents([document.id])
    # Block until every document reports run == "DONE" (30s timeout).
    condition(dataset)
    chunks = batch_add_chunks(document, 4)
    # issues/6487
    # Give the engine a moment to index the new chunks before tests query them.
    sleep(1)
    return dataset, document, chunks
@pytest.fixture(scope="class")
def add_chat_assistants(request, client, add_document) -> tuple[DataSet, Document, list[Chat]]:
    """Parse the shared document and create 5 assistants; all removed afterwards."""
    def cleanup():
        try:
            client.delete_chats(ids=None)
        except Exception:
            pass
    request.addfinalizer(cleanup)
    dataset, document = add_document
    dataset.async_parse_documents([document.id])
    # Block until parsing finishes so assistants see indexed content.
    condition(dataset)
    return dataset, document, batch_create_chat_assistants(client, 5)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_chat_assistant_management/test_delete_chat_assistants.py | test/testcases/test_sdk_api/test_chat_assistant_management/test_delete_chat_assistants.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_create_chat_assistants
class TestChatAssistantsDelete:
    """Deletion tests for chat assistants: payload variants, invalid ids,
    repeated/duplicate deletion, concurrent deletion, and 1k bulk delete."""

    @pytest.mark.parametrize(
        "payload, expected_message, remaining",
        [
            pytest.param(None, "", 0, marks=pytest.mark.p3),
            pytest.param({"ids": []}, "", 0, marks=pytest.mark.p3),
            pytest.param({"ids": ["invalid_id"]}, "Assistant(invalid_id) not found.", 5, marks=pytest.mark.p3),
            pytest.param({"ids": ["\n!?。;!?\"'"]}, """Assistant(\n!?。;!?"\') not found.""", 5, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r[:1]}, "", 4, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r}, "", 0, marks=pytest.mark.p1),
        ],
    )
    def test_basic_scenarios(self, client, add_chat_assistants_func, payload, expected_message, remaining):
        """Delete with literal or callable payloads; verify error text or remaining count."""
        _, _, chat_assistants = add_chat_assistants_func
        if callable(payload):
            # Callable payloads receive the list of freshly created assistant ids.
            payload = payload([chat_assistant.id for chat_assistant in chat_assistants])
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.delete_chats(**payload)
            assert expected_message in str(exception_info.value)
        else:
            if payload is None:
                # Positional None exercises the "delete everything" form.
                client.delete_chats(payload)
            else:
                client.delete_chats(**payload)
            assistants = client.list_chats()
            assert len(assistants) == remaining

    @pytest.mark.parametrize(
        "payload",
        [
            pytest.param(lambda r: {"ids": ["invalid_id"] + r}, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r[:1] + ["invalid_id"] + r[1:5]}, marks=pytest.mark.p1),
            pytest.param(lambda r: {"ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
        ],
    )
    def test_delete_partial_invalid_id(self, client, add_chat_assistants_func, payload):
        """A mix of valid and invalid ids still deletes all valid assistants."""
        _, _, chat_assistants = add_chat_assistants_func
        payload = payload([chat_assistant.id for chat_assistant in chat_assistants])
        client.delete_chats(**payload)
        assistants = client.list_chats()
        assert len(assistants) == 0

    @pytest.mark.p3
    def test_repeated_deletion(self, client, add_chat_assistants_func):
        """Deleting already-deleted ids raises a 'not found' error."""
        _, _, chat_assistants = add_chat_assistants_func
        chat_ids = [chat.id for chat in chat_assistants]
        client.delete_chats(ids=chat_ids)
        with pytest.raises(Exception) as exception_info:
            client.delete_chats(ids=chat_ids)
        assert "not found" in str(exception_info.value)

    @pytest.mark.p3
    def test_duplicate_deletion(self, client, add_chat_assistants_func):
        """Duplicated ids inside a single request are tolerated."""
        _, _, chat_assistants = add_chat_assistants_func
        chat_ids = [chat.id for chat in chat_assistants]
        client.delete_chats(ids=chat_ids + chat_ids)
        assistants = client.list_chats()
        assert len(assistants) == 0

    @pytest.mark.p3
    def test_concurrent_deletion(self, client):
        """100 parallel single-id deletions must all complete without error."""
        count = 100
        chat_ids = [client.create_chat(name=f"test_{i}").id for i in range(count)]
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(client.delete_chats, ids=[chat_ids[i]]) for i in range(count)]
        responses = list(as_completed(futures))
        assert len(responses) == count
        assert all(future.exception() is None for future in futures)

    @pytest.mark.p3
    def test_delete_1k(self, client):
        """Create 1000 assistants, then delete them in one request."""
        chat_assistants = batch_create_chat_assistants(client, 1_000)
        # Fixed: the comprehension variable previously shadowed the list it
        # iterates ("for chat_assistants in chat_assistants").
        client.delete_chats(ids=[assistant.id for assistant in chat_assistants])
        assistants = client.list_chats()
        assert len(assistants) == 0
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_chat_assistant_management/conftest.py | test/testcases/test_sdk_api/test_chat_assistant_management/conftest.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import batch_create_chat_assistants
from pytest import FixtureRequest
from ragflow_sdk import Chat, DataSet, Document, RAGFlow
from utils import wait_for
@wait_for(30, 1, "Document parsing timeout")
def condition(_dataset: DataSet):
    """Poll predicate: True once every document in the dataset has finished
    parsing (run == "DONE"); retried by the wait_for decorator."""
    docs = _dataset.list_documents(page_size=1000)
    return all(doc.run == "DONE" for doc in docs)
@pytest.fixture(scope="function")
def add_chat_assistants_func(request: FixtureRequest, client: RAGFlow, add_document: tuple[DataSet, Document]) -> tuple[DataSet, Document, list[Chat]]:
    """Function-scoped fixture: parse the shared document and create 5 chat
    assistants; every assistant is deleted again at teardown."""
    def cleanup():
        # ids=None deletes all chat assistants owned by the client.
        client.delete_chats(ids=None)
    request.addfinalizer(cleanup)
    dataset, document = add_document
    dataset.async_parse_documents([document.id])
    # Wait until parsing reports DONE; assistants need a parsed dataset.
    condition(dataset)
    return dataset, document, batch_create_chat_assistants(client, 5)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_chat_assistant_management/test_create_chat_assistant.py | test/testcases/test_sdk_api/test_chat_assistant_management/test_create_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from operator import attrgetter
import pytest
from configs import CHAT_ASSISTANT_NAME_LIMIT
from ragflow_sdk import Chat
from utils import encode_avatar
from utils.file_utils import create_image_file
@pytest.mark.usefixtures("clear_chat_assistants")
class TestChatAssistantCreate:
    """Creation-path tests: name validation, dataset binding, avatar upload,
    and llm/prompt configuration (overrides and server defaults)."""
    @pytest.mark.p1
    @pytest.mark.usefixtures("add_chunks")
    @pytest.mark.parametrize(
        "name, expected_message",
        [
            ("valid_name", ""),
            pytest.param("a" * (CHAT_ASSISTANT_NAME_LIMIT + 1), "", marks=pytest.mark.skip(reason="issues/")),
            pytest.param(1, "", marks=pytest.mark.skip(reason="issues/")),
            ("", "`name` is required."),
            ("duplicated_name", "Duplicated chat name in creating chat."),
            ("case insensitive", "Duplicated chat name in creating chat."),
        ],
    )
    def test_name(self, client, name, expected_message):
        """Name rules: required, unique, and case-insensitively unique."""
        if name == "duplicated_name":
            client.create_chat(name=name)
        elif name == "case insensitive":
            # Pre-create the upper-cased twin so the original name collides.
            client.create_chat(name=name.upper())
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.create_chat(name=name)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant = client.create_chat(name=name)
            assert chat_assistant.name == name
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "dataset_ids, expected_message",
        [
            ([], ""),
            (lambda r: [r], ""),
            (["invalid_dataset_id"], "You don't own the dataset invalid_dataset_id"),
            ("invalid_dataset_id", "You don't own the dataset i"),
        ],
    )
    def test_dataset_ids(self, client, add_chunks, dataset_ids, expected_message):
        """Dataset binding: empty list, valid id, invalid id, non-list input."""
        dataset, _, _ = add_chunks
        if callable(dataset_ids):
            # Callable variants receive the real dataset id from the fixture.
            dataset_ids = dataset_ids(dataset.id)
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.create_chat(name="ragflow test", dataset_ids=dataset_ids)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant = client.create_chat(name="ragflow test", dataset_ids=dataset_ids)
            assert chat_assistant.name == "ragflow test"
    @pytest.mark.p3
    def test_avatar(self, client, tmp_path):
        """A base64-encoded PNG avatar is accepted on creation."""
        fn = create_image_file(tmp_path / "ragflow_test.png")
        chat_assistant = client.create_chat(name="avatar_test", avatar=encode_avatar(fn), dataset_ids=[])
        assert chat_assistant.name == "avatar_test"
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "llm, expected_message",
        [
            ({}, ""),
            ({"model_name": "glm-4"}, ""),
            ({"model_name": "unknown"}, "`model_name` unknown doesn't exist"),
            ({"temperature": 0}, ""),
            ({"temperature": 1}, ""),
            pytest.param({"temperature": -1}, "", marks=pytest.mark.skip),
            pytest.param({"temperature": 10}, "", marks=pytest.mark.skip),
            pytest.param({"temperature": "a"}, "", marks=pytest.mark.skip),
            ({"top_p": 0}, ""),
            ({"top_p": 1}, ""),
            pytest.param({"top_p": -1}, "", marks=pytest.mark.skip),
            pytest.param({"top_p": 10}, "", marks=pytest.mark.skip),
            pytest.param({"top_p": "a"}, "", marks=pytest.mark.skip),
            ({"presence_penalty": 0}, ""),
            ({"presence_penalty": 1}, ""),
            pytest.param({"presence_penalty": -1}, "", marks=pytest.mark.skip),
            pytest.param({"presence_penalty": 10}, "", marks=pytest.mark.skip),
            pytest.param({"presence_penalty": "a"}, "", marks=pytest.mark.skip),
            ({"frequency_penalty": 0}, ""),
            ({"frequency_penalty": 1}, ""),
            pytest.param({"frequency_penalty": -1}, "", marks=pytest.mark.skip),
            pytest.param({"frequency_penalty": 10}, "", marks=pytest.mark.skip),
            pytest.param({"frequency_penalty": "a"}, "", marks=pytest.mark.skip),
            ({"max_token": 0}, ""),
            ({"max_token": 1024}, ""),
            pytest.param({"max_token": -1}, "", marks=pytest.mark.skip),
            pytest.param({"max_token": 10}, "", marks=pytest.mark.skip),
            pytest.param({"max_token": "a"}, "", marks=pytest.mark.skip),
            pytest.param({"unknown": "unknown"}, "", marks=pytest.mark.skip),
        ],
    )
    def test_llm(self, client, add_chunks, llm, expected_message):
        """LLM config: overridden fields round-trip; empty config gets defaults."""
        dataset, _, _ = add_chunks
        llm_o = Chat.LLM(client, llm)
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.create_chat(name="llm_test", dataset_ids=[dataset.id], llm=llm_o)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant = client.create_chat(name="llm_test", dataset_ids=[dataset.id], llm=llm_o)
            if llm:
                # Every explicitly provided field must round-trip unchanged.
                for k, v in llm.items():
                    assert attrgetter(k)(chat_assistant.llm) == v
            else:
                # Empty config: server-side defaults are applied.
                assert attrgetter("model_name")(chat_assistant.llm) == "glm-4-flash@ZHIPU-AI"
                assert attrgetter("temperature")(chat_assistant.llm) == 0.1
                assert attrgetter("top_p")(chat_assistant.llm) == 0.3
                assert attrgetter("presence_penalty")(chat_assistant.llm) == 0.4
                assert attrgetter("frequency_penalty")(chat_assistant.llm) == 0.7
                assert attrgetter("max_tokens")(chat_assistant.llm) == 512
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "prompt, expected_message",
        [
            ({"similarity_threshold": 0}, ""),
            ({"similarity_threshold": 1}, ""),
            pytest.param({"similarity_threshold": -1}, "", marks=pytest.mark.skip),
            pytest.param({"similarity_threshold": 10}, "", marks=pytest.mark.skip),
            pytest.param({"similarity_threshold": "a"}, "", marks=pytest.mark.skip),
            ({"keywords_similarity_weight": 0}, ""),
            ({"keywords_similarity_weight": 1}, ""),
            pytest.param({"keywords_similarity_weight": -1}, "", marks=pytest.mark.skip),
            pytest.param({"keywords_similarity_weight": 10}, "", marks=pytest.mark.skip),
            pytest.param({"keywords_similarity_weight": "a"}, "", marks=pytest.mark.skip),
            ({"variables": []}, ""),
            ({"top_n": 0}, ""),
            ({"top_n": 1}, ""),
            pytest.param({"top_n": -1}, "", marks=pytest.mark.skip),
            pytest.param({"top_n": 10}, "", marks=pytest.mark.skip),
            pytest.param({"top_n": "a"}, "", marks=pytest.mark.skip),
            ({"empty_response": "Hello World"}, ""),
            ({"empty_response": ""}, ""),
            ({"empty_response": "!@#$%^&*()"}, ""),
            ({"empty_response": "中文测试"}, ""),
            pytest.param({"empty_response": 123}, "", marks=pytest.mark.skip),
            pytest.param({"empty_response": True}, "", marks=pytest.mark.skip),
            pytest.param({"empty_response": " "}, "", marks=pytest.mark.skip),
            ({"opener": "Hello World"}, ""),
            ({"opener": ""}, ""),
            ({"opener": "!@#$%^&*()"}, ""),
            ({"opener": "中文测试"}, ""),
            pytest.param({"opener": 123}, "", marks=pytest.mark.skip),
            pytest.param({"opener": True}, "", marks=pytest.mark.skip),
            pytest.param({"opener": " "}, "", marks=pytest.mark.skip),
            ({"show_quote": True}, ""),
            ({"show_quote": False}, ""),
            ({"prompt": "Hello World {knowledge}"}, ""),
            ({"prompt": "{knowledge}"}, ""),
            ({"prompt": "!@#$%^&*() {knowledge}"}, ""),
            ({"prompt": "中文测试 {knowledge}"}, ""),
            ({"prompt": "Hello World"}, ""),
            ({"prompt": "Hello World", "variables": []}, ""),
            pytest.param({"prompt": 123}, """AttributeError("\'int\' object has no attribute \'find\'")""", marks=pytest.mark.skip),
            pytest.param({"prompt": True}, """AttributeError("\'int\' object has no attribute \'find\'")""", marks=pytest.mark.skip),
            pytest.param({"unknown": "unknown"}, "", marks=pytest.mark.skip),
        ],
    )
    def test_prompt(self, client, add_chunks, prompt, expected_message):
        """Prompt config: overridden fields round-trip; empty config gets defaults."""
        dataset, _, _ = add_chunks
        prompt_o = Chat.Prompt(client, prompt)
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.create_chat(name="prompt_test", dataset_ids=[dataset.id], prompt=prompt_o)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant = client.create_chat(name="prompt_test", dataset_ids=[dataset.id], prompt=prompt_o)
            if prompt:
                for k, v in prompt.items():
                    if k == "keywords_similarity_weight":
                        # Server stores the complement of the supplied weight.
                        assert attrgetter(k)(chat_assistant.prompt) == 1 - v
                    else:
                        assert attrgetter(k)(chat_assistant.prompt) == v
            else:
                # Empty config: server-side defaults are applied.
                assert attrgetter("similarity_threshold")(chat_assistant.prompt) == 0.2
                assert attrgetter("keywords_similarity_weight")(chat_assistant.prompt) == 0.7
                assert attrgetter("top_n")(chat_assistant.prompt) == 6
                assert attrgetter("variables")(chat_assistant.prompt) == [{"key": "knowledge", "optional": False}]
                assert attrgetter("rerank_model")(chat_assistant.prompt) == ""
                assert attrgetter("empty_response")(chat_assistant.prompt) == "Sorry! No relevant content was found in the knowledge base!"
                assert attrgetter("opener")(chat_assistant.prompt) == "Hi! I'm your assistant. What can I do for you?"
                assert attrgetter("show_quote")(chat_assistant.prompt) is True
                assert (
                    attrgetter("prompt")(chat_assistant.prompt)
                    == 'You are an intelligent assistant. Please summarize the content of the dataset to answer the question. Please list the data in the dataset and answer in detail. When all dataset content is irrelevant to the question, your answer must include the sentence "The answer you are looking for is not found in the dataset!" Answers need to consider chat history.\n Here is the knowledge base:\n {knowledge}\n The above is the knowledge base.'
                )
class TestChatAssistantCreate2:
    """Creation attempts against a dataset whose document is not yet parsed."""

    @pytest.mark.p2
    def test_unparsed_document(self, client, add_document):
        """Creating a chat on an unparsed dataset must be rejected."""
        dataset = add_document[0]
        with pytest.raises(Exception) as exc:
            client.create_chat(name="prompt_test", dataset_ids=[dataset.id])
        message = str(exc.value)
        assert "doesn't own parsed file" in message
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_chat_assistant_management/test_update_chat_assistant.py | test/testcases/test_sdk_api/test_chat_assistant_management/test_update_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from operator import attrgetter
import pytest
from configs import CHAT_ASSISTANT_NAME_LIMIT
from ragflow_sdk import Chat
from utils import encode_avatar
from utils.file_utils import create_image_file
class TestChatAssistantUpdate:
    """Update-path tests: name rules, avatar, and llm/prompt field updates."""
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            pytest.param({"name": "valid_name"}, "", marks=pytest.mark.p1),
            pytest.param({"name": "a" * (CHAT_ASSISTANT_NAME_LIMIT + 1)}, "", marks=pytest.mark.skip(reason="issues/")),
            pytest.param({"name": 1}, "", marks=pytest.mark.skip(reason="issues/")),
            pytest.param({"name": ""}, "`name` cannot be empty.", marks=pytest.mark.p3),
            pytest.param({"name": "test_chat_assistant_1"}, "Duplicated chat name in updating chat.", marks=pytest.mark.p3),
            pytest.param({"name": "TEST_CHAT_ASSISTANT_1"}, "Duplicated chat name in updating chat.", marks=pytest.mark.p3),
        ],
    )
    def test_name(self, client, add_chat_assistants_func, payload, expected_message):
        """Renaming: valid rename succeeds; empty/duplicate names are rejected."""
        _, _, chat_assistants = add_chat_assistants_func
        chat_assistant = chat_assistants[0]
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.update(payload)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant.update(payload)
            # Re-fetch to confirm the change was persisted server-side.
            updated_chat = client.list_chats(id=chat_assistant.id)[0]
            assert updated_chat.name == payload["name"], str(updated_chat)
    @pytest.mark.p3
    def test_avatar(self, client, add_chat_assistants_func, tmp_path):
        """Updating the avatar with a base64-encoded PNG is accepted."""
        dataset, _, chat_assistants = add_chat_assistants_func
        chat_assistant = chat_assistants[0]
        fn = create_image_file(tmp_path / "ragflow_test.png")
        payload = {"name": "avatar_test", "avatar": encode_avatar(fn), "dataset_ids": [dataset.id]}
        chat_assistant.update(payload)
        updated_chat = client.list_chats(id=chat_assistant.id)[0]
        assert updated_chat.name == payload["name"], str(updated_chat)
        assert updated_chat.avatar is not None, str(updated_chat)
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "llm, expected_message",
        [
            ({}, "ValueError"),
            ({"model_name": "glm-4"}, ""),
            ({"model_name": "unknown"}, "`model_name` unknown doesn't exist"),
            ({"temperature": 0}, ""),
            ({"temperature": 1}, ""),
            pytest.param({"temperature": -1}, "", marks=pytest.mark.skip),
            pytest.param({"temperature": 10}, "", marks=pytest.mark.skip),
            pytest.param({"temperature": "a"}, "", marks=pytest.mark.skip),
            ({"top_p": 0}, ""),
            ({"top_p": 1}, ""),
            pytest.param({"top_p": -1}, "", marks=pytest.mark.skip),
            pytest.param({"top_p": 10}, "", marks=pytest.mark.skip),
            pytest.param({"top_p": "a"}, "", marks=pytest.mark.skip),
            ({"presence_penalty": 0}, ""),
            ({"presence_penalty": 1}, ""),
            pytest.param({"presence_penalty": -1}, "", marks=pytest.mark.skip),
            pytest.param({"presence_penalty": 10}, "", marks=pytest.mark.skip),
            pytest.param({"presence_penalty": "a"}, "", marks=pytest.mark.skip),
            ({"frequency_penalty": 0}, ""),
            ({"frequency_penalty": 1}, ""),
            pytest.param({"frequency_penalty": -1}, "", marks=pytest.mark.skip),
            pytest.param({"frequency_penalty": 10}, "", marks=pytest.mark.skip),
            pytest.param({"frequency_penalty": "a"}, "", marks=pytest.mark.skip),
            ({"max_token": 0}, ""),
            ({"max_token": 1024}, ""),
            pytest.param({"max_token": -1}, "", marks=pytest.mark.skip),
            pytest.param({"max_token": 10}, "", marks=pytest.mark.skip),
            pytest.param({"max_token": "a"}, "", marks=pytest.mark.skip),
            pytest.param({"unknown": "unknown"}, "", marks=pytest.mark.skip),
        ],
    )
    def test_llm(self, client, add_chat_assistants_func, llm, expected_message):
        """Updating LLM config: fields round-trip; empty dict raises ValueError."""
        dataset, _, chat_assistants = add_chat_assistants_func
        chat_assistant = chat_assistants[0]
        payload = {"name": "llm_test", "llm": llm, "dataset_ids": [dataset.id]}
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.update(payload)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant.update(payload)
            updated_chat = client.list_chats(id=chat_assistant.id)[0]
            if llm:
                for k, v in llm.items():
                    assert attrgetter(k)(updated_chat.llm) == v, str(updated_chat)
            else:
                # NOTE(review): this branch is unreachable — llm == {} maps to
                # expected_message "ValueError" above, taking the raises path.
                excepted_value = Chat.LLM(
                    client,
                    {
                        "model_name": "glm-4-flash@ZHIPU-AI",
                        "temperature": 0.1,
                        "top_p": 0.3,
                        "presence_penalty": 0.4,
                        "frequency_penalty": 0.7,
                        "max_tokens": 512,
                    },
                )
                assert str(updated_chat.llm) == str(excepted_value), str(updated_chat)
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "prompt, expected_message",
        [
            ({}, "ValueError"),
            ({"similarity_threshold": 0}, ""),
            ({"similarity_threshold": 1}, ""),
            pytest.param({"similarity_threshold": -1}, "", marks=pytest.mark.skip),
            pytest.param({"similarity_threshold": 10}, "", marks=pytest.mark.skip),
            pytest.param({"similarity_threshold": "a"}, "", marks=pytest.mark.skip),
            ({"keywords_similarity_weight": 0}, ""),
            ({"keywords_similarity_weight": 1}, ""),
            pytest.param({"keywords_similarity_weight": -1}, "", marks=pytest.mark.skip),
            pytest.param({"keywords_similarity_weight": 10}, "", marks=pytest.mark.skip),
            pytest.param({"keywords_similarity_weight": "a"}, "", marks=pytest.mark.skip),
            ({"variables": []}, ""),
            ({"top_n": 0}, ""),
            ({"top_n": 1}, ""),
            pytest.param({"top_n": -1}, "", marks=pytest.mark.skip),
            pytest.param({"top_n": 10}, "", marks=pytest.mark.skip),
            pytest.param({"top_n": "a"}, "", marks=pytest.mark.skip),
            ({"empty_response": "Hello World"}, ""),
            ({"empty_response": ""}, ""),
            ({"empty_response": "!@#$%^&*()"}, ""),
            ({"empty_response": "中文测试"}, ""),
            pytest.param({"empty_response": 123}, "", marks=pytest.mark.skip),
            pytest.param({"empty_response": True}, "", marks=pytest.mark.skip),
            pytest.param({"empty_response": " "}, "", marks=pytest.mark.skip),
            ({"opener": "Hello World"}, ""),
            ({"opener": ""}, ""),
            ({"opener": "!@#$%^&*()"}, ""),
            ({"opener": "中文测试"}, ""),
            pytest.param({"opener": 123}, "", marks=pytest.mark.skip),
            pytest.param({"opener": True}, "", marks=pytest.mark.skip),
            pytest.param({"opener": " "}, "", marks=pytest.mark.skip),
            ({"show_quote": True}, ""),
            ({"show_quote": False}, ""),
            ({"prompt": "Hello World {knowledge}"}, ""),
            ({"prompt": "{knowledge}"}, ""),
            ({"prompt": "!@#$%^&*() {knowledge}"}, ""),
            ({"prompt": "中文测试 {knowledge}"}, ""),
            ({"prompt": "Hello World"}, ""),
            ({"prompt": "Hello World", "variables": []}, ""),
            pytest.param({"prompt": 123}, """AttributeError("\'int\' object has no attribute \'find\'")""", marks=pytest.mark.skip),
            pytest.param({"prompt": True}, """AttributeError("\'int\' object has no attribute \'find\'")""", marks=pytest.mark.skip),
            pytest.param({"unknown": "unknown"}, "", marks=pytest.mark.skip),
        ],
    )
    def test_prompt(self, client, add_chat_assistants_func, prompt, expected_message):
        """Updating prompt config: fields round-trip; empty dict raises ValueError."""
        dataset, _, chat_assistants = add_chat_assistants_func
        chat_assistant = chat_assistants[0]
        payload = {"name": "prompt_test", "prompt": prompt, "dataset_ids": [dataset.id]}
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.update(payload)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant.update(payload)
            updated_chat = client.list_chats(id=chat_assistant.id)[0]
            if prompt:
                for k, v in prompt.items():
                    if k == "keywords_similarity_weight":
                        # Server stores the complement of the supplied weight.
                        assert attrgetter(k)(updated_chat.prompt) == 1 - v, str(updated_chat)
                    else:
                        assert attrgetter(k)(updated_chat.prompt) == v, str(updated_chat)
            else:
                # NOTE(review): unreachable (prompt == {} raises above), and the
                # expected value is built with Chat.LLM although it holds prompt
                # fields — presumably Chat.Prompt was intended; confirm if this
                # branch is ever revived.
                excepted_value = Chat.LLM(
                    client,
                    {
                        "similarity_threshold": 0.2,
                        "keywords_similarity_weight": 0.7,
                        "top_n": 6,
                        "variables": [{"key": "knowledge", "optional": False}],
                        "rerank_model": "",
                        "empty_response": "Sorry! No relevant content was found in the knowledge base!",
                        "opener": "Hi! I'm your assistant. What can I do for you?",
                        "show_quote": True,
                        "prompt": 'You are an intelligent assistant. Please summarize the content of the dataset to answer the question. Please list the data in the dataset and answer in detail. When all dataset content is irrelevant to the question, your answer must include the sentence "The answer you are looking for is not found in the dataset!" Answers need to consider chat history.\n Here is the knowledge base:\n {knowledge}\n The above is the knowledge base.',
                    },
                )
                assert str(updated_chat.prompt) == str(excepted_value), str(updated_chat)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_chat_assistant_management/test_list_chat_assistants.py | test/testcases/test_sdk_api/test_chat_assistant_management/test_list_chat_assistants.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
@pytest.mark.usefixtures("add_chat_assistants")
class TestChatAssistantsList:
    """Listing tests against 5 pre-created assistants: paging, ordering,
    filtering by name/id, concurrency, and dataset-deletion resilience."""
    @pytest.mark.p1
    def test_default(self, client):
        """Default listing returns all 5 fixture assistants."""
        assistants = client.list_chats()
        assert len(assistants) == 5
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size, expected_message",
        [
            ({"page": 0, "page_size": 2}, 2, ""),
            ({"page": 2, "page_size": 2}, 2, ""),
            ({"page": 3, "page_size": 2}, 1, ""),
            ({"page": "3", "page_size": 2}, 0, "not instance of"),
            pytest.param(
                {"page": -1, "page_size": 2},
                0,
                "1064",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
            pytest.param(
                {"page": "a", "page_size": 2},
                0,
                """ValueError("invalid literal for int() with base 10: \'a\'")""",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
        ],
    )
    def test_page(self, client, params, expected_page_size, expected_message):
        """Page number handling: valid pages slice correctly; bad types raise."""
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.list_chats(**params)
            assert expected_message in str(exception_info.value)
        else:
            assistants = client.list_chats(**params)
            assert len(assistants) == expected_page_size
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size, expected_message",
        [
            ({"page_size": 0}, 0, ""),
            ({"page_size": 1}, 1, ""),
            ({"page_size": 6}, 5, ""),
            ({"page_size": "1"}, 0, "not instance of"),
            pytest.param(
                {"page_size": -1},
                0,
                "1064",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
            pytest.param(
                {"page_size": "a"},
                0,
                """ValueError("invalid literal for int() with base 10: \'a\'")""",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
        ],
    )
    def test_page_size(self, client, params, expected_page_size, expected_message):
        """Page size handling: capped at total count; bad types raise."""
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.list_chats(**params)
            assert expected_message in str(exception_info.value)
        else:
            assistants = client.list_chats(**params)
            assert len(assistants) == expected_page_size
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params, expected_message",
        [
            ({"orderby": "create_time"}, ""),
            ({"orderby": "update_time"}, ""),
            pytest.param({"orderby": "name", "desc": "False"}, "", marks=pytest.mark.skip(reason="issues/5851")),
            pytest.param({"orderby": "unknown"}, "orderby should be create_time or update_time", marks=pytest.mark.skip(reason="issues/5851")),
        ],
    )
    def test_orderby(self, client, params, expected_message):
        """Only create_time/update_time are accepted for orderby."""
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.list_chats(**params)
            assert expected_message in str(exception_info.value)
        else:
            client.list_chats(**params)
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params, expected_message",
        [
            ({"desc": None}, "not instance of"),
            ({"desc": "true"}, "not instance of"),
            ({"desc": "True"}, "not instance of"),
            ({"desc": True}, ""),
            ({"desc": "false"}, "not instance of"),
            ({"desc": "False"}, "not instance of"),
            ({"desc": False}, ""),
            ({"desc": "False", "orderby": "update_time"}, "not instance of"),
            pytest.param(
                {"desc": "unknown"},
                "desc should be true or false",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
        ],
    )
    def test_desc(self, client, params, expected_message):
        """desc must be a real bool; string forms are rejected client-side."""
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.list_chats(**params)
            assert expected_message in str(exception_info.value)
        else:
            client.list_chats(**params)
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_num, expected_message",
        [
            ({"name": None}, 5, ""),
            ({"name": ""}, 5, ""),
            ({"name": "test_chat_assistant_1"}, 1, ""),
            ({"name": "unknown"}, 0, "The chat doesn't exist"),
        ],
    )
    def test_name(self, client, params, expected_num, expected_message):
        """Name filter: None/empty list all; exact match returns one; unknown raises."""
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.list_chats(**params)
            assert expected_message in str(exception_info.value)
        else:
            assistants = client.list_chats(**params)
            if params["name"] in [None, ""]:
                assert len(assistants) == expected_num
            else:
                assert assistants[0].name == params["name"]
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "chat_assistant_id, expected_num, expected_message",
        [
            (None, 5, ""),
            ("", 5, ""),
            (lambda r: r[0], 1, ""),
            ("unknown", 0, "The chat doesn't exist"),
        ],
    )
    def test_id(self, client, add_chat_assistants, chat_assistant_id, expected_num, expected_message):
        """Id filter: None/empty list all; a real id returns one; unknown raises."""
        _, _, chat_assistants = add_chat_assistants
        if callable(chat_assistant_id):
            # Callable variant selects a real id from the fixture assistants.
            params = {"id": chat_assistant_id([chat.id for chat in chat_assistants])}
        else:
            params = {"id": chat_assistant_id}
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.list_chats(**params)
            assert expected_message in str(exception_info.value)
        else:
            assistants = client.list_chats(**params)
            if params["id"] in [None, ""]:
                assert len(assistants) == expected_num
            else:
                assert assistants[0].id == params["id"]
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "chat_assistant_id, name, expected_num, expected_message",
        [
            (lambda r: r[0], "test_chat_assistant_0", 1, ""),
            (lambda r: r[0], "test_chat_assistant_1", 0, "The chat doesn't exist"),
            (lambda r: r[0], "unknown", 0, "The chat doesn't exist"),
            ("id", "chat_assistant_0", 0, "The chat doesn't exist"),
        ],
    )
    def test_name_and_id(self, client, add_chat_assistants, chat_assistant_id, name, expected_num, expected_message):
        """Combined filters: both id and name must match the same assistant."""
        _, _, chat_assistants = add_chat_assistants
        if callable(chat_assistant_id):
            params = {"id": chat_assistant_id([chat.id for chat in chat_assistants]), "name": name}
        else:
            params = {"id": chat_assistant_id, "name": name}
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.list_chats(**params)
            assert expected_message in str(exception_info.value)
        else:
            assistants = client.list_chats(**params)
            assert len(assistants) == expected_num
    @pytest.mark.p3
    def test_concurrent_list(self, client):
        """100 concurrent list calls all complete."""
        count = 100
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(client.list_chats) for _ in range(count)]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
    @pytest.mark.p2
    def test_list_chats_after_deleting_associated_dataset(self, client, add_chat_assistants):
        """Assistants remain listable after their bound dataset is deleted."""
        dataset, _, _ = add_chat_assistants
        client.delete_datasets(ids=[dataset.id])
        assistants = client.list_chats()
        assert len(assistants) == 5
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_dataset_mangement/test_list_datasets.py | test/testcases/test_sdk_api/test_dataset_mangement/test_list_datasets.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from configs import HOST_ADDRESS, INVALID_API_TOKEN
from ragflow_sdk import RAGFlow
class TestAuthorization:
    """Listing datasets with missing or invalid credentials must fail."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "invalid_auth, expected_message",
        [
            (None, "Authentication error: API key is invalid!"),
            (INVALID_API_TOKEN, "Authentication error: API key is invalid!"),
        ],
    )
    def test_auth_invalid(self, invalid_auth, expected_message):
        """An unauthenticated client is rejected on list_datasets."""
        bad_client = RAGFlow(invalid_auth, HOST_ADDRESS)
        with pytest.raises(Exception) as exc:
            bad_client.list_datasets()
        assert expected_message in str(exc.value)
class TestCapability:
    """Smoke-test list_datasets under concurrent load."""

    @pytest.mark.p3
    def test_concurrent_list(self, client):
        """100 parallel list calls through a 5-worker pool must all complete."""
        total = 100
        with ThreadPoolExecutor(max_workers=5) as pool:
            pending = [pool.submit(client.list_datasets) for _ in range(total)]
        done = list(as_completed(pending))
        assert len(done) == total, done
@pytest.mark.usefixtures("add_datasets")
class TestDatasetsList:
    """Coverage of every list_datasets query parameter against a 5-dataset fixture."""

    @pytest.mark.p1
    def test_params_unset(self, client):
        """No arguments: all 5 fixture datasets come back."""
        datasets = client.list_datasets()
        assert len(datasets) == 5, str(datasets)

    @pytest.mark.p2
    def test_params_empty(self, client):
        """Empty kwargs behave exactly like no arguments."""
        datasets = client.list_datasets(**{})
        assert len(datasets) == 5, str(datasets)

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size",
        [
            ({"page": 2, "page_size": 2}, 2),
            ({"page": 3, "page_size": 2}, 1),
            ({"page": 4, "page_size": 2}, 0),
            ({"page": 1, "page_size": 10}, 5),
        ],
        ids=["normal_middle_page", "normal_last_partial_page", "beyond_max_page", "full_data_single_page"],
    )
    def test_page(self, client, params, expected_page_size):
        """Pagination returns the expected slice size, including past-the-end pages."""
        datasets = client.list_datasets(**params)
        assert len(datasets) == expected_page_size, str(datasets)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "params, expected_message",
        [
            ({"page": 0}, "Input should be greater than or equal to 1"),
            ({"page": "a"}, "not instance of"),
        ],
        ids=["page_0", "page_a"],
    )
    def test_page_invalid(self, client, params, expected_message):
        """Out-of-range or wrongly-typed page values raise with a clear message."""
        with pytest.raises(Exception) as exception_info:
            client.list_datasets(**params)
        assert expected_message in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p2
    def test_page_none(self, client):
        """page=None is a type error, not 'use the default'."""
        params = {"page": None}
        with pytest.raises(Exception) as exception_info:
            client.list_datasets(**params)
        assert "not instance of" in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size",
        [
            ({"page_size": 1}, 1),
            ({"page_size": 3}, 3),
            ({"page_size": 5}, 5),
            ({"page_size": 6}, 5),
        ],
        ids=["min_valid_page_size", "medium_page_size", "page_size_equals_total", "page_size_exceeds_total"],
    )
    def test_page_size(self, client, params, expected_page_size):
        """page_size caps the result; values beyond the total return everything."""
        datasets = client.list_datasets(**params)
        assert len(datasets) == expected_page_size, str(datasets)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "params, expected_message",
        [
            ({"page_size": 0}, "Input should be greater than or equal to 1"),
            ({"page_size": "a"}, "not instance of"),
        ],
    )
    def test_page_size_invalid(self, client, params, expected_message):
        """Zero or wrongly-typed page_size values are rejected."""
        with pytest.raises(Exception) as exception_info:
            client.list_datasets(**params)
        assert expected_message in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p2
    def test_page_size_none(self, client):
        """page_size=None is a type error, not 'use the default'."""
        params = {"page_size": None}
        with pytest.raises(Exception) as exception_info:
            client.list_datasets(**params)
        assert "not instance of" in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "params",
        [
            {"orderby": "create_time"},
            {"orderby": "update_time"},
        ],
        ids=["orderby_create_time", "orderby_update_time"],
    )
    def test_orderby(self, client, params):
        """Both supported orderby fields are accepted without error."""
        client.list_datasets(**params)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params",
        [
            {"orderby": ""},
            {"orderby": "unknown"},
            {"orderby": "CREATE_TIME"},
            {"orderby": "UPDATE_TIME"},
            {"orderby": " create_time "},
        ],
        ids=["empty", "unknown", "orderby_create_time_upper", "orderby_update_time_upper", "whitespace"],
    )
    def test_orderby_invalid(self, client, params):
        """orderby is strict: no other values, no case folding, no whitespace trimming."""
        with pytest.raises(Exception) as exception_info:
            client.list_datasets(**params)
        assert "Input should be 'create_time' or 'update_time'" in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p3
    def test_orderby_none(self, client):
        """orderby=None is a type error."""
        params = {"orderby": None}
        with pytest.raises(Exception) as exception_info:
            client.list_datasets(**params)
        assert "not instance of" in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "params",
        [
            {"desc": True},
            {"desc": False},
        ],
        ids=["desc=True", "desc=False"],
    )
    def test_desc(self, client, params):
        """Both sort directions are accepted without error."""
        client.list_datasets(**params)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params",
        [
            {"desc": 3.14},
            {"desc": "unknown"},
        ],
        ids=["float_value", "invalid_string"],
    )
    def test_desc_invalid(self, client, params):
        """Non-boolean desc values are rejected."""
        with pytest.raises(Exception) as exception_info:
            client.list_datasets(**params)
        assert "not instance of" in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p3
    def test_desc_none(self, client):
        """desc=None is a type error."""
        params = {"desc": None}
        with pytest.raises(Exception) as exception_info:
            client.list_datasets(**params)
        assert "not instance of" in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p1
    def test_name(self, client):
        """Filtering by an existing name returns exactly that dataset."""
        params = {"name": "dataset_1"}
        datasets = client.list_datasets(**params)
        assert len(datasets) == 1, str(datasets)
        assert datasets[0].name == "dataset_1", str(datasets)

    @pytest.mark.p2
    def test_name_wrong(self, client):
        """An unknown name surfaces as a permission error."""
        params = {"name": "wrong name"}
        with pytest.raises(Exception) as exception_info:
            client.list_datasets(**params)
        assert "lacks permission for dataset" in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p2
    def test_name_empty(self, client):
        """An empty name filter means 'no filter'."""
        params = {"name": ""}
        datasets = client.list_datasets(**params)
        assert len(datasets) == 5, str(datasets)

    @pytest.mark.p2
    def test_name_none(self, client):
        """name=None means 'no filter'."""
        params = {"name": None}
        datasets = client.list_datasets(**params)
        assert len(datasets) == 5, str(datasets)

    @pytest.mark.p1
    def test_id(self, client, add_datasets):
        """Filtering by an existing id returns exactly that dataset."""
        dataset_ids = [dataset.id for dataset in add_datasets]
        params = {"id": dataset_ids[0]}
        datasets = client.list_datasets(**params)
        assert len(datasets) == 1, str(datasets)
        assert datasets[0].id == dataset_ids[0], str(datasets)

    @pytest.mark.p2
    def test_id_not_uuid(self, client):
        """Malformed ids fail UUID1 validation."""
        params = {"id": "not_uuid"}
        with pytest.raises(Exception) as exception_info:
            client.list_datasets(**params)
        assert "Invalid UUID1 format" in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p2
    def test_id_not_uuid1(self, client):
        """A well-formed UUID4 is still rejected: only UUID1 ids are valid."""
        params = {"id": uuid.uuid4().hex}
        with pytest.raises(Exception) as exception_info:
            client.list_datasets(**params)
        assert "Invalid UUID1 format" in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p2
    def test_id_wrong_uuid(self, client):
        """A syntactically valid but unknown UUID1 surfaces as a permission error."""
        params = {"id": "d94a8dc02c9711f0930f7fbc369eab6d"}
        with pytest.raises(Exception) as exception_info:
            client.list_datasets(**params)
        assert "lacks permission for dataset" in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p2
    def test_id_empty(self, client):
        """An empty id string fails UUID1 validation (unlike name='')."""
        params = {"id": ""}
        with pytest.raises(Exception) as exception_info:
            client.list_datasets(**params)
        assert "Invalid UUID1 format" in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p2
    def test_id_none(self, client):
        """id=None means 'no filter'."""
        params = {"id": None}
        datasets = client.list_datasets(**params)
        assert len(datasets) == 5, str(datasets)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "func, name, expected_num",
        [
            (lambda r: r[0].id, "dataset_0", 1),
            (lambda r: r[0].id, "dataset_1", 0),
        ],
        ids=["name_and_id_match", "name_and_id_mismatch"],
    )
    def test_name_and_id(self, client, add_datasets, func, name, expected_num):
        """When both filters are given they must refer to the same dataset."""
        params = None
        if callable(func):
            params = {"id": func(add_datasets), "name": name}
        datasets = client.list_datasets(**params)
        assert len(datasets) == expected_num, str(datasets)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "dataset_id, name",
        [
            (lambda r: r[0].id, "wrong_name"),
            (uuid.uuid1().hex, "dataset_0"),
        ],
        ids=["name", "id"],
    )
    def test_name_and_id_wrong(self, client, add_datasets, dataset_id, name):
        """A wrong value in either filter surfaces as a permission error."""
        # dataset_id is either a lambda resolving a fixture id or a literal hex string.
        if callable(dataset_id):
            params = {"id": dataset_id(add_datasets), "name": name}
        else:
            params = {"id": dataset_id, "name": name}
        with pytest.raises(Exception) as exception_info:
            client.list_datasets(**params)
        assert "lacks permission for dataset" in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p2
    def test_field_unsupported(self, client):
        """Unknown keyword arguments are rejected by the SDK signature."""
        params = {"unknown_field": "unknown_field"}
        with pytest.raises(Exception) as exception_info:
            client.list_datasets(**params)
        assert "got an unexpected keyword argument" in str(exception_info.value), str(exception_info.value)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_dataset_mangement/test_delete_datasets.py | test/testcases/test_sdk_api/test_dataset_mangement/test_delete_datasets.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_create_datasets
from configs import HOST_ADDRESS, INVALID_API_TOKEN
from ragflow_sdk import RAGFlow
class TestAuthorization:
    """delete_datasets must reject missing or bogus API keys."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "invalid_auth, expected_message",
        [
            (None, "Authentication error: API key is invalid!"),
            (INVALID_API_TOKEN, "Authentication error: API key is invalid!"),
        ],
    )
    def test_auth_invalid(self, invalid_auth, expected_message):
        # A fresh client carries the bad credential; the message match is exact here.
        bad_client = RAGFlow(invalid_auth, HOST_ADDRESS)
        with pytest.raises(Exception) as exc_info:
            bad_client.delete_datasets()
        assert str(exc_info.value) == expected_message
class TestCapability:
    """Stress delete_datasets: one bulk delete, then 1k concurrent single deletes."""

    @pytest.mark.p3
    def test_delete_dataset_1k(self, client):
        """Create 1k datasets, delete them in one call, and verify none remain."""
        datasets = batch_create_datasets(client, 1_000)
        client.delete_datasets(**{"ids": [dataset.id for dataset in datasets]})
        datasets = client.list_datasets()
        assert len(datasets) == 0, datasets

    @pytest.mark.p3
    def test_concurrent_deletion(self, client):
        """Delete 1k datasets concurrently, one id per request."""
        count = 1_000
        datasets = batch_create_datasets(client, count)
        # Hoist the id list out of the submit loop: the original rebuilt the
        # full 1k-element list on every iteration (accidental O(n^2) setup).
        dataset_ids = [dataset.id for dataset in datasets]
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(client.delete_datasets, **{"ids": dataset_ids[i : i + 1]}) for i in range(count)]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        datasets = client.list_datasets()
        assert len(datasets) == 0, datasets
class TestDatasetsDelete:
    """delete_datasets behaviour for valid, partial, duplicate, and malformed id lists."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "func, remaining",
        [
            (lambda r: {"ids": r[:1]}, 2),
            (lambda r: {"ids": r}, 0),
        ],
        ids=["single_dataset", "multiple_datasets"],
    )
    def test_ids(self, client, add_datasets_func, func, remaining):
        """Deleting one id or every id removes exactly the requested datasets."""
        payload = None
        if callable(func):
            payload = func([dataset.id for dataset in add_datasets_func])
        client.delete_datasets(**payload)
        datasets = client.list_datasets()
        assert len(datasets) == remaining, str(datasets)

    @pytest.mark.p1
    @pytest.mark.usefixtures("add_dataset_func")
    def test_ids_empty(self, client):
        """An empty id list deletes nothing."""
        payload = {"ids": []}
        client.delete_datasets(**payload)
        datasets = client.list_datasets()
        assert len(datasets) == 1, str(datasets)

    @pytest.mark.p1
    @pytest.mark.usefixtures("add_datasets_func")
    def test_ids_none(self, client):
        """ids=None means 'delete all' (unlike the empty list)."""
        payload = {"ids": None}
        client.delete_datasets(**payload)
        datasets = client.list_datasets()
        assert len(datasets) == 0, str(datasets)

    @pytest.mark.p2
    @pytest.mark.usefixtures("add_dataset_func")
    def test_id_not_uuid(self, client):
        """Malformed ids are rejected and nothing is deleted."""
        payload = {"ids": ["not_uuid"]}
        with pytest.raises(Exception) as exception_info:
            client.delete_datasets(**payload)
        assert "Invalid UUID1 format" in str(exception_info.value), str(exception_info.value)
        datasets = client.list_datasets()
        assert len(datasets) == 1, str(datasets)

    @pytest.mark.p3
    @pytest.mark.usefixtures("add_dataset_func")
    def test_id_not_uuid1(self, client):
        """A well-formed UUID4 is rejected: only UUID1 ids are accepted."""
        payload = {"ids": [uuid.uuid4().hex]}
        with pytest.raises(Exception) as exception_info:
            client.delete_datasets(**payload)
        assert "Invalid UUID1 format" in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p2
    @pytest.mark.usefixtures("add_dataset_func")
    def test_id_wrong_uuid(self, client):
        """A valid but unknown id surfaces as a permission error; nothing is deleted."""
        payload = {"ids": ["d94a8dc02c9711f0930f7fbc369eab6d"]}
        with pytest.raises(Exception) as exception_info:
            client.delete_datasets(**payload)
        assert "lacks permission for dataset" in str(exception_info.value), str(exception_info.value)
        datasets = client.list_datasets()
        assert len(datasets) == 1, str(datasets)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "func",
        [
            lambda r: {"ids": ["d94a8dc02c9711f0930f7fbc369eab6d"] + r},
            lambda r: {"ids": r[:1] + ["d94a8dc02c9711f0930f7fbc369eab6d"] + r[1:3]},
            lambda r: {"ids": r + ["d94a8dc02c9711f0930f7fbc369eab6d"]},
        ],
    )
    def test_ids_partial_invalid(self, client, add_datasets_func, func):
        """One bad id anywhere in the list aborts the whole deletion (all 3 survive)."""
        if callable(func):
            payload = func([dataset.id for dataset in add_datasets_func])
        with pytest.raises(Exception) as exception_info:
            client.delete_datasets(**payload)
        assert "lacks permission for dataset" in str(exception_info.value), str(exception_info.value)
        datasets = client.list_datasets()
        assert len(datasets) == 3, str(datasets)

    @pytest.mark.p2
    def test_ids_duplicate(self, client, add_datasets_func):
        """Duplicate ids in one request are rejected; nothing is deleted."""
        dataset_ids = [dataset.id for dataset in add_datasets_func]
        payload = {"ids": dataset_ids + dataset_ids}
        with pytest.raises(Exception) as exception_info:
            client.delete_datasets(**payload)
        assert "Duplicate ids:" in str(exception_info.value), str(exception_info.value)
        datasets = client.list_datasets()
        assert len(datasets) == 3, str(datasets)

    @pytest.mark.p2
    def test_repeated_delete(self, client, add_datasets_func):
        """Deleting already-deleted ids surfaces as a permission error."""
        dataset_ids = [dataset.id for dataset in add_datasets_func]
        payload = {"ids": dataset_ids}
        client.delete_datasets(**payload)
        with pytest.raises(Exception) as exception_info:
            client.delete_datasets(**payload)
        assert "lacks permission for dataset" in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p2
    @pytest.mark.usefixtures("add_dataset_func")
    def test_field_unsupported(self, client):
        """Unknown keyword arguments are rejected; nothing is deleted."""
        payload = {"unknown_field": "unknown_field"}
        with pytest.raises(Exception) as exception_info:
            client.delete_datasets(**payload)
        assert "got an unexpected keyword argument 'unknown_field'" in str(exception_info.value), str(exception_info.value)
        datasets = client.list_datasets()
        assert len(datasets) == 1, str(datasets)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_dataset_mangement/test_create_dataset.py | test/testcases/test_sdk_api/test_dataset_mangement/test_create_dataset.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
from operator import attrgetter
import pytest
from configs import DATASET_NAME_LIMIT, DEFAULT_PARSER_CONFIG, HOST_ADDRESS, INVALID_API_TOKEN
from hypothesis import example, given, settings
from ragflow_sdk import DataSet, RAGFlow
from utils import encode_avatar
from utils.file_utils import create_image_file
from utils.hypothesis_utils import valid_names
@pytest.mark.usefixtures("clear_datasets")
class TestAuthorization:
    """create_dataset must reject missing or bogus API keys."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "invalid_auth, expected_message",
        [
            (None, "Authentication error: API key is invalid!"),
            (INVALID_API_TOKEN, "Authentication error: API key is invalid!"),
        ],
        ids=["empty_auth", "invalid_api_token"],
    )
    def test_auth_invalid(self, invalid_auth, expected_message):
        # A fresh client carries the bad credential; the message match is exact here.
        bad_client = RAGFlow(invalid_auth, HOST_ADDRESS)
        with pytest.raises(Exception) as exc_info:
            bad_client.create_dataset(name="auth_test")
        assert str(exc_info.value) == expected_message
@pytest.mark.usefixtures("clear_datasets")
class TestCapability:
    """Volume and concurrency smoke tests for create_dataset."""

    @pytest.mark.p3
    def test_create_dataset_1k(self, client):
        """Sequentially create 1k datasets and confirm they are all listed."""
        count = 1_000
        for i in range(count):
            payload = {"name": f"dataset_{i}"}
            client.create_dataset(**payload)
        assert len(client.list_datasets(page_size=2000)) == count

    @pytest.mark.p3
    def test_create_dataset_concurrent(self, client):
        """Create datasets from 5 worker threads and confirm every future finishes."""
        count = 100
        with ThreadPoolExecutor(max_workers=5) as executor:
            # Bug fix: the submit loop hard-coded range(100) while the final
            # assertion used `count`; tie both to `count` so they cannot drift.
            futures = [executor.submit(client.create_dataset, **{"name": f"dataset_{i}"}) for i in range(count)]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
@pytest.mark.usefixtures("clear_datasets")
class TestDatasetCreate:
@pytest.mark.p1
@given(name=valid_names())
@example("a" * 128)
@settings(max_examples=20)
def test_name(self, client, name):
    """Any hypothesis-generated valid name (incl. the 128-char maximum) is stored verbatim."""
    created = client.create_dataset(name=name)
    assert created.name == name, str(created)
@pytest.mark.p2
@pytest.mark.parametrize(
    "name, expected_message",
    [
        ("", "String should have at least 1 character"),
        (" ", "String should have at least 1 character"),
        ("a" * (DATASET_NAME_LIMIT + 1), "String should have at most 128 characters"),
        (0, "not instance of"),
        (None, "not instance of"),
    ],
    ids=["empty_name", "space_name", "too_long_name", "invalid_name", "None_name"],
)
def test_name_invalid(self, client, name, expected_message):
    """Empty, whitespace-only, over-long, or non-string names are rejected."""
    with pytest.raises(Exception) as exception_info:
        client.create_dataset(**{"name": name})
    assert expected_message in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_name_duplicated(self, client):
    """Creating the same name twice auto-suffixes the second dataset with '(1)'."""
    name = "duplicated_name"
    client.create_dataset(name=name)
    duplicate = client.create_dataset(name=name)
    assert duplicate.name == name + "(1)", str(duplicate)
@pytest.mark.p3
def test_name_case_insensitive(self, client):
    """Name uniqueness ignores case: a lower-case twin of an upper-case name gets '(1)'."""
    name = "CaseInsensitive"
    client.create_dataset(name=name.upper())
    twin = client.create_dataset(name=name.lower())
    assert twin.name == name.lower() + "(1)", str(twin)
@pytest.mark.p2
def test_avatar(self, client, tmp_path):
    """A data-URI PNG avatar with the proper MIME prefix is accepted."""
    fn = create_image_file(tmp_path / "ragflow_test.png")
    payload = {
        "name": "avatar",
        "avatar": f"data:image/png;base64,{encode_avatar(fn)}",
    }
    client.create_dataset(**payload)
@pytest.mark.p2
def test_avatar_exceeds_limit_length(self, client):
    """Avatars longer than 65535 characters are rejected."""
    oversized = "a" * 65536
    with pytest.raises(Exception) as exc_info:
        client.create_dataset(name="avatar_exceeds_limit_length", avatar=oversized)
    assert "String should have at most 65535 characters" in str(exc_info.value), str(exc_info.value)
@pytest.mark.p3
@pytest.mark.parametrize(
    "name, prefix, expected_message",
    [
        ("empty_prefix", "", "Missing MIME prefix. Expected format: data:<mime>;base64,<data>"),
        ("missing_comma", "data:image/png;base64", "Missing MIME prefix. Expected format: data:<mime>;base64,<data>"),
        ("unsupported_mine_type", "invalid_mine_prefix:image/png;base64,", "Invalid MIME prefix format. Must start with 'data:'"),
        ("invalid_mine_type", "data:unsupported_mine_type;base64,", "Unsupported MIME type. Allowed: ['image/jpeg', 'image/png']"),
    ],
    ids=["empty_prefix", "missing_comma", "unsupported_mine_type", "invalid_mine_type"],
)
def test_avatar_invalid_prefix(self, client, tmp_path, name, prefix, expected_message):
    """Avatars whose data-URI prefix is missing or malformed are rejected."""
    # The base64 payload itself is valid; only the prefix varies per case.
    fn = create_image_file(tmp_path / "ragflow_test.png")
    payload = {
        "name": name,
        "avatar": f"{prefix}{encode_avatar(fn)}",
    }
    with pytest.raises(Exception) as exception_info:
        client.create_dataset(**payload)
    assert expected_message in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_avatar_unset(self, client):
    """Omitting the avatar leaves it as None."""
    created = client.create_dataset(name="avatar_unset")
    assert created.avatar is None, str(created)
@pytest.mark.p2
def test_description(self, client):
    """A plain-text description is stored as given."""
    created = client.create_dataset(name="description", description="description")
    assert created.description == "description", str(created)
@pytest.mark.p2
def test_description_exceeds_limit_length(self, client):
    """Descriptions longer than 65535 characters are rejected."""
    oversized = "a" * 65536
    with pytest.raises(Exception) as exc_info:
        client.create_dataset(name="description_exceeds_limit_length", description=oversized)
    assert "String should have at most 65535 characters" in str(exc_info.value), str(exc_info.value)
@pytest.mark.p3
def test_description_unset(self, client):
    """Omitting the description leaves it as None."""
    created = client.create_dataset(name="description_unset")
    assert created.description is None, str(created)
@pytest.mark.p3
def test_description_none(self, client):
    """description=None is accepted and stored as None."""
    created = client.create_dataset(name="description_none", description=None)
    assert created.description is None, str(created)
@pytest.mark.p1
@pytest.mark.parametrize(
    "name, embedding_model",
    [
        ("BAAI/bge-small-en-v1.5@Builtin", "BAAI/bge-small-en-v1.5@Builtin"),
        ("embedding-3@ZHIPU-AI", "embedding-3@ZHIPU-AI"),
    ],
    ids=["builtin_baai", "tenant_zhipu"],
)
def test_embedding_model(self, client, name, embedding_model):
    """Valid <model_name>@<provider> identifiers are stored as given."""
    payload = {"name": name, "embedding_model": embedding_model}
    dataset = client.create_dataset(**payload)
    assert dataset.embedding_model == embedding_model, str(dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
    "name, embedding_model",
    [
        ("unknown_llm_name", "unknown@ZHIPU-AI"),
        ("unknown_llm_factory", "embedding-3@unknown"),
        ("tenant_no_auth_default_tenant_llm", "text-embedding-v3@Tongyi-Qianwen"),
        ("tenant_no_auth", "text-embedding-3-small@OpenAI"),
    ],
    ids=["unknown_llm_name", "unknown_llm_factory", "tenant_no_auth_default_tenant_llm", "tenant_no_auth"],
)
def test_embedding_model_invalid(self, client, name, embedding_model):
    """Unknown models/providers vs. known-but-unauthorized models raise distinct errors."""
    payload = {"name": name, "embedding_model": embedding_model}
    with pytest.raises(Exception) as exception_info:
        client.create_dataset(**payload)
    # Branch keys off the test-case name prefix baked into the parametrize table.
    if "tenant_no_auth" in name:
        assert str(exception_info.value) == f"Unauthorized model: <{embedding_model}>", str(exception_info.value)
    else:
        assert str(exception_info.value) == f"Unsupported model: <{embedding_model}>", str(exception_info.value)
@pytest.mark.p2
@pytest.mark.parametrize(
    "name, embedding_model",
    [
        ("empty", ""),
        ("space", " "),
        ("missing_at", "BAAI/bge-small-en-v1.5Builtin"),
        ("missing_model_name", "@Builtin"),
        ("missing_provider", "BAAI/bge-small-en-v1.5@"),
        ("whitespace_only_model_name", " @Builtin"),
        ("whitespace_only_provider", "BAAI/bge-small-en-v1.5@ "),
    ],
    ids=["empty", "space", "missing_at", "empty_model_name", "empty_provider", "whitespace_only_model_name", "whitespace_only_provider"],
)
def test_embedding_model_format(self, client, name, embedding_model):
    """Structurally malformed identifiers fail format validation with two message classes."""
    payload = {"name": name, "embedding_model": embedding_model}
    with pytest.raises(Exception) as exception_info:
        client.create_dataset(**payload)
    # Missing '@' entirely vs. '@' present but one side empty/whitespace.
    if name in ["empty", "space", "missing_at"]:
        assert "Embedding model identifier must follow <model_name>@<provider> format" in str(exception_info.value), str(exception_info.value)
    else:
        assert "Both model_name and provider must be non-empty strings" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
def test_embedding_model_unset(self, client):
    """Omitting embedding_model falls back to the builtin BAAI model."""
    created = client.create_dataset(name="embedding_model_unset")
    assert created.embedding_model == "BAAI/bge-small-en-v1.5@Builtin", str(created)
@pytest.mark.p2
def test_embedding_model_none(self, client):
    """embedding_model=None also falls back to the builtin BAAI model."""
    created = client.create_dataset(name="embedding_model_none", embedding_model=None)
    assert created.embedding_model == "BAAI/bge-small-en-v1.5@Builtin", str(created)
@pytest.mark.p1
@pytest.mark.parametrize(
    "name, permission",
    [
        ("me", "me"),
        ("team", "team"),
    ],
    ids=["me", "team"],
)
def test_permission(self, client, name, permission):
    """Both supported permission values are accepted and stored."""
    payload = {"name": name, "permission": permission}
    dataset = client.create_dataset(**payload)
    assert dataset.permission == permission.lower().strip(), str(dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
    "name, permission",
    [
        ("empty", ""),
        ("unknown", "unknown"),
        ("me_upercase", "ME"),
        ("team_upercase", "TEAM"),
        ("whitespace", " ME "),
    ],
    ids=["empty", "unknown", "me_upercase", "team_upercase", "whitespace"],
)
def test_permission_invalid(self, client, name, permission):
    """permission is strict: no other values, no case folding, no whitespace trimming."""
    payload = {"name": name, "permission": permission}
    with pytest.raises(Exception) as exception_info:
        client.create_dataset(**payload)
    assert "Input should be 'me' or 'team'" in str(exception_info.value)
@pytest.mark.p2
def test_permission_unset(self, client):
    """Omitting permission defaults to 'me'."""
    created = client.create_dataset(name="permission_unset")
    assert created.permission == "me", str(created)
@pytest.mark.p3
def test_permission_none(self, client):
    """permission=None is a type error, not 'use the default'."""
    with pytest.raises(Exception) as exc_info:
        client.create_dataset(name="permission_none", permission=None)
    assert "not instance of" in str(exc_info.value), str(exc_info.value)
@pytest.mark.p1
@pytest.mark.parametrize(
    "name, chunk_method",
    [
        ("naive", "naive"),
        ("book", "book"),
        ("email", "email"),
        ("laws", "laws"),
        ("manual", "manual"),
        ("one", "one"),
        ("paper", "paper"),
        ("picture", "picture"),
        ("presentation", "presentation"),
        ("qa", "qa"),
        ("table", "table"),
        ("tag", "tag"),
    ],
    ids=["naive", "book", "email", "laws", "manual", "one", "paper", "picture", "presentation", "qa", "table", "tag"],
)
def test_chunk_method(self, client, name, chunk_method):
    """Every supported chunk_method value is accepted and stored."""
    payload = {"name": name, "chunk_method": chunk_method}
    dataset = client.create_dataset(**payload)
    assert dataset.chunk_method == chunk_method, str(dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
    "name, chunk_method",
    [
        ("empty", ""),
        ("unknown", "unknown"),
    ],
    ids=["empty", "unknown"],
)
def test_chunk_method_invalid(self, client, name, chunk_method):
    """Values outside the supported chunk_method set are rejected with the full enum list."""
    payload = {"name": name, "chunk_method": chunk_method}
    with pytest.raises(Exception) as exception_info:
        client.create_dataset(**payload)
    assert "Input should be 'naive', 'book', 'email', 'laws', 'manual', 'one', 'paper', 'picture', 'presentation', 'qa', 'table' or 'tag'" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
def test_chunk_method_unset(self, client):
    """Omitting chunk_method defaults to 'naive'."""
    created = client.create_dataset(name="chunk_method_unset")
    assert created.chunk_method == "naive", str(created)
@pytest.mark.p3
def test_chunk_method_none(self, client):
    """chunk_method=None is a type error, not 'use the default'."""
    with pytest.raises(Exception) as exc_info:
        client.create_dataset(name="chunk_method_none", chunk_method=None)
    assert "not instance of" in str(exc_info.value), str(exc_info.value)
@pytest.mark.p1
@pytest.mark.parametrize(
    "name, parser_config",
    [
        ("auto_keywords_min", {"auto_keywords": 0}),
        ("auto_keywords_mid", {"auto_keywords": 16}),
        ("auto_keywords_max", {"auto_keywords": 32}),
        ("auto_questions_min", {"auto_questions": 0}),
        ("auto_questions_mid", {"auto_questions": 5}),
        ("auto_questions_max", {"auto_questions": 10}),
        ("chunk_token_num_min", {"chunk_token_num": 1}),
        ("chunk_token_num_mid", {"chunk_token_num": 1024}),
        ("chunk_token_num_max", {"chunk_token_num": 2048}),
        ("delimiter", {"delimiter": "\n"}),
        ("delimiter_space", {"delimiter": " "}),
        ("html4excel_true", {"html4excel": True}),
        ("html4excel_false", {"html4excel": False}),
        ("layout_recognize_DeepDOC", {"layout_recognize": "DeepDOC"}),
        ("layout_recognize_navie", {"layout_recognize": "Plain Text"}),
        ("tag_kb_ids", {"tag_kb_ids": ["1", "2"]}),
        ("topn_tags_min", {"topn_tags": 1}),
        ("topn_tags_mid", {"topn_tags": 5}),
        ("topn_tags_max", {"topn_tags": 10}),
        ("filename_embd_weight_min", {"filename_embd_weight": 0.1}),
        ("filename_embd_weight_mid", {"filename_embd_weight": 0.5}),
        ("filename_embd_weight_max", {"filename_embd_weight": 1.0}),
        ("task_page_size_min", {"task_page_size": 1}),
        ("task_page_size_None", {"task_page_size": None}),
        ("pages", {"pages": [[1, 100]]}),
        ("pages_none", {"pages": None}),
        ("graphrag_true", {"graphrag": {"use_graphrag": True}}),
        ("graphrag_false", {"graphrag": {"use_graphrag": False}}),
        ("graphrag_entity_types", {"graphrag": {"entity_types": ["age", "sex", "height", "weight"]}}),
        ("graphrag_method_general", {"graphrag": {"method": "general"}}),
        ("graphrag_method_light", {"graphrag": {"method": "light"}}),
        ("graphrag_community_true", {"graphrag": {"community": True}}),
        ("graphrag_community_false", {"graphrag": {"community": False}}),
        ("graphrag_resolution_true", {"graphrag": {"resolution": True}}),
        ("graphrag_resolution_false", {"graphrag": {"resolution": False}}),
        ("raptor_true", {"raptor": {"use_raptor": True}}),
        ("raptor_false", {"raptor": {"use_raptor": False}}),
        ("raptor_prompt", {"raptor": {"prompt": "Who are you?"}}),
        ("raptor_max_token_min", {"raptor": {"max_token": 1}}),
        ("raptor_max_token_mid", {"raptor": {"max_token": 1024}}),
        ("raptor_max_token_max", {"raptor": {"max_token": 2048}}),
        ("raptor_threshold_min", {"raptor": {"threshold": 0.0}}),
        ("raptor_threshold_mid", {"raptor": {"threshold": 0.5}}),
        ("raptor_threshold_max", {"raptor": {"threshold": 1.0}}),
        ("raptor_max_cluster_min", {"raptor": {"max_cluster": 1}}),
        ("raptor_max_cluster_mid", {"raptor": {"max_cluster": 512}}),
        ("raptor_max_cluster_max", {"raptor": {"max_cluster": 1024}}),
        ("raptor_random_seed_min", {"raptor": {"random_seed": 0}}),
    ],
    ids=[
        "auto_keywords_min",
        "auto_keywords_mid",
        "auto_keywords_max",
        "auto_questions_min",
        "auto_questions_mid",
        "auto_questions_max",
        "chunk_token_num_min",
        "chunk_token_num_mid",
        "chunk_token_num_max",
        "delimiter",
        "delimiter_space",
        "html4excel_true",
        "html4excel_false",
        "layout_recognize_DeepDOC",
        "layout_recognize_navie",
        "tag_kb_ids",
        "topn_tags_min",
        "topn_tags_mid",
        "topn_tags_max",
        "filename_embd_weight_min",
        "filename_embd_weight_mid",
        "filename_embd_weight_max",
        "task_page_size_min",
        "task_page_size_None",
        "pages",
        "pages_none",
        "graphrag_true",
        "graphrag_false",
        "graphrag_entity_types",
        "graphrag_method_general",
        "graphrag_method_light",
        "graphrag_community_true",
        "graphrag_community_false",
        "graphrag_resolution_true",
        "graphrag_resolution_false",
        "raptor_true",
        "raptor_false",
        "raptor_prompt",
        "raptor_max_token_min",
        "raptor_max_token_mid",
        "raptor_max_token_max",
        "raptor_threshold_min",
        "raptor_threshold_mid",
        "raptor_threshold_max",
        "raptor_max_cluster_min",
        "raptor_max_cluster_mid",
        "raptor_max_cluster_max",
        "raptor_random_seed_min",
    ],
)
def test_parser_config(self, client, name, parser_config):
    """Each accepted parser_config field round-trips unchanged through create_dataset."""
    parser_config_o = DataSet.ParserConfig(client, parser_config)
    payload = {"name": name, "parser_config": parser_config_o}
    dataset = client.create_dataset(**payload)
    # Nested sections (graphrag/raptor) are compared attribute-by-attribute
    # via dotted attrgetter paths; flat fields are compared directly.
    for k, v in parser_config.items():
        if isinstance(v, dict):
            for kk, vv in v.items():
                assert attrgetter(f"{k}.{kk}")(dataset.parser_config) == vv, str(dataset)
        else:
            assert attrgetter(k)(dataset.parser_config) == v, str(dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
"name, parser_config, expected_message",
[
("auto_keywords_min_limit", {"auto_keywords": -1}, "Input should be greater than or equal to 0"),
("auto_keywords_max_limit", {"auto_keywords": 33}, "Input should be less than or equal to 32"),
("auto_keywords_float_not_allowed", {"auto_keywords": 3.14}, "Input should be a valid integer"),
("auto_keywords_type_invalid", {"auto_keywords": "string"}, "Input should be a valid integer"),
("auto_questions_min_limit", {"auto_questions": -1}, "Input should be greater than or equal to 0"),
("auto_questions_max_limit", {"auto_questions": 11}, "Input should be less than or equal to 10"),
("auto_questions_float_not_allowed", {"auto_questions": 3.14}, "Input should be a valid integer"),
("auto_questions_type_invalid", {"auto_questions": "string"}, "Input should be a valid integer"),
("chunk_token_num_min_limit", {"chunk_token_num": 0}, "Input should be greater than or equal to 1"),
("chunk_token_num_max_limit", {"chunk_token_num": 2049}, "Input should be less than or equal to 2048"),
("chunk_token_num_float_not_allowed", {"chunk_token_num": 3.14}, "Input should be a valid integer"),
("chunk_token_num_type_invalid", {"chunk_token_num": "string"}, "Input should be a valid integer"),
("delimiter_empty", {"delimiter": ""}, "String should have at least 1 character"),
("html4excel_type_invalid", {"html4excel": "string"}, "Input should be a valid boolean"),
("tag_kb_ids_not_list", {"tag_kb_ids": "1,2"}, "Input should be a valid list"),
("tag_kb_ids_int_in_list", {"tag_kb_ids": [1, 2]}, "Input should be a valid string"),
("topn_tags_min_limit", {"topn_tags": 0}, "Input should be greater than or equal to 1"),
("topn_tags_max_limit", {"topn_tags": 11}, "Input should be less than or equal to 10"),
("topn_tags_float_not_allowed", {"topn_tags": 3.14}, "Input should be a valid integer"),
("topn_tags_type_invalid", {"topn_tags": "string"}, "Input should be a valid integer"),
("filename_embd_weight_min_limit", {"filename_embd_weight": -1}, "Input should be greater than or equal to 0"),
("filename_embd_weight_max_limit", {"filename_embd_weight": 1.1}, "Input should be less than or equal to 1"),
("filename_embd_weight_type_invalid", {"filename_embd_weight": "string"}, "Input should be a valid number"),
("task_page_size_min_limit", {"task_page_size": 0}, "Input should be greater than or equal to 1"),
("task_page_size_float_not_allowed", {"task_page_size": 3.14}, "Input should be a valid integer"),
("task_page_size_type_invalid", {"task_page_size": "string"}, "Input should be a valid integer"),
("pages_not_list", {"pages": "1,2"}, "Input should be a valid list"),
("pages_not_list_in_list", {"pages": ["1,2"]}, "Input should be a valid list"),
("pages_not_int_list", {"pages": [["string1", "string2"]]}, "Input should be a valid integer"),
("graphrag_type_invalid", {"graphrag": {"use_graphrag": "string"}}, "Input should be a valid boolean"),
("graphrag_entity_types_not_list", {"graphrag": {"entity_types": "1,2"}}, "Input should be a valid list"),
("graphrag_entity_types_not_str_in_list", {"graphrag": {"entity_types": [1, 2]}}, "nput should be a valid string"),
("graphrag_method_unknown", {"graphrag": {"method": "unknown"}}, "Input should be 'light' or 'general'"),
("graphrag_method_none", {"graphrag": {"method": None}}, "Input should be 'light' or 'general'"),
("graphrag_community_type_invalid", {"graphrag": {"community": "string"}}, "Input should be a valid boolean"),
("graphrag_resolution_type_invalid", {"graphrag": {"resolution": "string"}}, "Input should be a valid boolean"),
("raptor_type_invalid", {"raptor": {"use_raptor": "string"}}, "Input should be a valid boolean"),
("raptor_prompt_empty", {"raptor": {"prompt": ""}}, "String should have at least 1 character"),
("raptor_prompt_space", {"raptor": {"prompt": " "}}, "String should have at least 1 character"),
("raptor_max_token_min_limit", {"raptor": {"max_token": 0}}, "Input should be greater than or equal to 1"),
("raptor_max_token_max_limit", {"raptor": {"max_token": 2049}}, "Input should be less than or equal to 2048"),
("raptor_max_token_float_not_allowed", {"raptor": {"max_token": 3.14}}, "Input should be a valid integer"),
("raptor_max_token_type_invalid", {"raptor": {"max_token": "string"}}, "Input should be a valid integer"),
("raptor_threshold_min_limit", {"raptor": {"threshold": -0.1}}, "Input should be greater than or equal to 0"),
("raptor_threshold_max_limit", {"raptor": {"threshold": 1.1}}, "Input should be less than or equal to 1"),
("raptor_threshold_type_invalid", {"raptor": {"threshold": "string"}}, "Input should be a valid number"),
("raptor_max_cluster_min_limit", {"raptor": {"max_cluster": 0}}, "Input should be greater than or equal to 1"),
("raptor_max_cluster_max_limit", {"raptor": {"max_cluster": 1025}}, "Input should be less than or equal to 1024"),
("raptor_max_cluster_float_not_allowed", {"raptor": {"max_cluster": 3.14}}, "Input should be a valid integer"),
("raptor_max_cluster_type_invalid", {"raptor": {"max_cluster": "string"}}, "Input should be a valid integer"),
("raptor_random_seed_min_limit", {"raptor": {"random_seed": -1}}, "Input should be greater than or equal to 0"),
("raptor_random_seed_float_not_allowed", {"raptor": {"random_seed": 3.14}}, "Input should be a valid integer"),
("raptor_random_seed_type_invalid", {"raptor": {"random_seed": "string"}}, "Input should be a valid integer"),
("parser_config_type_invalid", {"delimiter": "a" * 65536}, "Parser config exceeds size limit (max 65,535 characters)"),
],
ids=[
"auto_keywords_min_limit",
"auto_keywords_max_limit",
"auto_keywords_float_not_allowed",
"auto_keywords_type_invalid",
"auto_questions_min_limit",
"auto_questions_max_limit",
"auto_questions_float_not_allowed",
"auto_questions_type_invalid",
"chunk_token_num_min_limit",
"chunk_token_num_max_limit",
"chunk_token_num_float_not_allowed",
"chunk_token_num_type_invalid",
"delimiter_empty",
"html4excel_type_invalid",
"tag_kb_ids_not_list",
"tag_kb_ids_int_in_list",
"topn_tags_min_limit",
"topn_tags_max_limit",
"topn_tags_float_not_allowed",
"topn_tags_type_invalid",
"filename_embd_weight_min_limit",
"filename_embd_weight_max_limit",
"filename_embd_weight_type_invalid",
"task_page_size_min_limit",
"task_page_size_float_not_allowed",
"task_page_size_type_invalid",
"pages_not_list",
"pages_not_list_in_list",
"pages_not_int_list",
"graphrag_type_invalid",
"graphrag_entity_types_not_list",
"graphrag_entity_types_not_str_in_list",
"graphrag_method_unknown",
"graphrag_method_none",
"graphrag_community_type_invalid",
"graphrag_resolution_type_invalid",
"raptor_type_invalid",
"raptor_prompt_empty",
"raptor_prompt_space",
"raptor_max_token_min_limit",
"raptor_max_token_max_limit",
"raptor_max_token_float_not_allowed",
"raptor_max_token_type_invalid",
"raptor_threshold_min_limit",
"raptor_threshold_max_limit",
"raptor_threshold_type_invalid",
"raptor_max_cluster_min_limit",
"raptor_max_cluster_max_limit",
"raptor_max_cluster_float_not_allowed",
"raptor_max_cluster_type_invalid",
"raptor_random_seed_min_limit",
"raptor_random_seed_float_not_allowed",
"raptor_random_seed_type_invalid",
"parser_config_type_invalid",
],
)
def test_parser_config_invalid(self, client, name, parser_config, expected_message):
parser_config_o = DataSet.ParserConfig(client, parser_config)
payload = {"name": name, "parser_config": parser_config_o}
with pytest.raises(Exception) as exception_info:
client.create_dataset(**payload)
assert expected_message in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
def test_parser_config_empty(self, client):
excepted_value = DataSet.ParserConfig(
client,
DEFAULT_PARSER_CONFIG,
)
parser_config_o = DataSet.ParserConfig(client, {})
payload = {"name": "parser_config_empty", "parser_config": parser_config_o}
dataset = client.create_dataset(**payload)
assert str(dataset.parser_config) == str(excepted_value), str(dataset)
@pytest.mark.p2
def test_parser_config_unset(self, client):
excepted_value = DataSet.ParserConfig(
client,
DEFAULT_PARSER_CONFIG,
)
payload = {"name": "parser_config_unset"}
dataset = client.create_dataset(**payload)
assert str(dataset.parser_config) == str(excepted_value), str(dataset)
@pytest.mark.p3
def test_parser_config_none(self, client):
excepted_value = DataSet.ParserConfig(
client,
DEFAULT_PARSER_CONFIG,
)
payload = {"name": "parser_config_empty", "parser_config": None}
dataset = client.create_dataset(**payload)
assert str(dataset.parser_config) == str(excepted_value), str(dataset)
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload",
        [
            {"name": "id", "id": "id"},
            {"name": "tenant_id", "tenant_id": "e57c1966f99211efb41e9e45646e0111"},
            {"name": "created_by", "created_by": "created_by"},
            {"name": "create_date", "create_date": "Tue, 11 Mar 2025 13:37:23 GMT"},
            {"name": "create_time", "create_time": 1741671443322},
            {"name": "update_date", "update_date": "Tue, 11 Mar 2025 13:37:23 GMT"},
            {"name": "update_time", "update_time": 1741671443339},
            {"name": "document_count", "document_count": 1},
            {"name": "chunk_count", "chunk_count": 1},
            {"name": "token_num", "token_num": 1},
            {"name": "status", "status": "1"},
            {"name": "pagerank", "pagerank": 50},
            {"name": "unknown_field", "unknown_field": "unknown_field"},
        ],
    )
    def test_unsupported_field(self, client, payload):
        """Server-managed fields (id, timestamps, counters) and unknown keys are rejected by create_dataset."""
        with pytest.raises(Exception) as exception_info:
            client.create_dataset(**payload)
        assert "got an unexpected keyword argument" in str(exception_info.value), str(exception_info.value)
@pytest.mark.usefixtures("clear_datasets")
class TestParserConfigBugFix:
    @pytest.mark.p1
    def test_parser_config_missing_raptor_and_graphrag(self, client):
        """Regression: omitted raptor/graphrag sections are auto-filled with disabled defaults, without clobbering user-provided fields."""
        parser_config = DataSet.ParserConfig(client, {"chunk_token_num": 1024})
        payload = {"name": "test_parser_config_missing_fields_sdk", "parser_config": parser_config}
        dataset = client.create_dataset(**payload)
        config = dataset.parser_config
        assert hasattr(config, "raptor"), "raptor field should be present"
        assert hasattr(config, "graphrag"), "graphrag field should be present"
        assert config.raptor.use_raptor is False, "raptor.use_raptor should default to False"
        assert config.graphrag.use_graphrag is False, "graphrag.use_graphrag should default to False"
        assert config.chunk_token_num == 1024, "User-provided chunk_token_num should be preserved"
    @pytest.mark.p1
    def test_parser_config_with_only_raptor(self, client):
        """Regression: supplying only raptor keeps it intact and auto-fills graphrag with disabled defaults."""
        parser_config = DataSet.ParserConfig(client, {"chunk_token_num": 1024, "raptor": {"use_raptor": True}})
        payload = {"name": "test_parser_config_only_raptor_sdk", "parser_config": parser_config}
        dataset = client.create_dataset(**payload)
        config = dataset.parser_config
        assert config.raptor.use_raptor is True, "User-provided raptor.use_raptor should be preserved"
        assert hasattr(config, "graphrag"), "graphrag field should be present"
        assert config.graphrag.use_graphrag is False, "graphrag.use_graphrag should default to False"
@pytest.mark.p1
def test_parser_config_with_only_graphrag(self, client):
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | true |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_dataset_mangement/conftest.py | test/testcases/test_sdk_api/test_dataset_mangement/conftest.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import batch_create_datasets
@pytest.fixture(scope="class")
def add_datasets(client, request):
    """Create five datasets shared across the test class; delete all datasets on teardown."""
    request.addfinalizer(lambda: client.delete_datasets(ids=None))
    return batch_create_datasets(client, 5)
@pytest.fixture(scope="function")
def add_datasets_func(client, request):
    """Create three datasets per test function; delete all datasets on teardown."""
    request.addfinalizer(lambda: client.delete_datasets(ids=None))
    return batch_create_datasets(client, 3)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_dataset_mangement/test_update_dataset.py | test/testcases/test_sdk_api/test_dataset_mangement/test_update_dataset.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from operator import attrgetter
import pytest
from configs import DATASET_NAME_LIMIT
from hypothesis import HealthCheck, example, given, settings
from ragflow_sdk import DataSet
from utils import encode_avatar
from utils.file_utils import create_image_file
from utils.hypothesis_utils import valid_names
from configs import DEFAULT_PARSER_CONFIG
class TestRquest:
    # NOTE(review): class name looks like a typo for "TestRequest"; kept as-is
    # because renaming would change collected test IDs.
    @pytest.mark.p2
    def test_payload_empty(self, add_dataset_func):
        """Updating a dataset with an empty payload must be rejected."""
        dataset = add_dataset_func
        with pytest.raises(Exception) as exc:
            dataset.update({})
        assert "No properties were modified" in str(exc.value), str(exc.value)
class TestCapability:
    @pytest.mark.p3
    def test_update_dateset_concurrent(self, add_dataset_func):
        """Fire 100 concurrent rename updates and verify every call completes."""
        dataset = add_dataset_func
        total = 100
        with ThreadPoolExecutor(max_workers=5) as pool:
            pending = [pool.submit(dataset.update, {"name": f"dataset_{i}"}) for i in range(total)]
            done = list(as_completed(pending))
        assert len(done) == total, done
class TestDatasetUpdate:
    @pytest.mark.p1
    @given(name=valid_names())
    @example("a" * 128)
    @settings(max_examples=20, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_name(self, client, add_dataset_func, name):
        """Property-based check: any valid name (incl. the 128-char boundary) round-trips through update."""
        dataset = add_dataset_func
        payload = {"name": name}
        dataset.update(payload)
        assert dataset.name == name, str(dataset)
        retrieved_dataset = client.get_dataset(name=dataset.name)
        assert retrieved_dataset.name == name, str(retrieved_dataset)
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "name, expected_message",
        [
            ("", "String should have at least 1 character"),
            (" ", "String should have at least 1 character"),
            ("a" * (DATASET_NAME_LIMIT + 1), "String should have at most 128 characters"),
            (0, "Input should be a valid string"),
            (None, "Input should be a valid string"),
        ],
        ids=["empty_name", "space_name", "too_long_name", "invalid_name", "None_name"],
    )
    def test_name_invalid(self, add_dataset_func, name, expected_message):
        """Empty, whitespace-only, over-long, or non-string names are rejected on update."""
        dataset = add_dataset_func
        with pytest.raises(Exception) as exception_info:
            dataset.update({"name": name})
        assert expected_message in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_name_duplicated(self, add_datasets_func):
datasets = add_datasets_func
name = "dataset_1"
with pytest.raises(Exception) as exception_info:
datasets[0].update({"name": name})
assert f"Dataset name '{name}' already exists" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_name_case_insensitive(self, add_datasets_func):
dataset = add_datasets_func[0]
name = "DATASET_1"
with pytest.raises(Exception) as exception_info:
dataset.update({"name": name})
assert f"Dataset name '{name}' already exists" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
def test_avatar(self, client, add_dataset_func, tmp_path):
dataset = add_dataset_func
fn = create_image_file(tmp_path / "ragflow_test.png")
avatar_data = f"data:image/png;base64,{encode_avatar(fn)}"
dataset.update({"avatar": avatar_data})
assert dataset.avatar == avatar_data, str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.avatar == avatar_data, str(retrieved_dataset)
@pytest.mark.p2
def test_avatar_exceeds_limit_length(self, add_dataset_func):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"avatar": "a" * 65536})
assert "String should have at most 65535 characters" in str(exception_info.value), str(exception_info.value)
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "avatar_prefix, expected_message",
        [
            ("", "Missing MIME prefix. Expected format: data:<mime>;base64,<data>"),
            ("data:image/png;base64", "Missing MIME prefix. Expected format: data:<mime>;base64,<data>"),
            ("invalid_mine_prefix:image/png;base64,", "Invalid MIME prefix format. Must start with 'data:'"),
            ("data:unsupported_mine_type;base64,", "Unsupported MIME type. Allowed: ['image/jpeg', 'image/png']"),
        ],
        ids=["empty_prefix", "missing_comma", "unsupported_mine_type", "invalid_mine_type"],
    )
    def test_avatar_invalid_prefix(self, add_dataset_func, tmp_path, avatar_prefix, expected_message):
        """Avatars with a malformed or unsupported data-URI prefix are rejected with a specific message."""
        dataset = add_dataset_func
        fn = create_image_file(tmp_path / "ragflow_test.png")
        with pytest.raises(Exception) as exception_info:
            dataset.update({"avatar": f"{avatar_prefix}{encode_avatar(fn)}"})
        assert expected_message in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_avatar_none(self, client, add_dataset_func):
dataset = add_dataset_func
dataset.update({"avatar": None})
assert dataset.avatar is None, str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.avatar is None, str(retrieved_dataset)
@pytest.mark.p2
def test_description(self, client, add_dataset_func):
dataset = add_dataset_func
dataset.update({"description": "description"})
assert dataset.description == "description", str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.description == "description", str(retrieved_dataset)
@pytest.mark.p2
def test_description_exceeds_limit_length(self, add_dataset_func):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"description": "a" * 65536})
assert "String should have at most 65535 characters" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_description_none(self, client, add_dataset_func):
dataset = add_dataset_func
dataset.update({"description": None})
assert dataset.description is None, str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.description is None, str(retrieved_dataset)
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "embedding_model",
        [
            "BAAI/bge-small-en-v1.5@Builtin",
            "embedding-3@ZHIPU-AI",
        ],
        ids=["builtin_baai", "tenant_zhipu"],
    )
    def test_embedding_model(self, client, add_dataset_func, embedding_model):
        """A supported "<model_name>@<provider>" identifier round-trips through update."""
        dataset = add_dataset_func
        dataset.update({"embedding_model": embedding_model})
        assert dataset.embedding_model == embedding_model, str(dataset)
        retrieved_dataset = client.get_dataset(name=dataset.name)
        assert retrieved_dataset.embedding_model == embedding_model, str(retrieved_dataset)
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "name, embedding_model",
        [
            ("unknown_llm_name", "unknown@ZHIPU-AI"),
            ("unknown_llm_factory", "embedding-3@unknown"),
            ("tenant_no_auth_default_tenant_llm", "text-embedding-v3@Tongyi-Qianwen"),
            ("tenant_no_auth", "text-embedding-3-small@OpenAI"),
        ],
        ids=["unknown_llm_name", "unknown_llm_factory", "tenant_no_auth_default_tenant_llm", "tenant_no_auth"],
    )
    def test_embedding_model_invalid(self, add_dataset_func, name, embedding_model):
        """Unknown or unauthorized embedding models are rejected with an exact error message."""
        dataset = add_dataset_func
        with pytest.raises(Exception) as exception_info:
            dataset.update({"name": name, "embedding_model": embedding_model})
        error_msg = str(exception_info.value)
        # Models the tenant has no credentials for report "Unauthorized";
        # models the server does not know at all report "Unsupported".
        if "tenant_no_auth" in name:
            assert error_msg == f"Unauthorized model: <{embedding_model}>", error_msg
        else:
            assert error_msg == f"Unsupported model: <{embedding_model}>", error_msg
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "name, embedding_model",
        [
            ("empty", ""),
            ("space", " "),
            ("missing_at", "BAAI/bge-small-en-v1.5Builtin"),
            ("missing_model_name", "@Builtin"),
            ("missing_provider", "BAAI/bge-small-en-v1.5@"),
            ("whitespace_only_model_name", " @Builtin"),
            ("whitespace_only_provider", "BAAI/bge-small-en-v1.5@ "),
        ],
        ids=["empty", "space", "missing_at", "empty_model_name", "empty_provider", "whitespace_only_model_name", "whitespace_only_provider"],
    )
    def test_embedding_model_format(self, add_dataset_func, name, embedding_model):
        """Malformed "<model_name>@<provider>" strings are rejected with a format-specific message."""
        dataset = add_dataset_func
        with pytest.raises(Exception) as exception_info:
            dataset.update({"name": name, "embedding_model": embedding_model})
        error_msg = str(exception_info.value)
        # Cases with no "@" separator at all get the format error; cases with an
        # "@" but an empty/whitespace side get the non-empty-parts error.
        if name in ["empty", "space", "missing_at"]:
            assert "Embedding model identifier must follow <model_name>@<provider> format" in error_msg, error_msg
        else:
            assert "Both model_name and provider must be non-empty strings" in error_msg, error_msg
@pytest.mark.p2
def test_embedding_model_none(self, client, add_dataset_func):
dataset = add_dataset_func
dataset.update({"embedding_model": None})
assert dataset.embedding_model == "BAAI/bge-small-en-v1.5@Builtin", str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.embedding_model == "BAAI/bge-small-en-v1.5@Builtin", str(retrieved_dataset)
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "permission",
        [
            "me",
            "team",
        ],
        ids=["me", "team"],
    )
    def test_permission(self, client, add_dataset_func, permission):
        """Both valid permission values are accepted and stored normalized (lowercased, stripped)."""
        dataset = add_dataset_func
        dataset.update({"permission": permission})
        assert dataset.permission == permission.lower().strip(), str(dataset)
        retrieved_dataset = client.get_dataset(name=dataset.name)
        assert retrieved_dataset.permission == permission.lower().strip(), str(retrieved_dataset)
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "permission",
        [
            "",
            "unknown",
            list(),
            "ME",
            "TEAM",
            " ME ",
        ],
        ids=["empty", "unknown", "type_error", "me_upercase", "team_upercase", "whitespace"],
    )
    def test_permission_invalid(self, add_dataset_func, permission):
        """Anything other than exact lowercase 'me'/'team' (incl. uppercase and padded variants) is rejected."""
        dataset = add_dataset_func
        with pytest.raises(Exception) as exception_info:
            dataset.update({"permission": permission})
        assert "Input should be 'me' or 'team'" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_permission_none(self, add_dataset_func):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"permission": None})
assert "Input should be 'me' or 'team'" in str(exception_info.value), str(exception_info.value)
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "chunk_method",
        [
            "naive",
            "book",
            "email",
            "laws",
            "manual",
            "one",
            "paper",
            "picture",
            "presentation",
            "qa",
            "table",
            "tag",
        ],
        ids=["naive", "book", "email", "laws", "manual", "one", "paper", "picture", "presentation", "qa", "table", "tag"],
    )
    def test_chunk_method(self, client, add_dataset_func, chunk_method):
        """Every supported chunk_method value round-trips through update and fetch."""
        dataset = add_dataset_func
        dataset.update({"chunk_method": chunk_method})
        assert dataset.chunk_method == chunk_method, str(dataset)
        retrieved_dataset = client.get_dataset(name=dataset.name)
        assert retrieved_dataset.chunk_method == chunk_method, str(retrieved_dataset)
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "chunk_method",
        [
            "",
            "unknown",
            list(),
        ],
        ids=["empty", "unknown", "type_error"],
    )
    def test_chunk_method_invalid(self, add_dataset_func, chunk_method):
        """Empty, unknown, or wrongly-typed chunk_method values are rejected with the enum error."""
        dataset = add_dataset_func
        with pytest.raises(Exception) as exception_info:
            dataset.update({"chunk_method": chunk_method})
        assert "Input should be 'naive', 'book', 'email', 'laws', 'manual', 'one', 'paper', 'picture', 'presentation', 'qa', 'table' or 'tag'" in str(exception_info.value), str(exception_info.value)
    @pytest.mark.p3
    def test_chunk_method_none(self, add_dataset_func):
        """chunk_method=None is rejected with the same enum validation error as other invalid values."""
        dataset = add_dataset_func
        with pytest.raises(Exception) as exception_info:
            dataset.update({"chunk_method": None})
        assert "Input should be 'naive', 'book', 'email', 'laws', 'manual', 'one', 'paper', 'picture', 'presentation', 'qa', 'table' or 'tag'" in str(exception_info.value), str(exception_info.value)
    @pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="#8208")
    @pytest.mark.p2
    @pytest.mark.parametrize("pagerank", [0, 50, 100], ids=["min", "mid", "max"])
    def test_pagerank(self, client, add_dataset_func, pagerank):
        """pagerank values at the boundaries and midpoint round-trip (skipped on the infinity doc engine, see #8208)."""
        dataset = add_dataset_func
        dataset.update({"pagerank": pagerank})
        assert dataset.pagerank == pagerank, str(dataset)
        retrieved_dataset = client.get_dataset(name=dataset.name)
        assert retrieved_dataset.pagerank == pagerank, str(retrieved_dataset)
@pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="#8208")
@pytest.mark.p2
def test_pagerank_set_to_0(self, client, add_dataset_func):
dataset = add_dataset_func
dataset.update({"pagerank": 50})
assert dataset.pagerank == 50, str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.pagerank == 50, str(retrieved_dataset)
dataset.update({"pagerank": 0})
assert dataset.pagerank == 0, str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.pagerank == 0, str(retrieved_dataset)
@pytest.mark.skipif(os.getenv("DOC_ENGINE") != "infinity", reason="#8208")
@pytest.mark.p2
def test_pagerank_infinity(self, client, add_dataset_func):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"pagerank": 50})
assert "'pagerank' can only be set when doc_engine is elasticsearch" in str(exception_info.value), str(exception_info.value)
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "pagerank, expected_message",
        [
            (-1, "Input should be greater than or equal to 0"),
            (101, "Input should be less than or equal to 100"),
        ],
        ids=["min_limit", "max_limit"],
    )
    def test_pagerank_invalid(self, add_dataset_func, pagerank, expected_message):
        """pagerank values outside [0, 100] are rejected with a range error."""
        dataset = add_dataset_func
        with pytest.raises(Exception) as exception_info:
            dataset.update({"pagerank": pagerank})
        assert expected_message in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_pagerank_none(self, add_dataset_func):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"pagerank": None})
assert "Input should be a valid integer" in str(exception_info.value), str(exception_info.value)
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "parser_config",
        [
            {"auto_keywords": 0},
            {"auto_keywords": 16},
            {"auto_keywords": 32},
            {"auto_questions": 0},
            {"auto_questions": 5},
            {"auto_questions": 10},
            {"chunk_token_num": 1},
            {"chunk_token_num": 1024},
            {"chunk_token_num": 2048},
            {"delimiter": "\n"},
            {"delimiter": " "},
            {"html4excel": True},
            {"html4excel": False},
            {"layout_recognize": "DeepDOC"},
            {"layout_recognize": "Plain Text"},
            {"tag_kb_ids": ["1", "2"]},
            {"topn_tags": 1},
            {"topn_tags": 5},
            {"topn_tags": 10},
            {"filename_embd_weight": 0.1},
            {"filename_embd_weight": 0.5},
            {"filename_embd_weight": 1.0},
            {"task_page_size": 1},
            {"task_page_size": None},
            {"pages": [[1, 100]]},
            {"pages": None},
            {"graphrag": {"use_graphrag": True}},
            {"graphrag": {"use_graphrag": False}},
            {"graphrag": {"entity_types": ["age", "sex", "height", "weight"]}},
            {"graphrag": {"method": "general"}},
            {"graphrag": {"method": "light"}},
            {"graphrag": {"community": True}},
            {"graphrag": {"community": False}},
            {"graphrag": {"resolution": True}},
            {"graphrag": {"resolution": False}},
            {"raptor": {"use_raptor": True}},
            {"raptor": {"use_raptor": False}},
            {"raptor": {"prompt": "Who are you?"}},
            {"raptor": {"max_token": 1}},
            {"raptor": {"max_token": 1024}},
            {"raptor": {"max_token": 2048}},
            {"raptor": {"threshold": 0.0}},
            {"raptor": {"threshold": 0.5}},
            {"raptor": {"threshold": 1.0}},
            {"raptor": {"max_cluster": 1}},
            {"raptor": {"max_cluster": 512}},
            {"raptor": {"max_cluster": 1024}},
            {"raptor": {"random_seed": 0}},
        ],
        ids=[
            "auto_keywords_min",
            "auto_keywords_mid",
            "auto_keywords_max",
            "auto_questions_min",
            "auto_questions_mid",
            "auto_questions_max",
            "chunk_token_num_min",
            "chunk_token_num_mid",
            "chunk_token_num_max",
            "delimiter",
            "delimiter_space",
            "html4excel_true",
            "html4excel_false",
            "layout_recognize_DeepDOC",
            "layout_recognize_navie",
            "tag_kb_ids",
            "topn_tags_min",
            "topn_tags_mid",
            "topn_tags_max",
            "filename_embd_weight_min",
            "filename_embd_weight_mid",
            "filename_embd_weight_max",
            "task_page_size_min",
            "task_page_size_None",
            "pages",
            "pages_none",
            "graphrag_true",
            "graphrag_false",
            "graphrag_entity_types",
            "graphrag_method_general",
            "graphrag_method_light",
            "graphrag_community_true",
            "graphrag_community_false",
            "graphrag_resolution_true",
            "graphrag_resolution_false",
            "raptor_true",
            "raptor_false",
            "raptor_prompt",
            "raptor_max_token_min",
            "raptor_max_token_mid",
            "raptor_max_token_max",
            "raptor_threshold_min",
            "raptor_threshold_mid",
            "raptor_threshold_max",
            "raptor_max_cluster_min",
            "raptor_max_cluster_mid",
            "raptor_max_cluster_max",
            "raptor_random_seed_min",
        ],
    )
    def test_parser_config(self, client, add_dataset_func, parser_config):
        """Every valid parser_config field value round-trips through update and fetch."""
        dataset = add_dataset_func
        dataset.update({"parser_config": parser_config})
        # Nested sections (graphrag/raptor) are compared one dotted attribute at a time.
        for k, v in parser_config.items():
            if isinstance(v, dict):
                for kk, vv in v.items():
                    assert attrgetter(f"{k}.{kk}")(dataset.parser_config) == vv, str(dataset)
            else:
                assert attrgetter(k)(dataset.parser_config) == v, str(dataset)
        # Re-fetch to confirm the change persisted server-side, not just locally.
        retrieved_dataset = client.get_dataset(name=dataset.name)
        for k, v in parser_config.items():
            if isinstance(v, dict):
                for kk, vv in v.items():
                    assert attrgetter(f"{k}.{kk}")(retrieved_dataset.parser_config) == vv, str(retrieved_dataset)
            else:
                assert attrgetter(k)(retrieved_dataset.parser_config) == v, str(retrieved_dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
"parser_config, expected_message",
[
({"auto_keywords": -1}, "Input should be greater than or equal to 0"),
({"auto_keywords": 33}, "Input should be less than or equal to 32"),
({"auto_keywords": 3.14}, "Input should be a valid integer"),
({"auto_keywords": "string"}, "Input should be a valid integer"),
({"auto_questions": -1}, "Input should be greater than or equal to 0"),
({"auto_questions": 11}, "Input should be less than or equal to 10"),
({"auto_questions": 3.14}, "Input should be a valid integer"),
({"auto_questions": "string"}, "Input should be a valid integer"),
({"chunk_token_num": 0}, "Input should be greater than or equal to 1"),
({"chunk_token_num": 2049}, "Input should be less than or equal to 2048"),
({"chunk_token_num": 3.14}, "Input should be a valid integer"),
({"chunk_token_num": "string"}, "Input should be a valid integer"),
({"delimiter": ""}, "String should have at least 1 character"),
({"html4excel": "string"}, "Input should be a valid boolean"),
({"tag_kb_ids": "1,2"}, "Input should be a valid list"),
({"tag_kb_ids": [1, 2]}, "Input should be a valid string"),
({"topn_tags": 0}, "Input should be greater than or equal to 1"),
({"topn_tags": 11}, "Input should be less than or equal to 10"),
({"topn_tags": 3.14}, "Input should be a valid integer"),
({"topn_tags": "string"}, "Input should be a valid integer"),
({"filename_embd_weight": -1}, "Input should be greater than or equal to 0"),
({"filename_embd_weight": 1.1}, "Input should be less than or equal to 1"),
({"filename_embd_weight": "string"}, "Input should be a valid number"),
({"task_page_size": 0}, "Input should be greater than or equal to 1"),
({"task_page_size": 3.14}, "Input should be a valid integer"),
({"task_page_size": "string"}, "Input should be a valid integer"),
({"pages": "1,2"}, "Input should be a valid list"),
({"pages": ["1,2"]}, "Input should be a valid list"),
({"pages": [["string1", "string2"]]}, "Input should be a valid integer"),
({"graphrag": {"use_graphrag": "string"}}, "Input should be a valid boolean"),
({"graphrag": {"entity_types": "1,2"}}, "Input should be a valid list"),
({"graphrag": {"entity_types": [1, 2]}}, "nput should be a valid string"),
({"graphrag": {"method": "unknown"}}, "Input should be 'light' or 'general'"),
({"graphrag": {"method": None}}, "Input should be 'light' or 'general'"),
({"graphrag": {"community": "string"}}, "Input should be a valid boolean"),
({"graphrag": {"resolution": "string"}}, "Input should be a valid boolean"),
({"raptor": {"use_raptor": "string"}}, "Input should be a valid boolean"),
({"raptor": {"prompt": ""}}, "String should have at least 1 character"),
({"raptor": {"prompt": " "}}, "String should have at least 1 character"),
({"raptor": {"max_token": 0}}, "Input should be greater than or equal to 1"),
({"raptor": {"max_token": 2049}}, "Input should be less than or equal to 2048"),
({"raptor": {"max_token": 3.14}}, "Input should be a valid integer"),
({"raptor": {"max_token": "string"}}, "Input should be a valid integer"),
({"raptor": {"threshold": -0.1}}, "Input should be greater than or equal to 0"),
({"raptor": {"threshold": 1.1}}, "Input should be less than or equal to 1"),
({"raptor": {"threshold": "string"}}, "Input should be a valid number"),
({"raptor": {"max_cluster": 0}}, "Input should be greater than or equal to 1"),
({"raptor": {"max_cluster": 1025}}, "Input should be less than or equal to 1024"),
({"raptor": {"max_cluster": 3.14}}, "Input should be a valid integer"),
({"raptor": {"max_cluster": "string"}}, "Input should be a valid integer"),
({"raptor": {"random_seed": -1}}, "Input should be greater than or equal to 0"),
({"raptor": {"random_seed": 3.14}}, "Input should be a valid integer"),
({"raptor": {"random_seed": "string"}}, "Input should be a valid integer"),
({"delimiter": "a" * 65536}, "Parser config exceeds size limit (max 65,535 characters)"),
],
ids=[
"auto_keywords_min_limit",
"auto_keywords_max_limit",
"auto_keywords_float_not_allowed",
"auto_keywords_type_invalid",
"auto_questions_min_limit",
"auto_questions_max_limit",
"auto_questions_float_not_allowed",
"auto_questions_type_invalid",
"chunk_token_num_min_limit",
"chunk_token_num_max_limit",
"chunk_token_num_float_not_allowed",
"chunk_token_num_type_invalid",
"delimiter_empty",
"html4excel_type_invalid",
"tag_kb_ids_not_list",
"tag_kb_ids_int_in_list",
"topn_tags_min_limit",
"topn_tags_max_limit",
"topn_tags_float_not_allowed",
"topn_tags_type_invalid",
"filename_embd_weight_min_limit",
"filename_embd_weight_max_limit",
"filename_embd_weight_type_invalid",
"task_page_size_min_limit",
"task_page_size_float_not_allowed",
"task_page_size_type_invalid",
"pages_not_list",
"pages_not_list_in_list",
"pages_not_int_list",
"graphrag_type_invalid",
"graphrag_entity_types_not_list",
"graphrag_entity_types_not_str_in_list",
"graphrag_method_unknown",
"graphrag_method_none",
"graphrag_community_type_invalid",
"graphrag_resolution_type_invalid",
"raptor_type_invalid",
"raptor_prompt_empty",
"raptor_prompt_space",
"raptor_max_token_min_limit",
"raptor_max_token_max_limit",
"raptor_max_token_float_not_allowed",
"raptor_max_token_type_invalid",
"raptor_threshold_min_limit",
"raptor_threshold_max_limit",
"raptor_threshold_type_invalid",
"raptor_max_cluster_min_limit",
"raptor_max_cluster_max_limit",
"raptor_max_cluster_float_not_allowed",
"raptor_max_cluster_type_invalid",
"raptor_random_seed_min_limit",
"raptor_random_seed_float_not_allowed",
"raptor_random_seed_type_invalid",
"parser_config_type_invalid",
],
)
def test_parser_config_invalid(self, add_dataset_func, parser_config, expected_message):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update({"parser_config": parser_config})
assert expected_message in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
def test_parser_config_empty(self, client, add_dataset_func):
dataset = add_dataset_func
expected_config = DataSet.ParserConfig(
client,
DEFAULT_PARSER_CONFIG,
)
dataset.update({"parser_config": {}})
assert str(dataset.parser_config) == str(expected_config), str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert str(retrieved_dataset.parser_config) == str(expected_config), str(retrieved_dataset)
@pytest.mark.p3
def test_parser_config_none(self, client, add_dataset_func):
dataset = add_dataset_func
expected_config = DataSet.ParserConfig(
client,
DEFAULT_PARSER_CONFIG,
)
dataset.update({"parser_config": None})
assert str(dataset.parser_config) == str(expected_config), str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert str(retrieved_dataset.parser_config) == str(expected_config), str(retrieved_dataset)
@pytest.mark.p3
def test_parser_config_empty_with_chunk_method_change(self, client, add_dataset_func):
dataset = add_dataset_func
expected_config = DataSet.ParserConfig(
client,
{
"raptor": {"use_raptor": False},
"graphrag": {"use_graphrag": False},
},
)
dataset.update({"chunk_method": "qa", "parser_config": {}})
assert str(dataset.parser_config) == str(expected_config), str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert str(retrieved_dataset.parser_config) == str(expected_config), str(retrieved_dataset)
@pytest.mark.p3
def test_parser_config_unset_with_chunk_method_change(self, client, add_dataset_func):
dataset = add_dataset_func
expected_config = DataSet.ParserConfig(
client,
{
"raptor": {"use_raptor": False},
"graphrag": {"use_graphrag": False},
},
)
dataset.update({"chunk_method": "qa"})
assert str(dataset.parser_config) == str(expected_config), str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert str(retrieved_dataset.parser_config) == str(expected_config), str(retrieved_dataset)
@pytest.mark.p3
def test_parser_config_none_with_chunk_method_change(self, client, add_dataset_func):
dataset = add_dataset_func
expected_config = DataSet.ParserConfig(
client,
{
"raptor": {"use_raptor": False},
"graphrag": {"use_graphrag": False},
},
)
dataset.update({"chunk_method": "qa", "parser_config": None})
assert str(dataset.parser_config) == str(expected_config), str(dataset)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert str(retrieved_dataset.parser_config) == str(expected_config), str(retrieved_dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
"payload",
[
{"id": "id"},
{"tenant_id": "e57c1966f99211efb41e9e45646e0111"},
{"created_by": "created_by"},
{"create_date": "Tue, 11 Mar 2025 13:37:23 GMT"},
{"create_time": 1741671443322},
{"update_date": "Tue, 11 Mar 2025 13:37:23 GMT"},
{"update_time": 1741671443339},
{"document_count": 1},
{"chunk_count": 1},
{"token_num": 1},
{"status": "1"},
{"unknown_field": "unknown_field"},
],
)
def test_field_unsupported(self, add_dataset_func, payload):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.update(payload)
assert "Extra inputs are not permitted" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
def test_field_unset(self, client, add_dataset_func):
dataset = add_dataset_func
original_dataset = client.get_dataset(name=dataset.name)
dataset.update({"name": "default_unset"})
updated_dataset = client.get_dataset(name="default_unset")
assert updated_dataset.avatar == original_dataset.avatar, str(updated_dataset)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | true |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_add_chunk.py | test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_add_chunk.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
from time import sleep
import pytest
from ragflow_sdk import Chunk
def validate_chunk_details(dataset_id: str, document_id: str, payload: dict, chunk: "Chunk"):
    """Assert that *chunk* reflects the add_chunk *payload* it was created from.

    Optional payload keys (important_keywords, questions) are only checked when
    present; questions are expected back stripped, with blank entries dropped.
    """
    assert chunk.dataset_id == dataset_id
    assert chunk.document_id == document_id
    assert chunk.content == payload["content"]
    if "important_keywords" in payload:
        assert chunk.important_keywords == payload["important_keywords"]
    if "questions" in payload:
        expected_questions = [stripped for q in payload.get("questions", []) if (stripped := str(q).strip())]
        assert chunk.questions == expected_questions
class TestAddChunk:
    """Tests for Document.add_chunk: content validation, optional extras, and concurrency."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            ({"content": None}, "not instance of"),
            ({"content": ""}, "`content` is required"),
            ({"content": 1}, "not instance of"),
            ({"content": "a"}, ""),
            ({"content": " "}, "`content` is required"),
            ({"content": "\n!?。;!?\"'"}, ""),
        ],
    )
    def test_content(self, add_document, payload, expected_message):
        """`content` must be a non-blank string; a valid one adds exactly one chunk."""
        dataset, document = add_document
        chunks_count = len(document.list_chunks())
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.add_chunk(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunk = document.add_chunk(**payload)
            validate_chunk_details(dataset.id, document.id, payload, chunk)
            # brief pause so the new chunk becomes visible to list_chunks
            sleep(1)
            chunks = document.list_chunks()
            assert len(chunks) == chunks_count + 1, str(chunks)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            ({"content": "chunk test important_keywords 1", "important_keywords": ["a", "b", "c"]}, ""),
            ({"content": "chunk test important_keywords 2", "important_keywords": [""]}, ""),
            ({"content": "chunk test important_keywords 3", "important_keywords": [1]}, "not instance of"),
            ({"content": "chunk test important_keywords 4", "important_keywords": ["a", "a"]}, ""),
            ({"content": "chunk test important_keywords 5", "important_keywords": "abc"}, "not instance of"),
            ({"content": "chunk test important_keywords 6", "important_keywords": 123}, "not instance of"),
        ],
    )
    def test_important_keywords(self, add_document, payload, expected_message):
        """important_keywords must be a list of strings (duplicates/empties allowed)."""
        dataset, document = add_document
        chunks_count = len(document.list_chunks())
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.add_chunk(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunk = document.add_chunk(**payload)
            validate_chunk_details(dataset.id, document.id, payload, chunk)
            # brief pause so the new chunk becomes visible to list_chunks
            sleep(1)
            chunks = document.list_chunks()
            assert len(chunks) == chunks_count + 1, str(chunks)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            ({"content": "chunk test test_questions 1", "questions": ["a", "b", "c"]}, ""),
            ({"content": "chunk test test_questions 2", "questions": [""]}, ""),
            ({"content": "chunk test test_questions 3", "questions": [1]}, "not instance of"),
            ({"content": "chunk test test_questions 4", "questions": ["a", "a"]}, ""),
            ({"content": "chunk test test_questions 5", "questions": "abc"}, "not instance of"),
            ({"content": "chunk test test_questions 6", "questions": 123}, "not instance of"),
        ],
    )
    def test_questions(self, add_document, payload, expected_message):
        """questions must be a list of strings (duplicates/empties allowed)."""
        dataset, document = add_document
        chunks_count = len(document.list_chunks())
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.add_chunk(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunk = document.add_chunk(**payload)
            validate_chunk_details(dataset.id, document.id, payload, chunk)
            # brief pause so the new chunk becomes visible to list_chunks
            sleep(1)
            chunks = document.list_chunks()
            assert len(chunks) == chunks_count + 1, str(chunks)

    @pytest.mark.p3
    def test_repeated_add_chunk(self, add_document):
        """Adding identical content twice leaves the chunk count unchanged after the
        second call — the count assertions show duplicate content is not re-added."""
        payload = {"content": "chunk test repeated_add_chunk"}
        dataset, document = add_document
        chunks_count = len(document.list_chunks())
        chunk1 = document.add_chunk(**payload)
        validate_chunk_details(dataset.id, document.id, payload, chunk1)
        sleep(1)
        chunks = document.list_chunks()
        assert len(chunks) == chunks_count + 1, str(chunks)
        chunk2 = document.add_chunk(**payload)
        validate_chunk_details(dataset.id, document.id, payload, chunk2)
        sleep(1)
        chunks = document.list_chunks()
        assert len(chunks) == chunks_count + 1, str(chunks)

    @pytest.mark.p2
    def test_add_chunk_to_deleted_document(self, add_document):
        """Adding a chunk to a deleted document must fail with an ownership error."""
        dataset, document = add_document
        dataset.delete_documents(ids=[document.id])
        with pytest.raises(Exception) as exception_info:
            document.add_chunk(content="chunk test")
        assert f"You don't own the document {document.id}" in str(exception_info.value), str(exception_info.value)

    # skipped pending upstream fix, see issues/6411
    @pytest.mark.skip(reason="issues/6411")
    @pytest.mark.p3
    def test_concurrent_add_chunk(self, add_document):
        """Concurrent add_chunk calls should all land: count grows by exactly `count`."""
        count = 50
        _, document = add_document
        initial_chunk_count = len(document.list_chunks())

        def add_chunk_task(i):
            return document.add_chunk(content=f"chunk test concurrent {i}")

        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(add_chunk_task, i) for i in range(count)]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        sleep(5)
        assert len(document.list_chunks(page_size=100)) == initial_chunk_count + count
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_retrieval_chunks.py | test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_retrieval_chunks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
class TestChunksRetrieval:
    """Tests for client.retrieve: required args, paging, weighting, top_k, rerank."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_page_size, expected_message",
        [
            # None values below are placeholders replaced with real ids in the test body
            ({"question": "chunk", "dataset_ids": None}, 4, ""),
            ({"question": "chunk", "document_ids": None}, 0, "missing 1 required positional argument"),
            ({"question": "chunk", "dataset_ids": None, "document_ids": None}, 4, ""),
            ({"question": "chunk"}, 0, "missing 1 required positional argument"),
        ],
    )
    def test_basic_scenarios(self, client, add_chunks, payload, expected_page_size, expected_message):
        """dataset_ids is mandatory; document_ids alone is not sufficient."""
        dataset, document, _ = add_chunks
        if "dataset_ids" in payload:
            payload["dataset_ids"] = [dataset.id]
        if "document_ids" in payload:
            payload["document_ids"] = [document.id]
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.retrieve(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = client.retrieve(**payload)
            assert len(chunks) == expected_page_size, str(chunks)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_page_size, expected_message",
        [
            pytest.param(
                {"page": None, "page_size": 2},
                2,
                """TypeError("int() argument must be a string, a bytes-like object or a real number, not \'NoneType\'")""",
                marks=pytest.mark.skip,
            ),
            pytest.param(
                {"page": 0, "page_size": 2},
                0,
                "ValueError('Search does not support negative slicing.')",
                marks=pytest.mark.skip,
            ),
            ({"page": 2, "page_size": 2}, 2, ""),
            ({"page": 3, "page_size": 2}, 0, ""),
            ({"page": "3", "page_size": 2}, 0, ""),
            pytest.param(
                {"page": -1, "page_size": 2},
                0,
                "ValueError('Search does not support negative slicing.')",
                marks=pytest.mark.skip,
            ),
            pytest.param(
                {"page": "a", "page_size": 2},
                0,
                """ValueError("invalid literal for int() with base 10: \'a\'")""",
                marks=pytest.mark.skip,
            ),
        ],
    )
    def test_page(self, client, add_chunks, payload, expected_page_size, expected_message):
        """Paging: valid pages return slices; out-of-range pages return empty results."""
        dataset, _, _ = add_chunks
        payload.update({"question": "chunk", "dataset_ids": [dataset.id]})
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.retrieve(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = client.retrieve(**payload)
            assert len(chunks) == expected_page_size, str(chunks)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "payload, expected_page_size, expected_message",
        [
            pytest.param(
                {"page_size": None},
                0,
                """TypeError("int() argument must be a string, a bytes-like object or a real number, not \'NoneType\'")""",
                marks=pytest.mark.skip,
            ),
            pytest.param({"page_size": 1}, 1, "", marks=pytest.mark.skip(reason="issues/10692")),
            ({"page_size": 5}, 4, ""),
            pytest.param({"page_size": "1"}, 1, "", marks=pytest.mark.skip(reason="issues/10692")),
            pytest.param(
                {"page_size": "a"},
                0,
                """ValueError("invalid literal for int() with base 10: \'a\'")""",
                marks=pytest.mark.skip,
            ),
        ],
    )
    def test_page_size(self, client, add_chunks, payload, expected_page_size, expected_message):
        """page_size caps the number of returned chunks (4 chunks exist in the fixture)."""
        dataset, _, _ = add_chunks
        payload.update({"question": "chunk", "dataset_ids": [dataset.id]})
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.retrieve(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = client.retrieve(**payload)
            assert len(chunks) == expected_page_size, str(chunks)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "payload, expected_page_size, expected_message",
        [
            # result count does not change with the weight here, only the ranking would
            ({"vector_similarity_weight": 0}, 4, ""),
            ({"vector_similarity_weight": 0.5}, 4, ""),
            ({"vector_similarity_weight": 10}, 4, ""),
            pytest.param(
                {"vector_similarity_weight": "a"},
                0,
                """ValueError("could not convert string to float: 'a'")""",
                marks=pytest.mark.skip,
            ),
        ],
    )
    def test_vector_similarity_weight(self, client, add_chunks, payload, expected_page_size, expected_message):
        """vector_similarity_weight accepts any non-negative number."""
        dataset, _, _ = add_chunks
        payload.update({"question": "chunk", "dataset_ids": [dataset.id]})
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.retrieve(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = client.retrieve(**payload)
            assert len(chunks) == expected_page_size, str(chunks)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_page_size, expected_message",
        [
            # top_k semantics and error codes differ per DOC_ENGINE backend,
            # hence the paired skipif cases below
            ({"top_k": 10}, 4, ""),
            pytest.param(
                {"top_k": 1},
                4,
                "",
                marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in ["infinity", "opensearch"], reason="Infinity"),
            ),
            pytest.param(
                {"top_k": 1},
                1,
                "",
                marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in [None, "opensearch", "elasticsearch"], reason="elasticsearch"),
            ),
            pytest.param(
                {"top_k": -1},
                4,
                "must be greater than 0",
                marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in ["infinity", "opensearch"], reason="Infinity"),
            ),
            pytest.param(
                {"top_k": -1},
                4,
                "3014",
                marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in [None, "opensearch", "elasticsearch"], reason="elasticsearch"),
            ),
            pytest.param(
                {"top_k": "a"},
                0,
                """ValueError("invalid literal for int() with base 10: \'a\'")""",
                marks=pytest.mark.skip,
            ),
        ],
    )
    def test_top_k(self, client, add_chunks, payload, expected_page_size, expected_message):
        """top_k must be positive; backend-specific behavior is handled via skipif."""
        dataset, _, _ = add_chunks
        payload.update({"question": "chunk", "dataset_ids": [dataset.id]})
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.retrieve(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = client.retrieve(**payload)
            assert len(chunks) == expected_page_size, str(chunks)

    # whole test skipped: requires a reranker model to be configured
    @pytest.mark.skip
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            ({"rerank_id": "BAAI/bge-reranker-v2-m3"}, ""),
            pytest.param({"rerank_id": "unknown"}, "LookupError('Model(unknown) not authorized')", marks=pytest.mark.skip),
        ],
    )
    def test_rerank_id(self, client, add_chunks, payload, expected_message):
        """A known rerank model id returns results; unknown ids raise a lookup error."""
        dataset, _, _ = add_chunks
        payload.update({"question": "chunk", "dataset_ids": [dataset.id]})
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.retrieve(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = client.retrieve(**payload)
            assert len(chunks) > 0, str(chunks)

    @pytest.mark.skip
    @pytest.mark.parametrize(
        "payload, expected_page_size, expected_message",
        [
            ({"keyword": True}, 5, ""),
            ({"keyword": "True"}, 5, ""),
            ({"keyword": False}, 5, ""),
            ({"keyword": "False"}, 5, ""),
            ({"keyword": None}, 5, ""),
        ],
    )
    def test_keyword(self, client, add_chunks, payload, expected_page_size, expected_message):
        """keyword toggling should not change the result count for this query."""
        dataset, _, _ = add_chunks
        payload.update({"question": "chunk test", "dataset_ids": [dataset.id]})
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.retrieve(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = client.retrieve(**payload)
            assert len(chunks) == expected_page_size, str(chunks)

    @pytest.mark.p3
    def test_concurrent_retrieval(self, client, add_chunks):
        """100 parallel retrieve calls against one dataset should all complete."""
        dataset, _, _ = add_chunks
        count = 100
        payload = {"question": "chunk", "dataset_ids": [dataset.id]}
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(client.retrieve, **payload) for _ in range(count)]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_update_chunk.py | test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_update_chunk.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from random import randint
import pytest
class TestUpdatedChunk:
    """Tests for Chunk.update: content, keywords, questions, availability, concurrency."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            ({"content": None}, ""),
            pytest.param(
                {"content": ""},
                """APIRequestFailedError(\'Error code: 400, with error text {"error":{"code":"1213","message":"未正常接收到prompt参数。"}}\')""",
                marks=pytest.mark.skip(reason="issues/6541"),
            ),
            pytest.param(
                {"content": 1},
                "TypeError('expected string or bytes-like object')",
                marks=pytest.mark.skip,
            ),
            ({"content": "update chunk"}, ""),
            pytest.param(
                {"content": " "},
                """APIRequestFailedError(\'Error code: 400, with error text {"error":{"code":"1213","message":"未正常接收到prompt参数。"}}\')""",
                marks=pytest.mark.skip(reason="issues/6541"),
            ),
            ({"content": "\n!?。;!?\"'"}, ""),
        ],
    )
    def test_content(self, add_chunks, payload, expected_message):
        """Valid content strings (including None) are accepted by update."""
        _, _, chunks = add_chunks
        chunk = chunks[0]
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chunk.update(payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunk.update(payload)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            ({"important_keywords": ["a", "b", "c"]}, ""),
            ({"important_keywords": [""]}, ""),
            ({"important_keywords": [1]}, "TypeError('sequence item 0: expected str instance, int found')"),
            ({"important_keywords": ["a", "a"]}, ""),
            ({"important_keywords": "abc"}, "`important_keywords` should be a list"),
            ({"important_keywords": 123}, "`important_keywords` should be a list"),
        ],
    )
    def test_important_keywords(self, add_chunks, payload, expected_message):
        """important_keywords must be a list of strings."""
        _, _, chunks = add_chunks
        chunk = chunks[0]
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chunk.update(payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunk.update(payload)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            ({"questions": ["a", "b", "c"]}, ""),
            ({"questions": [""]}, ""),
            ({"questions": [1]}, "TypeError('sequence item 0: expected str instance, int found')"),
            ({"questions": ["a", "a"]}, ""),
            ({"questions": "abc"}, "`questions` should be a list"),
            ({"questions": 123}, "`questions` should be a list"),
        ],
    )
    def test_questions(self, add_chunks, payload, expected_message):
        """questions must be a list of strings."""
        _, _, chunks = add_chunks
        chunk = chunks[0]
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chunk.update(payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunk.update(payload)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            # booleans and 0/1 are accepted; string forms are currently rejected (skipped)
            ({"available": True}, ""),
            pytest.param({"available": "True"}, """ValueError("invalid literal for int() with base 10: \'True\'")""", marks=pytest.mark.skip),
            ({"available": 1}, ""),
            ({"available": False}, ""),
            pytest.param({"available": "False"}, """ValueError("invalid literal for int() with base 10: \'False\'")""", marks=pytest.mark.skip),
            ({"available": 0}, ""),
        ],
    )
    def test_available(self, add_chunks, payload, expected_message):
        """available accepts booleans and the ints 0/1."""
        _, _, chunks = add_chunks
        chunk = chunks[0]
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chunk.update(payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunk.update(payload)

    @pytest.mark.p3
    def test_repeated_update_chunk(self, add_chunks):
        """Back-to-back updates of the same chunk both succeed."""
        _, _, chunks = add_chunks
        chunk = chunks[0]
        chunk.update({"content": "chunk test 1"})
        chunk.update({"content": "chunk test 2"})

    @pytest.mark.p3
    @pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6554")
    def test_concurrent_update_chunk(self, add_chunks):
        """50 parallel updates spread over 4 chunks should all complete."""
        count = 50
        _, _, chunks = add_chunks
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(chunks[randint(0, 3)].update, {"content": f"update chunk test {i}"}) for i in range(count)]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses

    @pytest.mark.p3
    def test_update_chunk_to_deleted_document(self, add_chunks):
        """Updating a chunk of a deleted document fails with ownership or not-found."""
        dataset, document, chunks = add_chunks
        dataset.delete_documents(ids=[document.id])
        with pytest.raises(Exception) as exception_info:
            chunks[0].update({})
        # either error message is acceptable depending on which check trips first
        assert str(exception_info.value) in [f"You don't own the document {chunks[0].document_id}", f"Can't find this chunk {chunks[0].id}"], str(exception_info.value)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_chunk_management_within_dataset/conftest.py | test/testcases/test_sdk_api/test_chunk_management_within_dataset/conftest.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from time import sleep
import pytest
from common import batch_add_chunks
from pytest import FixtureRequest
from ragflow_sdk import Chunk, DataSet, Document
from utils import wait_for
@wait_for(30, 1, "Document parsing timeout")
def condition(_dataset: DataSet):
    """Polling predicate: True once every document in *_dataset* has run state "DONE"."""
    documents = _dataset.list_documents(page_size=1000)
    return all(document.run == "DONE" for document in documents)
@pytest.fixture(scope="function")
def add_chunks_func(request: FixtureRequest, add_document: tuple[DataSet, Document]) -> tuple[DataSet, Document, list[Chunk]]:
    """Parse the shared document, add 4 chunks, and return (dataset, document, chunks).

    A finalizer performs best-effort chunk cleanup after each test.
    """
    def cleanup():
        try:
            # presumably ids=[] removes all chunks of the document — TODO confirm;
            # failures are ignored since the document may already be gone
            document.delete_chunks(ids=[])
        except Exception:
            pass

    # register before doing any work so cleanup runs even if setup fails midway
    request.addfinalizer(cleanup)

    dataset, document = add_document
    dataset.async_parse_documents([document.id])
    # block until parsing completes (wait_for-decorated predicate)
    condition(dataset)
    chunks = batch_add_chunks(document, 4)
    # issues/6487
    sleep(1)
    return dataset, document, chunks
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_delete_chunks.py | test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_delete_chunks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_add_chunks
class TestChunksDeletion:
    """Chunk deletion: invalid ids, repeated/duplicate deletes, concurrency, bulk."""
    @pytest.mark.parametrize(
        "payload",
        [
            pytest.param(lambda r: {"ids": ["invalid_id"] + r}, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r[:1] + ["invalid_id"] + r[1:4]}, marks=pytest.mark.p1),
            pytest.param(lambda r: {"ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
        ],
    )
    def test_delete_partial_invalid_id(self, add_chunks_func, payload):
        # One invalid id mixed into the request raises, but valid ids are still deleted.
        _, document, chunks = add_chunks_func
        chunk_ids = [chunk.id for chunk in chunks]
        payload = payload(chunk_ids)
        with pytest.raises(Exception) as exception_info:
            document.delete_chunks(**payload)
        assert "rm_chunk deleted chunks" in str(exception_info.value), str(exception_info.value)
        remaining_chunks = document.list_chunks()
        # NOTE(review): expects exactly 1 remaining chunk — presumably the
        # parser-generated one, not the 4 added by the fixture; confirm.
        assert len(remaining_chunks) == 1, str(remaining_chunks)
    @pytest.mark.p3
    def test_repeated_deletion(self, add_chunks_func):
        # Second delete of the same ids reports 0 deleted and raises.
        _, document, chunks = add_chunks_func
        chunk_ids = [chunk.id for chunk in chunks]
        document.delete_chunks(ids=chunk_ids)
        with pytest.raises(Exception) as exception_info:
            document.delete_chunks(ids=chunk_ids)
        assert "rm_chunk deleted chunks 0, expect" in str(exception_info.value), str(exception_info.value)
    @pytest.mark.p3
    def test_duplicate_deletion(self, add_chunks_func):
        # Duplicated ids inside one request are tolerated; no error is raised.
        _, document, chunks = add_chunks_func
        chunk_ids = [chunk.id for chunk in chunks]
        document.delete_chunks(ids=chunk_ids * 2)
        remaining_chunks = document.list_chunks()
        assert len(remaining_chunks) == 1, str(remaining_chunks)
    @pytest.mark.p3
    def test_concurrent_deletion(self, add_document):
        # 100 single-id deletions fanned out over 5 threads must all complete.
        count = 100
        _, document = add_document
        chunks = batch_add_chunks(document, count)
        chunk_ids = [chunk.id for chunk in chunks]
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(document.delete_chunks, ids=[chunk_id]) for chunk_id in chunk_ids]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses
    @pytest.mark.p3
    def test_delete_1k(self, add_document):
        # Bulk path: add, then delete, 1000 chunks in a single request.
        count = 1_000
        _, document = add_document
        chunks = batch_add_chunks(document, count)
        chunk_ids = [chunk.id for chunk in chunks]
        from time import sleep
        sleep(1)  # give the doc engine time to index before deleting
        document.delete_chunks(ids=chunk_ids)
        remaining_chunks = document.list_chunks()
        assert len(remaining_chunks) == 0, str(remaining_chunks)
    @pytest.mark.parametrize(
        "payload, expected_message, remaining",
        [
            pytest.param(None, "TypeError", 5, marks=pytest.mark.skip),
            pytest.param({"ids": ["invalid_id"]}, "rm_chunk deleted chunks 0, expect 1", 5, marks=pytest.mark.p3),
            pytest.param("not json", "UnboundLocalError", 5, marks=pytest.mark.skip(reason="pull/6376")),
            pytest.param(lambda r: {"ids": r[:1]}, "", 4, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r}, "", 1, marks=pytest.mark.p1),
            pytest.param({"ids": []}, "", 0, marks=pytest.mark.p3),
        ],
    )
    def test_basic_scenarios(self, add_chunks_func, payload, expected_message, remaining):
        # Table-driven delete scenarios; ids=[] means "delete everything".
        _, document, chunks = add_chunks_func
        chunk_ids = [chunk.id for chunk in chunks]
        if callable(payload):
            payload = payload(chunk_ids)
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.delete_chunks(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            document.delete_chunks(**payload)
            remaining_chunks = document.list_chunks()
            assert len(remaining_chunks) == remaining, str(remaining_chunks)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_list_chunks.py | test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_list_chunks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_add_chunks
class TestChunksList:
    """Chunk listing: pagination, keyword filter, id filter, concurrency, default cap."""
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size, expected_message",
        [
            ({"page": None, "page_size": 2}, 2, ""),
            pytest.param({"page": 0, "page_size": 2}, 0, "ValueError('Search does not support negative slicing.')", marks=pytest.mark.skip),
            ({"page": 2, "page_size": 2}, 2, ""),
            ({"page": 3, "page_size": 2}, 1, ""),
            ({"page": "3", "page_size": 2}, 1, ""),
            pytest.param({"page": -1, "page_size": 2}, 0, "ValueError('Search does not support negative slicing.')", marks=pytest.mark.skip),
            pytest.param({"page": "a", "page_size": 2}, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_page(self, add_chunks, params, expected_page_size, expected_message):
        # Page navigation over the 5 fixture chunks; "3" shows string pages are accepted here.
        _, document, _ = add_chunks
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.list_chunks(**params)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = document.list_chunks(**params)
            assert len(chunks) == expected_page_size, str(chunks)
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size, expected_message",
        [
            ({"page_size": None}, 5, ""),
            pytest.param({"page_size": 0}, 5, ""),
            ({"page_size": 1}, 1, ""),
            ({"page_size": 6}, 5, ""),
            ({"page_size": "1"}, 1, ""),
            pytest.param({"page_size": -1}, 5, "", marks=pytest.mark.skip),
            pytest.param({"page_size": "a"}, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_page_size(self, add_chunks, params, expected_page_size, expected_message):
        # page_size larger than the total returns all 5 chunks.
        _, document, _ = add_chunks
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.list_chunks(**params)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = document.list_chunks(**params)
            assert len(chunks) == expected_page_size, str(chunks)
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "params, expected_page_size",
        [
            ({"keywords": None}, 5),
            ({"keywords": ""}, 5),
            ({"keywords": "1"}, 1),
            ({"keywords": "chunk"}, 4),
            pytest.param({"keywords": "ragflow"}, 1, marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6509")),
            pytest.param({"keywords": "ragflow"}, 5, marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") != "infinity", reason="issues/6509")),
            ({"keywords": "unknown"}, 0),
        ],
    )
    def test_keywords(self, add_chunks, params, expected_page_size):
        # Keyword matching differs between doc engines (see issues/6509).
        _, document, _ = add_chunks
        chunks = document.list_chunks(**params)
        assert len(chunks) == expected_page_size, str(chunks)
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "chunk_id, expected_page_size, expected_message",
        [
            (None, 5, ""),
            ("", 5, ""),
            pytest.param(lambda r: r[0], 1, "", marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6499")),
            pytest.param("unknown", 0, """AttributeError("\'NoneType\' object has no attribute \'keys\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_id(self, add_chunks, chunk_id, expected_page_size, expected_message):
        # A callable chunk_id selects a real id from the fixture chunks.
        _, document, chunks = add_chunks
        chunk_ids = [chunk.id for chunk in chunks]
        if callable(chunk_id):
            params = {"id": chunk_id(chunk_ids)}
        else:
            params = {"id": chunk_id}
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.list_chunks(**params)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = document.list_chunks(**params)
            if params["id"] in [None, ""]:
                assert len(chunks) == expected_page_size, str(chunks)
            else:
                assert chunks[0].id == params["id"], str(chunks)
    @pytest.mark.p3
    def test_concurrent_list(self, add_chunks):
        # 100 parallel list calls must all succeed and each see the 5 fixture chunks.
        _, document, _ = add_chunks
        count = 100
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(document.list_chunks) for _ in range(count)]
            responses = list(as_completed(futures))
            assert len(responses) == count, responses
            assert all(len(future.result()) == 5 for future in futures)
    @pytest.mark.p1
    def test_default(self, add_document):
        # Default page size caps the listing at 30 even when 31+ chunks exist.
        _, document = add_document
        batch_add_chunks(document, 31)
        from time import sleep
        sleep(3)  # allow the doc engine to index the new chunks first
        chunks = document.list_chunks()
        assert len(chunks) == 30, str(chunks)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_session_management/test_list_sessions_with_chat_assistant.py | test/testcases/test_sdk_api/test_session_management/test_list_sessions_with_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from concurrent.futures import ThreadPoolExecutor, as_completed
class TestSessionsWithChatAssistantList:
    """Session listing for a chat assistant: pagination, ordering, name/id filters."""
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size, expected_message",
        [
            ({"page": None, "page_size": 2}, 0, "not instance of"),
            pytest.param({"page": 0, "page_size": 2}, 0, "ValueError('Search does not support negative slicing.')", marks=pytest.mark.skip),
            ({"page": 2, "page_size": 2}, 2, ""),
            ({"page": 3, "page_size": 2}, 1, ""),
            ({"page": "3", "page_size": 2}, 0, "not instance of"),
            pytest.param({"page": -1, "page_size": 2}, 0, "ValueError('Search does not support negative slicing.')", marks=pytest.mark.skip),
            pytest.param({"page": "a", "page_size": 2}, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_page(self, add_sessions_with_chat_assistant, params, expected_page_size, expected_message):
        # Unlike chunk listing, `page` must be a real int here ("3" is rejected).
        chat_assistant, _ = add_sessions_with_chat_assistant
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.list_sessions(**params)
            assert expected_message in str(exception_info.value)
        else:
            sessions = chat_assistant.list_sessions(**params)
            assert len(sessions) == expected_page_size
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size, expected_message",
        [
            ({"page_size": None}, 0, "not instance of"),
            ({"page_size": 0}, 0, ""),
            ({"page_size": 1}, 1, ""),
            ({"page_size": 6}, 5, ""),
            ({"page_size": "1"}, 0, "not instance of"),
            pytest.param({"page_size": -1}, 5, "", marks=pytest.mark.skip),
            pytest.param({"page_size": "a"}, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_page_size(self, add_sessions_with_chat_assistant, params, expected_page_size, expected_message):
        # `page_size` must be a real int as well; strings raise "not instance of".
        chat_assistant, _ = add_sessions_with_chat_assistant
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.list_sessions(**params)
            assert expected_message in str(exception_info.value)
        else:
            sessions = chat_assistant.list_sessions(**params)
            assert len(sessions) == expected_page_size
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params, expected_message",
        [
            ({"orderby": None}, "not instance of"),
            ({"orderby": "create_time"}, ""),
            ({"orderby": "update_time"}, ""),
            ({"orderby": "name", "desc": "False"}, "not instance of"),
            pytest.param({"orderby": "unknown"}, "orderby should be create_time or update_time", marks=pytest.mark.skip(reason="issues/")),
        ],
    )
    def test_orderby(self, add_sessions_with_chat_assistant, params, expected_message):
        chat_assistant, _ = add_sessions_with_chat_assistant
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.list_sessions(**params)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant.list_sessions(**params)
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params, expected_message",
        [
            ({"desc": None}, "not instance of"),
            ({"desc": "true"}, "not instance of"),
            ({"desc": "True"}, "not instance of"),
            ({"desc": True}, ""),
            ({"desc": "false"}, "not instance of"),
            ({"desc": "False"}, "not instance of"),
            ({"desc": False}, ""),
            ({"desc": "False", "orderby": "update_time"}, "not instance of"),
            pytest.param({"desc": "unknown"}, "desc should be true or false", marks=pytest.mark.skip(reason="issues/")),
        ],
    )
    def test_desc(self, add_sessions_with_chat_assistant, params, expected_message):
        # `desc` must be a real bool; every string form is rejected by the SDK.
        chat_assistant, _ = add_sessions_with_chat_assistant
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.list_sessions(**params)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant.list_sessions(**params)
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_num, expected_message",
        [
            ({"name": None}, 0, "not instance of"),
            ({"name": ""}, 5, ""),
            ({"name": "session_with_chat_assistant_1"}, 1, ""),
            ({"name": "unknown"}, 0, ""),
        ],
    )
    def test_name(self, add_sessions_with_chat_assistant, params, expected_num, expected_message):
        chat_assistant, _ = add_sessions_with_chat_assistant
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.list_sessions(**params)
            assert expected_message in str(exception_info.value)
        else:
            sessions = chat_assistant.list_sessions(**params)
            if params["name"] == "session_with_chat_assistant_1":
                assert sessions[0].name == params["name"]
            else:
                assert len(sessions) == expected_num
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "session_id, expected_num, expected_message",
        [
            (None, 0, "not instance of"),
            ("", 5, ""),
            (lambda r: r[0], 1, ""),
            ("unknown", 0, ""),
        ],
    )
    def test_id(self, add_sessions_with_chat_assistant, session_id, expected_num, expected_message):
        # A callable session_id selects a real id from the fixture sessions.
        chat_assistant, sessions = add_sessions_with_chat_assistant
        if callable(session_id):
            params = {"id": session_id([s.id for s in sessions])}
        else:
            params = {"id": session_id}
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.list_sessions(**params)
            assert expected_message in str(exception_info.value)
        else:
            list_sessions = chat_assistant.list_sessions(**params)
            if "id" in params and params["id"] == sessions[0].id:
                assert list_sessions[0].id == params["id"]
            else:
                assert len(list_sessions) == expected_num
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "session_id, name, expected_num, expected_message",
        [
            (lambda r: r[0], "session_with_chat_assistant_0", 1, ""),
            (lambda r: r[0], "session_with_chat_assistant_100", 0, ""),
            (lambda r: r[0], "unknown", 0, ""),
            ("id", "session_with_chat_assistant_0", 0, ""),
        ],
    )
    def test_name_and_id(self, add_sessions_with_chat_assistant, session_id, name, expected_num, expected_message):
        # id and name filters are combined: both must match the same session.
        chat_assistant, sessions = add_sessions_with_chat_assistant
        if callable(session_id):
            params = {"id": session_id([s.id for s in sessions]), "name": name}
        else:
            params = {"id": session_id, "name": name}
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.list_sessions(**params)
            assert expected_message in str(exception_info.value)
        else:
            list_sessions = chat_assistant.list_sessions(**params)
            assert len(list_sessions) == expected_num
    @pytest.mark.p3
    def test_concurrent_list(self, add_sessions_with_chat_assistant):
        # 100 parallel list calls must all complete without error.
        count = 100
        chat_assistant, _ = add_sessions_with_chat_assistant
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(chat_assistant.list_sessions) for _ in range(count)]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses
    @pytest.mark.p3
    def test_list_chats_after_deleting_associated_chat_assistant(self, client, add_sessions_with_chat_assistant):
        # Listing sessions of a deleted assistant must raise an ownership error.
        chat_assistant, _ = add_sessions_with_chat_assistant
        client.delete_chats(ids=[chat_assistant.id])
        with pytest.raises(Exception) as exception_info:
            chat_assistant.list_sessions()
        assert "You don't own the assistant" in str(exception_info.value)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_session_management/test_delete_sessions_with_chat_assistant.py | test/testcases/test_sdk_api/test_session_management/test_delete_sessions_with_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_add_sessions_with_chat_assistant
class TestSessionWithChatAssistantDelete:
    """Session deletion: invalid ids, repeated/duplicate deletes, concurrency, bulk."""
    @pytest.mark.parametrize(
        "payload",
        [
            pytest.param(lambda r: {"ids": ["invalid_id"] + r}, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r[:1] + ["invalid_id"] + r[1:5]}, marks=pytest.mark.p1),
            pytest.param(lambda r: {"ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
        ],
    )
    def test_delete_partial_invalid_id(self, add_sessions_with_chat_assistant_func, payload):
        # Invalid ids mixed into the request are ignored; valid sessions are removed.
        chat_assistant, sessions = add_sessions_with_chat_assistant_func
        if callable(payload):
            payload = payload([session.id for session in sessions])
        chat_assistant.delete_sessions(**payload)
        sessions = chat_assistant.list_sessions()
        assert len(sessions) == 0
    @pytest.mark.p3
    def test_repeated_deletion(self, add_sessions_with_chat_assistant_func):
        # A second delete of the same ids raises an ownership error.
        chat_assistant, sessions = add_sessions_with_chat_assistant_func
        session_ids = {"ids": [session.id for session in sessions]}
        chat_assistant.delete_sessions(**session_ids)
        with pytest.raises(Exception) as exception_info:
            chat_assistant.delete_sessions(**session_ids)
        assert "The chat doesn't own the session" in str(exception_info.value)
    @pytest.mark.p3
    def test_duplicate_deletion(self, add_sessions_with_chat_assistant_func):
        # Duplicated ids inside one request are tolerated.
        chat_assistant, sessions = add_sessions_with_chat_assistant_func
        session_ids = {"ids": [session.id for session in sessions] * 2}
        chat_assistant.delete_sessions(**session_ids)
        sessions = chat_assistant.list_sessions()
        assert len(sessions) == 0
    @pytest.mark.p3
    def test_concurrent_deletion(self, add_chat_assistants):
        # 100 single-id deletions fanned out over 5 threads must all complete.
        count = 100
        _, _, chat_assistants = add_chat_assistants
        chat_assistant = chat_assistants[0]
        sessions = batch_add_sessions_with_chat_assistant(chat_assistant, count)
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(chat_assistant.delete_sessions, ids=[sessions[i].id]) for i in range(count)]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses
    @pytest.mark.p3
    def test_delete_1k(self, add_chat_assistants):
        # Bulk path: add, then delete, 1000 sessions in one request.
        count = 1_000
        _, _, chat_assistants = add_chat_assistants
        chat_assistant = chat_assistants[0]
        # NOTE(review): "ssessions" looks like a typo for "sessions"; rename in a follow-up.
        ssessions = batch_add_sessions_with_chat_assistant(chat_assistant, count)
        chat_assistant.delete_sessions(ids=[ssession.id for ssession in ssessions])
        sessions = chat_assistant.list_sessions()
        assert len(sessions) == 0
    @pytest.mark.parametrize(
        "payload, expected_message, remaining",
        [
            pytest.param(None, """TypeError("argument of type \'NoneType\' is not iterable")""", 0, marks=pytest.mark.skip),
            pytest.param({"ids": ["invalid_id"]}, "The chat doesn't own the session invalid_id", 5, marks=pytest.mark.p3),
            pytest.param("not json", """AttributeError("\'str\' object has no attribute \'get\'")""", 5, marks=pytest.mark.skip),
            pytest.param(lambda r: {"ids": r[:1]}, "", 4, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r}, "", 0, marks=pytest.mark.p1),
            pytest.param({"ids": []}, "", 0, marks=pytest.mark.p3),
        ],
    )
    def test_basic_scenarios(self, add_sessions_with_chat_assistant_func, payload, expected_message, remaining):
        # Table-driven delete scenarios; ids=[] deletes every session.
        chat_assistant, sessions = add_sessions_with_chat_assistant_func
        if callable(payload):
            payload = payload([session.id for session in sessions])
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.delete_sessions(**payload)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant.delete_sessions(**payload)
            sessions = chat_assistant.list_sessions()
            assert len(sessions) == remaining
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_session_management/conftest.py | test/testcases/test_sdk_api/test_session_management/conftest.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import batch_add_sessions_with_chat_assistant
from pytest import FixtureRequest
from ragflow_sdk import Chat, DataSet, Document, Session
@pytest.fixture(scope="class")
def add_sessions_with_chat_assistant(request: FixtureRequest, add_chat_assistants: tuple[DataSet, Document, list[Chat]]) -> tuple[Chat, list[Session]]:
    """Class-scoped fixture: five sessions on the first chat assistant.

    Returns (chat_assistant, sessions). On class teardown, every assistant's
    sessions are removed best-effort (the assistant may already be deleted).
    """
    def cleanup():
        for chat_assistant in chat_assistants:
            try:
                chat_assistant.delete_sessions(ids=None)
            except Exception as e:
                # Teardown is best-effort; just report and carry on.
                print(f"Exception: {e}")
    request.addfinalizer(cleanup)
    _, _, chat_assistants = add_chat_assistants
    return chat_assistants[0], batch_add_sessions_with_chat_assistant(chat_assistants[0], 5)
@pytest.fixture(scope="function")
def add_sessions_with_chat_assistant_func(request: FixtureRequest, add_chat_assistants: tuple[DataSet, Document, list[Chat]]) -> tuple[Chat, list[Session]]:
    """Per-test fixture: five fresh sessions on the first chat assistant.

    Returns (chat_assistant, sessions); teardown removes all sessions of every
    assistant, tolerating failures (e.g. the assistant was already deleted).
    """
    def cleanup():
        for assistant in chat_assistants:
            try:
                assistant.delete_sessions(ids=None)
            except Exception as exc:
                print(f"Exception: {exc}")
    request.addfinalizer(cleanup)
    _, _, chat_assistants = add_chat_assistants
    first_assistant = chat_assistants[0]
    return first_assistant, batch_add_sessions_with_chat_assistant(first_assistant, 5)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_session_management/test_create_session_with_chat_assistant.py | test/testcases/test_sdk_api/test_session_management/test_create_session_with_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from configs import SESSION_WITH_CHAT_NAME_LIMIT
@pytest.mark.usefixtures("clear_session_with_chat_assistants")
class TestSessionWithChatAssistantCreate:
    """Session creation: name validation, duplicates, concurrency, deleted parent."""
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "name, expected_message",
        [
            ("valid_name", ""),
            pytest.param("a" * (SESSION_WITH_CHAT_NAME_LIMIT + 1), "", marks=pytest.mark.skip(reason="issues/")),
            pytest.param(1, "", marks=pytest.mark.skip(reason="issues/")),
            ("", "`name` can not be empty."),
            ("duplicated_name", ""),
            ("case insensitive", ""),
        ],
    )
    def test_name(self, add_chat_assistants, name, expected_message):
        # Duplicate and case-variant names are allowed; empty names are rejected.
        _, _, chat_assistants = add_chat_assistants
        chat_assistant = chat_assistants[0]
        if name == "duplicated_name":
            chat_assistant.create_session(name=name)
        elif name == "case insensitive":
            chat_assistant.create_session(name=name.upper())
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.create_session(name=name)
            assert expected_message in str(exception_info.value)
        else:
            session = chat_assistant.create_session(name=name)
            assert session.name == name, str(session)
            assert session.chat_id == chat_assistant.id, str(session)
    @pytest.mark.p3
    def test_concurrent_create_session(self, add_chat_assistants):
        # 1000 concurrent creations must all succeed and all be listable afterwards.
        count = 1000
        _, _, chat_assistants = add_chat_assistants
        chat_assistant = chat_assistants[0]
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(chat_assistant.create_session, name=f"session with chat assistant test {i}") for i in range(count)]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses
        updated_sessions = chat_assistant.list_sessions(page_size=count * 2)
        assert len(updated_sessions) == count
    @pytest.mark.p3
    def test_add_session_to_deleted_chat_assistant(self, client, add_chat_assistants):
        # Creating a session under a deleted assistant must raise an ownership error.
        _, _, chat_assistants = add_chat_assistants
        chat_assistant = chat_assistants[0]
        client.delete_chats(ids=[chat_assistant.id])
        with pytest.raises(Exception) as exception_info:
            chat_assistant.create_session(name="valid_name")
        assert "You do not own the assistant" in str(exception_info.value)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_session_management/test_update_session_with_chat_assistant.py | test/testcases/test_sdk_api/test_session_management/test_update_session_with_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
from random import randint
import pytest
from configs import SESSION_WITH_CHAT_NAME_LIMIT
class TestSessionWithChatAssistantUpdate:
    """Session rename/update: validation, repeats, concurrency, deleted parent."""
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            pytest.param({"name": "valid_name"}, "", marks=pytest.mark.p1),
            pytest.param({"name": "a" * (SESSION_WITH_CHAT_NAME_LIMIT + 1)}, "", marks=pytest.mark.skip(reason="issues/")),
            pytest.param({"name": 1}, "", marks=pytest.mark.skip(reason="issues/")),
            pytest.param({"name": ""}, "`name` can not be empty.", marks=pytest.mark.p3),
            pytest.param({"name": "duplicated_name"}, "", marks=pytest.mark.p3),
            pytest.param({"name": "case insensitive"}, "", marks=pytest.mark.p3),
        ],
    )
    def test_name(self, add_sessions_with_chat_assistant_func, payload, expected_message):
        # Pre-create a clash for the duplicate / case-insensitive scenarios.
        chat_assistant, sessions = add_sessions_with_chat_assistant_func
        session = sessions[0]
        if payload["name"] == "duplicated_name":
            session.update(payload)
        elif payload["name"] == "case insensitive":
            session.update({"name": payload["name"].upper()})
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                session.update(payload)
            assert expected_message in str(exception_info.value)
        else:
            session.update(payload)
            updated_session = chat_assistant.list_sessions(id=session.id)[0]
            assert updated_session.name == payload["name"]
    @pytest.mark.p3
    def test_repeated_update_session(self, add_sessions_with_chat_assistant_func):
        # Renaming the same session twice in a row must not error.
        _, sessions = add_sessions_with_chat_assistant_func
        session = sessions[0]
        session.update({"name": "valid_name_1"})
        session.update({"name": "valid_name_2"})
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            pytest.param({"unknown_key": "unknown_value"}, "ValueError", marks=pytest.mark.skip),
            ({}, ""),
            pytest.param(None, "TypeError", marks=pytest.mark.skip),
        ],
    )
    def test_invalid_params(self, add_sessions_with_chat_assistant_func, payload, expected_message):
        _, sessions = add_sessions_with_chat_assistant_func
        session = sessions[0]
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                session.update(payload)
            assert expected_message in str(exception_info.value)
        else:
            session.update(payload)
    @pytest.mark.p3
    def test_concurrent_update_session(self, add_sessions_with_chat_assistant_func):
        # 50 renames spread randomly over the 5 fixture sessions, 5 threads.
        count = 50
        _, sessions = add_sessions_with_chat_assistant_func
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(sessions[randint(0, 4)].update, {"name": f"update session test {i}"}) for i in range(count)]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses
    @pytest.mark.p3
    def test_update_session_to_deleted_chat_assistant(self, client, add_sessions_with_chat_assistant_func):
        # Updating a session whose parent assistant was deleted must raise.
        chat_assistant, sessions = add_sessions_with_chat_assistant_func
        client.delete_chats(ids=[chat_assistant.id])
        with pytest.raises(Exception) as exception_info:
            sessions[0].update({"name": "valid_name"})
        assert "You do not own the session" in str(exception_info.value)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_file_management_within_dataset/test_stop_parse_documents.py | test/testcases/test_sdk_api/test_file_management_within_dataset/test_stop_parse_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
def validate_document_parse_done(dataset, document_ids):
    """Assert that every document listed in *document_ids* parsed successfully.

    Documents in the dataset whose id is not in *document_ids* are ignored.
    """
    for doc in dataset.list_documents(page_size=1000):
        if doc.id not in document_ids:
            continue
        assert doc.run == "DONE"
        assert len(doc.process_begin_at) > 0
        assert doc.process_duration > 0
        assert doc.progress > 0
        assert "Task done" in doc.progress_msg
def validate_document_parse_cancel(dataset, document_ids):
    """Assert that every document in *document_ids* was cancelled before progressing.

    Fix: the original accepted *document_ids* but never used it, asserting on
    every document in the dataset. Now only the listed documents are checked,
    mirroring validate_document_parse_done.
    """
    documents = dataset.list_documents(page_size=1000)
    for document in documents:
        if document.id not in document_ids:
            continue
        assert document.run == "CANCEL"
        assert len(document.process_begin_at) > 0
        assert document.progress == 0.0
@pytest.mark.skip
class TestDocumentsParseStop:
    """Placeholder for stop-parsing scenarios; skipped until implemented."""
    pass
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_file_management_within_dataset/test_download_document.py | test/testcases/test_sdk_api/test_file_management_within_dataset/test_download_document.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import bulk_upload_documents
from utils import compare_by_hash
@pytest.mark.p1
@pytest.mark.parametrize(
"generate_test_files",
[
"docx",
"excel",
"ppt",
"image",
"pdf",
"txt",
"md",
"json",
"eml",
"html",
],
indirect=True,
)
def test_file_type_validation(add_dataset, generate_test_files, request):
dataset = add_dataset
fp = generate_test_files[request.node.callspec.params["generate_test_files"]]
with fp.open("rb") as f:
blob = f.read()
documents = dataset.upload_documents([{"display_name": fp.name, "blob": blob}])
for document in documents:
with fp.with_stem("ragflow_test_download").open("wb") as f:
f.write(document.download())
assert compare_by_hash(fp, fp.with_stem("ragflow_test_download"))
class TestDocumentDownload:
@pytest.mark.p3
def test_same_file_repeat(self, add_documents, tmp_path, ragflow_tmp_dir):
num = 5
_, documents = add_documents
for i in range(num):
download_path = tmp_path / f"ragflow_test_download_{i}.txt"
with download_path.open("wb") as f:
f.write(documents[0].download())
assert compare_by_hash(ragflow_tmp_dir / "ragflow_test_upload_0.txt", download_path), f"Downloaded file {i} does not match original"
@pytest.mark.p3
def test_concurrent_download(add_dataset, tmp_path):
count = 20
dataset = add_dataset
documents = bulk_upload_documents(dataset, count, tmp_path)
def download_doc(document, i):
download_path = tmp_path / f"ragflow_test_download_{i}.txt"
with download_path.open("wb") as f:
f.write(document.download())
# assert compare_by_hash(tmp_path / f"ragflow_test_upload_{i}.txt", download_path), str(download_path)
with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(download_doc, documents[i], i) for i in range(count)]
responses = list(as_completed(futures))
assert len(responses) == count, responses
for i in range(count):
assert compare_by_hash(
tmp_path / f"ragflow_test_upload_{i}.txt",
tmp_path / f"ragflow_test_download_{i}.txt",
)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_file_management_within_dataset/test_upload_documents.py | test/testcases/test_sdk_api/test_file_management_within_dataset/test_upload_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import string
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from configs import DOCUMENT_NAME_LIMIT
from utils.file_utils import create_txt_file
class TestDocumentsUpload:
@pytest.mark.p1
def test_valid_single_upload(self, add_dataset_func, tmp_path):
dataset = add_dataset_func
fp = create_txt_file(tmp_path / "ragflow_test.txt")
with fp.open("rb") as f:
blob = f.read()
documents = dataset.upload_documents([{"display_name": fp.name, "blob": blob}])
for document in documents:
assert document.dataset_id == dataset.id, str(document)
assert document.name == fp.name, str(document)
@pytest.mark.p1
@pytest.mark.parametrize(
"generate_test_files",
[
"docx",
"excel",
"ppt",
"image",
"pdf",
"txt",
"md",
"json",
"eml",
"html",
],
indirect=True,
)
def test_file_type_validation(self, add_dataset_func, generate_test_files, request):
dataset = add_dataset_func
fp = generate_test_files[request.node.callspec.params["generate_test_files"]]
with fp.open("rb") as f:
blob = f.read()
documents = dataset.upload_documents([{"display_name": fp.name, "blob": blob}])
for document in documents:
assert document.dataset_id == dataset.id, str(document)
assert document.name == fp.name, str(document)
@pytest.mark.p2
@pytest.mark.parametrize(
"file_type",
["exe", "unknown"],
)
def test_unsupported_file_type(self, add_dataset_func, tmp_path, file_type):
dataset = add_dataset_func
fp = tmp_path / f"ragflow_test.{file_type}"
fp.touch()
with fp.open("rb") as f:
blob = f.read()
with pytest.raises(Exception) as exception_info:
dataset.upload_documents([{"display_name": fp.name, "blob": blob}])
assert str(exception_info.value) == f"ragflow_test.{file_type}: This type of file has not been supported yet!", str(exception_info.value)
@pytest.mark.p2
def test_missing_file(self, add_dataset_func):
dataset = add_dataset_func
with pytest.raises(Exception) as exception_info:
dataset.upload_documents([])
assert str(exception_info.value) == "No file part!", str(exception_info.value)
@pytest.mark.p3
def test_empty_file(self, add_dataset_func, tmp_path):
dataset = add_dataset_func
fp = tmp_path / "empty.txt"
fp.touch()
with fp.open("rb") as f:
blob = f.read()
documents = dataset.upload_documents([{"display_name": fp.name, "blob": blob}])
for document in documents:
assert document.size == 0, str(document)
@pytest.mark.p3
def test_filename_empty(self, add_dataset_func, tmp_path):
dataset = add_dataset_func
fp = create_txt_file(tmp_path / "ragflow_test.txt")
with fp.open("rb") as f:
blob = f.read()
with pytest.raises(Exception) as exception_info:
dataset.upload_documents([{"display_name": "", "blob": blob}])
assert str(exception_info.value) == "No file selected!", str(exception_info.value)
@pytest.mark.p2
def test_filename_max_length(self, add_dataset_func, tmp_path):
dataset = add_dataset_func
fp = create_txt_file(tmp_path / f"{'a' * (DOCUMENT_NAME_LIMIT - 4)}.txt")
with fp.open("rb") as f:
blob = f.read()
documents = dataset.upload_documents([{"display_name": fp.name, "blob": blob}])
for document in documents:
assert document.dataset_id == dataset.id, str(document)
assert document.name == fp.name, str(document)
@pytest.mark.p2
def test_duplicate_files(self, add_dataset_func, tmp_path):
dataset = add_dataset_func
fp = create_txt_file(tmp_path / "ragflow_test.txt")
with fp.open("rb") as f:
blob = f.read()
documents = dataset.upload_documents([{"display_name": fp.name, "blob": blob}, {"display_name": fp.name, "blob": blob}])
assert len(documents) == 2, str(documents)
for i, document in enumerate(documents):
assert document.dataset_id == dataset.id, str(document)
expected_name = fp.name if i == 0 else f"{fp.stem}({i}){fp.suffix}"
assert document.name == expected_name, str(document)
@pytest.mark.p2
def test_same_file_repeat(self, add_dataset_func, tmp_path):
dataset = add_dataset_func
fp = create_txt_file(tmp_path / "ragflow_test.txt")
with fp.open("rb") as f:
blob = f.read()
for i in range(3):
documents = dataset.upload_documents([{"display_name": fp.name, "blob": blob}])
assert len(documents) == 1, str(documents)
document = documents[0]
assert document.dataset_id == dataset.id, str(document)
expected_name = fp.name if i == 0 else f"{fp.stem}({i}){fp.suffix}"
assert document.name == expected_name, str(document)
@pytest.mark.p3
def test_filename_special_characters(self, add_dataset_func, tmp_path):
dataset = add_dataset_func
illegal_chars = '<>:"/\\|?*'
translation_table = str.maketrans({char: "_" for char in illegal_chars})
safe_filename = string.punctuation.translate(translation_table)
fp = tmp_path / f"{safe_filename}.txt"
fp.write_text("Sample text content")
with fp.open("rb") as f:
blob = f.read()
documents = dataset.upload_documents([{"display_name": fp.name, "blob": blob}])
assert len(documents) == 1, str(documents)
document = documents[0]
assert document.dataset_id == dataset.id, str(document)
assert document.name == fp.name, str(document)
@pytest.mark.p1
def test_multiple_files(self, client, add_dataset_func, tmp_path):
dataset = add_dataset_func
expected_document_count = 20
document_infos = []
for i in range(expected_document_count):
fp = create_txt_file(tmp_path / f"ragflow_test_upload_{i}.txt")
with fp.open("rb") as f:
blob = f.read()
document_infos.append({"display_name": fp.name, "blob": blob})
documents = dataset.upload_documents(document_infos)
assert len(documents) == expected_document_count, str(documents)
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.document_count == expected_document_count, str(retrieved_dataset)
@pytest.mark.p3
def test_concurrent_upload(self, client, add_dataset_func, tmp_path):
dataset = add_dataset_func
count = 20
fps = [create_txt_file(tmp_path / f"ragflow_test_{i}.txt") for i in range(count)]
def upload_file(fp):
with fp.open("rb") as f:
blob = f.read()
return dataset.upload_documents([{"display_name": fp.name, "blob": blob}])
with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(upload_file, fp) for fp in fps]
responses = list(as_completed(futures))
assert len(responses) == count, responses
retrieved_dataset = client.get_dataset(name=dataset.name)
assert retrieved_dataset.document_count == count, str(retrieved_dataset)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_file_management_within_dataset/test_list_documents.py | test/testcases/test_sdk_api/test_file_management_within_dataset/test_list_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
class TestDocumentsList:
@pytest.mark.p1
def test_default(self, add_documents):
dataset, _ = add_documents
documents = dataset.list_documents()
assert len(documents) == 5, str(documents)
@pytest.mark.p1
@pytest.mark.parametrize(
"params, expected_page_size, expected_message",
[
({"page": None, "page_size": 2}, 2, "not instance of"),
({"page": 0, "page_size": 2}, 2, ""),
({"page": 2, "page_size": 2}, 2, ""),
({"page": 3, "page_size": 2}, 1, ""),
({"page": "3", "page_size": 2}, 1, "not instance of"),
pytest.param(
{"page": -1, "page_size": 2},
0,
"Invalid page number",
marks=pytest.mark.skip(reason="issues/5851"),
),
pytest.param(
{"page": "a", "page_size": 2},
0,
"Invalid page value",
marks=pytest.mark.skip(reason="issues/5851"),
),
],
)
def test_page(self, add_documents, params, expected_page_size, expected_message):
dataset, _ = add_documents
if expected_message:
with pytest.raises(Exception) as exception_info:
dataset.list_documents(**params)
assert expected_message in str(exception_info.value), str(exception_info.value)
else:
documents = dataset.list_documents(**params)
assert len(documents) == expected_page_size, str(documents)
@pytest.mark.p1
@pytest.mark.parametrize(
"params, expected_page_size, expected_message",
[
({"page_size": None}, 5, "not instance of"),
({"page_size": 0}, 0, ""),
({"page_size": 1}, 1, ""),
({"page_size": 6}, 5, ""),
({"page_size": "1"}, 1, "not instance of"),
pytest.param(
{"page_size": -1},
0,
"Invalid page size",
marks=pytest.mark.skip(reason="issues/5851"),
),
pytest.param(
{"page_size": "a"},
0,
"Invalid page size value",
marks=pytest.mark.skip(reason="issues/5851"),
),
],
)
def test_page_size(self, add_documents, params, expected_page_size, expected_message):
dataset, _ = add_documents
if expected_message:
with pytest.raises(Exception) as exception_info:
dataset.list_documents(**params)
assert expected_message in str(exception_info.value), str(exception_info.value)
else:
documents = dataset.list_documents(**params)
assert len(documents) == expected_page_size, str(documents)
@pytest.mark.p3
@pytest.mark.parametrize(
"params, expected_message",
[
({"orderby": None}, "not instance of"),
({"orderby": "create_time"}, ""),
({"orderby": "update_time"}, ""),
pytest.param({"orderby": "name", "desc": "False"}, "", marks=pytest.mark.skip(reason="issues/5851")),
pytest.param({"orderby": "unknown"}, "orderby should be create_time or update_time", marks=pytest.mark.skip(reason="issues/5851")),
],
)
def test_orderby(self, add_documents, params, expected_message):
dataset, _ = add_documents
if expected_message:
with pytest.raises(Exception) as exception_info:
dataset.list_documents(**params)
assert expected_message in str(exception_info.value), str(exception_info.value)
else:
dataset.list_documents(**params)
@pytest.mark.p3
@pytest.mark.parametrize(
"params, expected_message",
[
({"desc": None}, "not instance of"),
({"desc": "true"}, "not instance of"),
({"desc": "True"}, "not instance of"),
({"desc": True}, ""),
pytest.param({"desc": "false"}, "", marks=pytest.mark.skip(reason="issues/5851")),
({"desc": "False"}, "not instance of"),
({"desc": False}, ""),
({"desc": "False", "orderby": "update_time"}, "not instance of"),
pytest.param({"desc": "unknown"}, "desc should be true or false", marks=pytest.mark.skip(reason="issues/5851")),
],
)
def test_desc(self, add_documents, params, expected_message):
dataset, _ = add_documents
if expected_message:
with pytest.raises(Exception) as exception_info:
dataset.list_documents(**params)
assert expected_message in str(exception_info.value), str(exception_info.value)
else:
dataset.list_documents(**params)
@pytest.mark.p2
@pytest.mark.parametrize(
"params, expected_num",
[
({"keywords": None}, 5),
({"keywords": ""}, 5),
({"keywords": "0"}, 1),
({"keywords": "ragflow_test_upload"}, 5),
({"keywords": "unknown"}, 0),
],
)
def test_keywords(self, add_documents, params, expected_num):
dataset, _ = add_documents
documents = dataset.list_documents(**params)
assert len(documents) == expected_num, str(documents)
@pytest.mark.p1
@pytest.mark.parametrize(
"params, expected_num, expected_message",
[
({"name": None}, 5, ""),
({"name": ""}, 5, ""),
({"name": "ragflow_test_upload_0.txt"}, 1, ""),
({"name": "unknown.txt"}, 0, "You don't own the document unknown.txt"),
],
)
def test_name(self, add_documents, params, expected_num, expected_message):
dataset, _ = add_documents
if expected_message:
with pytest.raises(Exception) as exception_info:
dataset.list_documents(**params)
assert expected_message in str(exception_info.value), str(exception_info.value)
else:
documents = dataset.list_documents(**params)
assert len(documents) == expected_num, str(documents)
if params["name"] not in [None, ""]:
assert documents[0].name == params["name"], str(documents)
@pytest.mark.p1
@pytest.mark.parametrize(
"document_id, expected_num, expected_message",
[
(None, 5, ""),
("", 5, ""),
(lambda docs: docs[0].id, 1, ""),
("unknown.txt", 0, "You don't own the document unknown.txt"),
],
)
def test_id(self, add_documents, document_id, expected_num, expected_message):
dataset, documents = add_documents
if callable(document_id):
params = {"id": document_id(documents)}
else:
params = {"id": document_id}
if expected_message:
with pytest.raises(Exception) as exception_info:
dataset.list_documents(**params)
assert expected_message in str(exception_info.value), str(exception_info.value)
else:
documents = dataset.list_documents(**params)
assert len(documents) == expected_num, str(documents)
if params["id"] not in [None, ""]:
assert documents[0].id == params["id"], str(documents)
@pytest.mark.p3
@pytest.mark.parametrize(
"document_id, name, expected_num, expected_message",
[
(lambda docs: docs[0].id, "ragflow_test_upload_0.txt", 1, ""),
(lambda docs: docs[0].id, "ragflow_test_upload_1.txt", 0, ""),
(lambda docs: docs[0].id, "unknown", 0, "You don't own the document unknown"),
("invalid_id", "ragflow_test_upload_0.txt", 0, "You don't own the document invalid_id"),
],
)
def test_name_and_id(self, add_documents, document_id, name, expected_num, expected_message):
dataset, documents = add_documents
params = {"id": document_id(documents) if callable(document_id) else document_id, "name": name}
if expected_message:
with pytest.raises(Exception) as exception_info:
dataset.list_documents(**params)
assert expected_message in str(exception_info.value), str(exception_info.value)
else:
documents = dataset.list_documents(**params)
assert len(documents) == expected_num, str(documents)
@pytest.mark.p3
def test_concurrent_list(self, add_documents):
dataset, _ = add_documents
count = 100
def list_docs():
return dataset.list_documents()
with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(list_docs) for _ in range(count)]
responses = list(as_completed(futures))
assert len(responses) == count, responses
for future in futures:
docs = future.result()
assert len(docs) == 5, str(docs)
@pytest.mark.p3
def test_invalid_params(self, add_documents):
dataset, _ = add_documents
params = {"a": "b"}
with pytest.raises(TypeError) as exception_info:
dataset.list_documents(**params)
assert "got an unexpected keyword argument" in str(exception_info.value), str(exception_info.value)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_file_management_within_dataset/conftest.py | test/testcases/test_sdk_api/test_file_management_within_dataset/conftest.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import bulk_upload_documents
from pytest import FixtureRequest
from ragflow_sdk import DataSet, Document
@pytest.fixture(scope="function")
def add_document_func(request: FixtureRequest, add_dataset: DataSet, ragflow_tmp_dir) -> tuple[DataSet, Document]:
dataset = add_dataset
documents = bulk_upload_documents(dataset, 1, ragflow_tmp_dir)
def cleanup():
dataset.delete_documents(ids=None)
request.addfinalizer(cleanup)
return dataset, documents[0]
@pytest.fixture(scope="class")
def add_documents(request: FixtureRequest, add_dataset: DataSet, ragflow_tmp_dir) -> tuple[DataSet, list[Document]]:
dataset = add_dataset
documents = bulk_upload_documents(dataset, 5, ragflow_tmp_dir)
def cleanup():
dataset.delete_documents(ids=None)
request.addfinalizer(cleanup)
return dataset, documents
@pytest.fixture(scope="function")
def add_documents_func(request: FixtureRequest, add_dataset_func: DataSet, ragflow_tmp_dir) -> tuple[DataSet, list[Document]]:
dataset = add_dataset_func
documents = bulk_upload_documents(dataset, 3, ragflow_tmp_dir)
def cleanup():
dataset.delete_documents(ids=None)
request.addfinalizer(cleanup)
return dataset, documents
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_file_management_within_dataset/test_delete_documents.py | test/testcases/test_sdk_api/test_file_management_within_dataset/test_delete_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import bulk_upload_documents
class TestDocumentsDeletion:
@pytest.mark.p1
@pytest.mark.parametrize(
"payload, expected_message, remaining",
[
({"ids": None}, "", 0),
({"ids": []}, "", 0),
({"ids": ["invalid_id"]}, "Documents not found: ['invalid_id']", 3),
({"ids": ["\n!?。;!?\"'"]}, "Documents not found: ['\\n!?。;!?\"\\'']", 3),
("not json", "must be a mapping", 3),
(lambda r: {"ids": r[:1]}, "", 2),
(lambda r: {"ids": r}, "", 0),
],
)
def test_basic_scenarios(
self,
add_documents_func,
payload,
expected_message,
remaining,
):
dataset, documents = add_documents_func
if callable(payload):
payload = payload([document.id for document in documents])
if expected_message:
with pytest.raises(Exception) as exception_info:
dataset.delete_documents(**payload)
assert expected_message in str(exception_info.value), str(exception_info.value)
else:
dataset.delete_documents(**payload)
documents = dataset.list_documents()
assert len(documents) == remaining, str(documents)
@pytest.mark.p2
@pytest.mark.parametrize(
"payload",
[
lambda r: {"ids": ["invalid_id"] + r},
lambda r: {"ids": r[:1] + ["invalid_id"] + r[1:3]},
lambda r: {"ids": r + ["invalid_id"]},
],
)
def test_delete_partial_invalid_id(self, add_documents_func, payload):
dataset, documents = add_documents_func
payload = payload([document.id for document in documents])
with pytest.raises(Exception) as exception_info:
dataset.delete_documents(**payload)
assert "Documents not found: ['invalid_id']" in str(exception_info.value), str(exception_info.value)
documents = dataset.list_documents()
assert len(documents) == 0, str(documents)
@pytest.mark.p2
def test_repeated_deletion(self, add_documents_func):
dataset, documents = add_documents_func
document_ids = [document.id for document in documents]
dataset.delete_documents(ids=document_ids)
with pytest.raises(Exception) as exception_info:
dataset.delete_documents(ids=document_ids)
assert "Documents not found" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
def test_duplicate_deletion(self, add_documents_func):
dataset, documents = add_documents_func
document_ids = [document.id for document in documents]
dataset.delete_documents(ids=document_ids + document_ids)
assert len(dataset.list_documents()) == 0, str(dataset.list_documents())
@pytest.mark.p3
def test_concurrent_deletion(add_dataset, tmp_path):
count = 100
dataset = add_dataset
documents = bulk_upload_documents(dataset, count, tmp_path)
def delete_doc(doc_id):
dataset.delete_documents(ids=[doc_id])
with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(delete_doc, doc.id) for doc in documents]
responses = list(as_completed(futures))
assert len(responses) == count, responses
@pytest.mark.p3
def test_delete_1k(add_dataset, tmp_path):
count = 1_000
dataset = add_dataset
documents = bulk_upload_documents(dataset, count, tmp_path)
assert len(dataset.list_documents(page_size=count * 2)) == count
dataset.delete_documents(ids=[doc.id for doc in documents])
assert len(dataset.list_documents()) == 0
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_file_management_within_dataset/test_update_document.py | test/testcases/test_sdk_api/test_file_management_within_dataset/test_update_document.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from configs import DOCUMENT_NAME_LIMIT
from ragflow_sdk import DataSet
from configs import DEFAULT_PARSER_CONFIG
class TestDocumentsUpdated:
@pytest.mark.p1
@pytest.mark.parametrize(
"name, expected_message",
[
("new_name.txt", ""),
(f"{'a' * (DOCUMENT_NAME_LIMIT - 4)}.txt", ""),
(0, "AttributeError"),
(None, "AttributeError"),
("", "The extension of file can't be changed"),
("ragflow_test_upload_0", "The extension of file can't be changed"),
("ragflow_test_upload_1.txt", "Duplicated document name in the same dataset"),
("RAGFLOW_TEST_UPLOAD_1.TXT", ""),
],
)
def test_name(self, add_documents, name, expected_message):
dataset, documents = add_documents
document = documents[0]
if expected_message:
with pytest.raises(Exception) as exception_info:
document.update({"name": name})
assert expected_message in str(exception_info.value), str(exception_info.value)
else:
document.update({"name": name})
updated_doc = dataset.list_documents(id=document.id)[0]
assert updated_doc.name == name, str(updated_doc)
@pytest.mark.p3
@pytest.mark.parametrize(
"meta_fields, expected_message",
[
({"test": "test"}, ""),
("test", "meta_fields must be a dictionary"),
],
)
def test_meta_fields(self, add_documents, meta_fields, expected_message):
_, documents = add_documents
document = documents[0]
if expected_message:
with pytest.raises(Exception) as exception_info:
document.update({"meta_fields": meta_fields})
assert expected_message in str(exception_info.value), str(exception_info.value)
else:
document.update({"meta_fields": meta_fields})
@pytest.mark.p2
@pytest.mark.parametrize(
"chunk_method, expected_message",
[
("naive", ""),
("manual", ""),
("qa", ""),
("table", ""),
("paper", ""),
("book", ""),
("laws", ""),
("presentation", ""),
("picture", ""),
("one", ""),
("knowledge_graph", ""),
("email", ""),
("tag", ""),
("", "`chunk_method` doesn't exist"),
("other_chunk_method", "`chunk_method` other_chunk_method doesn't exist"),
],
)
def test_chunk_method(self, add_documents, chunk_method, expected_message):
dataset, documents = add_documents
document = documents[0]
if expected_message:
with pytest.raises(Exception) as exception_info:
document.update({"chunk_method": chunk_method})
assert expected_message in str(exception_info.value), str(exception_info.value)
else:
document.update({"chunk_method": chunk_method})
updated_doc = dataset.list_documents(id=document.id)[0]
assert updated_doc.chunk_method == chunk_method, str(updated_doc)
@pytest.mark.p3
@pytest.mark.parametrize(
"payload, expected_message",
[
({"chunk_count": 1}, "Can't change `chunk_count`"),
pytest.param(
{"create_date": "Fri, 14 Mar 2025 16:53:42 GMT"},
"The input parameters are invalid",
marks=pytest.mark.skip(reason="issues/6104"),
),
pytest.param(
{"create_time": 1},
"The input parameters are invalid",
marks=pytest.mark.skip(reason="issues/6104"),
),
pytest.param(
{"created_by": "ragflow_test"},
"The input parameters are invalid",
marks=pytest.mark.skip(reason="issues/6104"),
),
pytest.param(
{"dataset_id": "ragflow_test"},
"The input parameters are invalid",
marks=pytest.mark.skip(reason="issues/6104"),
),
pytest.param(
{"id": "ragflow_test"},
"The input parameters are invalid",
marks=pytest.mark.skip(reason="issues/6104"),
),
pytest.param(
{"location": "ragflow_test.txt"},
"The input parameters are invalid",
marks=pytest.mark.skip(reason="issues/6104"),
),
pytest.param(
{"process_begin_at": 1},
"The input parameters are invalid",
marks=pytest.mark.skip(reason="issues/6104"),
),
pytest.param(
{"process_duration": 1.0},
"The input parameters are invalid",
marks=pytest.mark.skip(reason="issues/6104"),
),
({"progress": 1.0}, "Can't change `progress`"),
pytest.param(
{"progress_msg": "ragflow_test"},
"The input parameters are invalid",
marks=pytest.mark.skip(reason="issues/6104"),
),
pytest.param(
{"run": "ragflow_test"},
"The input parameters are invalid",
marks=pytest.mark.skip(reason="issues/6104"),
),
pytest.param(
{"size": 1},
"The input parameters are invalid",
marks=pytest.mark.skip(reason="issues/6104"),
),
pytest.param(
{"source_type": "ragflow_test"},
"The input parameters are invalid",
marks=pytest.mark.skip(reason="issues/6104"),
),
pytest.param(
{"thumbnail": "ragflow_test"},
"The input parameters are invalid",
marks=pytest.mark.skip(reason="issues/6104"),
),
({"token_count": 1}, "Can't change `token_count`"),
pytest.param(
{"type": "ragflow_test"},
"The input parameters are invalid",
marks=pytest.mark.skip(reason="issues/6104"),
),
pytest.param(
{"update_date": "Fri, 14 Mar 2025 16:33:17 GMT"},
"The input parameters are invalid",
marks=pytest.mark.skip(reason="issues/6104"),
),
pytest.param(
{"update_time": 1},
"The input parameters are invalid",
marks=pytest.mark.skip(reason="issues/6104"),
),
],
)
def test_invalid_field(self, add_documents, payload, expected_message):
_, documents = add_documents
document = documents[0]
with pytest.raises(Exception) as exception_info:
document.update(payload)
assert expected_message in str(exception_info.value), str(exception_info.value)
class TestUpdateDocumentParserConfig:
    """Validation matrix for ``parser_config`` updates on a document.

    Cases with an empty ``expected_message`` must succeed and the stored
    config must round-trip; non-empty messages must raise. Many boundary
    cases are skipped pending issues/6098.
    """

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "chunk_method, parser_config, expected_message",
        [
            # empty config: server falls back to defaults
            ("naive", {}, ""),
            (
                "naive",
                DEFAULT_PARSER_CONFIG,
                "",
            ),
            # chunk_token_num boundaries (skipped: issues/6098)
            pytest.param(
                "naive",
                {"chunk_token_num": -1},
                "chunk_token_num should be in range from 1 to 100000000",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"chunk_token_num": 0},
                "chunk_token_num should be in range from 1 to 100000000",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"chunk_token_num": 100000000},
                "chunk_token_num should be in range from 1 to 100000000",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"chunk_token_num": 3.14},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"chunk_token_num": "1024"},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            ("naive", {"layout_recognize": "DeepDOC"}, ""),
            ("naive", {"layout_recognize": "Naive"}, ""),
            ("naive", {"html4excel": True}, ""),
            ("naive", {"html4excel": False}, ""),
            pytest.param(
                "naive",
                {"html4excel": 1},
                "html4excel should be True or False",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            ("naive", {"delimiter": ""}, ""),
            ("naive", {"delimiter": "`##`"}, ""),
            pytest.param(
                "naive",
                {"delimiter": 1},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            # task_page_size boundaries (skipped: issues/6098)
            pytest.param(
                "naive",
                {"task_page_size": -1},
                "task_page_size should be in range from 1 to 100000000",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"task_page_size": 0},
                "task_page_size should be in range from 1 to 100000000",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"task_page_size": 100000000},
                "task_page_size should be in range from 1 to 100000000",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"task_page_size": 3.14},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"task_page_size": "1024"},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            # full RAPTOR sub-config round-trip
            ("naive", {"raptor": {"use_raptor": True,
                                  "prompt": "Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize.",
                                  "max_token": 256,
                                  "threshold": 0.1,
                                  "max_cluster": 64,
                                  "random_seed": 0,}}, ""),
            ("naive", {"raptor": {"use_raptor": False}}, ""),
            pytest.param(
                "naive",
                {"invalid_key": "invalid_value"},
                "Abnormal 'parser_config'. Invalid key: invalid_key",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            # auto_keywords boundaries (skipped: issues/6098)
            pytest.param(
                "naive",
                {"auto_keywords": -1},
                "auto_keywords should be in range from 0 to 32",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"auto_keywords": 32},
                "auto_keywords should be in range from 0 to 32",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"auto_keywords": 3.14},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"auto_keywords": "1024"},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            # auto_questions boundaries (skipped: issues/6098)
            pytest.param(
                "naive",
                {"auto_questions": -1},
                "auto_questions should be in range from 0 to 10",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"auto_questions": 10},
                "auto_questions should be in range from 0 to 10",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"auto_questions": 3.14},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"auto_questions": "1024"},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            # topn_tags boundaries (skipped: issues/6098)
            pytest.param(
                "naive",
                {"topn_tags": -1},
                "topn_tags should be in range from 0 to 10",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"topn_tags": 10},
                "topn_tags should be in range from 0 to 10",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"topn_tags": 3.14},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"topn_tags": "1024"},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
        ],
    )
    def test_parser_config(self, client, add_documents, chunk_method, parser_config, expected_message):
        """Apply a parser_config update and verify it errored or round-tripped."""
        dataset, documents = add_documents
        document = documents[0]
        from operator import attrgetter
        update_data = {"chunk_method": chunk_method, "parser_config": parser_config}
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.update(update_data)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            document.update(update_data)
            # re-fetch to check what the server actually persisted
            updated_doc = dataset.list_documents(id=document.id)[0]
            if parser_config:
                # every submitted key (including nested dicts like raptor.*)
                # must be readable as an attribute on the stored config
                for k, v in parser_config.items():
                    if isinstance(v, dict):
                        for kk, vv in v.items():
                            assert attrgetter(f"{k}.{kk}")(updated_doc.parser_config) == vv, str(updated_doc)
                    else:
                        assert attrgetter(k)(updated_doc.parser_config) == v, str(updated_doc)
            else:
                # empty config submitted -> server must fall back to defaults
                expected_config = DataSet.ParserConfig(
                    client,
                    DEFAULT_PARSER_CONFIG,
                )
                assert str(updated_doc.parser_config) == str(expected_config), str(updated_doc)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_sdk_api/test_file_management_within_dataset/test_parse_documents.py | test/testcases/test_sdk_api/test_file_management_within_dataset/test_parse_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import bulk_upload_documents
from ragflow_sdk import DataSet
from utils import wait_for
@wait_for(30, 1, "Document parsing timeout")
def condition(_dataset: DataSet, _document_ids: list[str] = None):
    """Poll until the targeted documents report run state DONE.

    With ``_document_ids=None`` every document in the dataset must be
    DONE; otherwise only the listed ids are checked.
    """
    documents = _dataset.list_documents(page_size=1000)
    if _document_ids is None:
        return all(doc.run == "DONE" for doc in documents)
    target_ids = set(_document_ids)
    return all(doc.run == "DONE" for doc in documents if doc.id in target_ids)
def validate_document_details(dataset, document_ids):
    """Assert post-parse bookkeeping fields for each listed document."""
    for document in dataset.list_documents(page_size=1000):
        if document.id not in document_ids:
            continue
        assert document.run == "DONE"
        assert len(document.process_begin_at) > 0
        assert document.process_duration > 0
        assert document.progress > 0
        assert "Task done" in document.progress_msg
class TestDocumentsParse:
    """Parsing API: payload validation, partial failures, repeats, duplicates."""

    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            pytest.param(None, "AttributeError", marks=pytest.mark.skip),
            pytest.param({"document_ids": []}, "`document_ids` is required", marks=pytest.mark.p1),
            pytest.param({"document_ids": ["invalid_id"]}, "Documents not found: ['invalid_id']", marks=pytest.mark.p3),
            pytest.param({"document_ids": ["\n!?。;!?\"'"]}, "Documents not found: ['\\n!?。;!?\"\\'']", marks=pytest.mark.p3),
            pytest.param("not json", "AttributeError", marks=pytest.mark.skip),
            # callables receive the uploaded document ids and build the payload
            pytest.param(lambda r: {"document_ids": r[:1]}, "", marks=pytest.mark.p1),
            pytest.param(lambda r: {"document_ids": r}, "", marks=pytest.mark.p1),
        ],
    )
    def test_basic_scenarios(self, add_documents_func, payload, expected_message):
        """Invalid payloads raise; valid ones parse to completion."""
        dataset, documents = add_documents_func
        if callable(payload):
            payload = payload([doc.id for doc in documents])
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                dataset.async_parse_documents(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            dataset.async_parse_documents(**payload)
            condition(dataset, payload["document_ids"])
            validate_document_details(dataset, payload["document_ids"])

    @pytest.mark.parametrize(
        "payload",
        [
            pytest.param(lambda r: {"document_ids": ["invalid_id"] + r}, marks=pytest.mark.p3),
            pytest.param(lambda r: {"document_ids": r[:1] + ["invalid_id"] + r[1:3]}, marks=pytest.mark.p1),
            pytest.param(lambda r: {"document_ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
        ],
    )
    def test_parse_partial_invalid_document_id(self, add_documents_func, payload):
        """A bad id anywhere in the list raises, yet the valid ids still parse."""
        dataset, documents = add_documents_func
        document_ids = [doc.id for doc in documents]
        payload = payload(document_ids)
        with pytest.raises(Exception) as exception_info:
            dataset.async_parse_documents(**payload)
        assert "Documents not found: ['invalid_id']" in str(exception_info.value), str(exception_info.value)
        # the valid documents were accepted and should finish parsing
        condition(dataset, document_ids)
        validate_document_details(dataset, document_ids)

    @pytest.mark.p3
    def test_repeated_parse(self, add_documents_func):
        """Re-parsing already-parsed documents must be accepted."""
        dataset, documents = add_documents_func
        document_ids = [doc.id for doc in documents]
        dataset.async_parse_documents(document_ids=document_ids)
        condition(dataset, document_ids)
        dataset.async_parse_documents(document_ids=document_ids)

    @pytest.mark.p3
    def test_duplicate_parse(self, add_documents_func):
        """Duplicate ids in a single request are tolerated."""
        dataset, documents = add_documents_func
        document_ids = [doc.id for doc in documents]
        dataset.async_parse_documents(document_ids=document_ids + document_ids)
        condition(dataset, document_ids)
        validate_document_details(dataset, document_ids)
@pytest.mark.p3
def test_parse_100_files(add_dataset_func, tmp_path):
    """Upload 100 documents and parse them in one batch request."""

    @wait_for(200, 1, "Document parsing timeout")
    def condition_inner(_dataset: DataSet, _count: int):
        docs = _dataset.list_documents(page_size=_count * 2)
        return all(doc.run == "DONE" for doc in docs)

    count = 100
    dataset = add_dataset_func
    documents = bulk_upload_documents(dataset, count, tmp_path)
    document_ids = [doc.id for doc in documents]
    dataset.async_parse_documents(document_ids=document_ids)
    condition_inner(dataset, count)
    validate_document_details(dataset, document_ids)
@pytest.mark.p3
def test_concurrent_parse(add_dataset_func, tmp_path):
    """Fire one parse request per document from 5 worker threads."""

    @wait_for(200, 1, "Document parsing timeout")
    def condition_inner(_dataset: DataSet, _count: int):
        docs = _dataset.list_documents(page_size=_count * 2)
        return all(doc.run == "DONE" for doc in docs)

    count = 100
    dataset = add_dataset_func
    documents = bulk_upload_documents(dataset, count, tmp_path)
    document_ids = [doc.id for doc in documents]

    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(dataset.async_parse_documents, document_ids=[doc.id]) for doc in documents]
        responses = list(as_completed(futures))

    assert len(responses) == count, responses
    condition_inner(dataset, count)
    validate_document_details(dataset, document_ids)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_web_api/common.py | test/testcases/test_web_api/common.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pathlib import Path
import requests
from configs import HOST_ADDRESS, VERSION
from requests_toolbelt import MultipartEncoder
from utils.file_utils import create_txt_file
HEADERS = {"Content-Type": "application/json"}
KB_APP_URL = f"/{VERSION}/kb"
DOCUMENT_APP_URL = f"/{VERSION}/document"
CHUNK_API_URL = f"/{VERSION}/chunk"
DIALOG_APP_URL = f"/{VERSION}/dialog"
# SESSION_WITH_CHAT_ASSISTANT_API_URL = "/api/v1/chats/{chat_id}/sessions"
# SESSION_WITH_AGENT_API_URL = "/api/v1/agents/{agent_id}/sessions"
MEMORY_API_URL = f"/{VERSION}/memories"
MESSAGE_API_URL = f"/{VERSION}/messages"
# KB APP
def create_kb(auth, payload=None, *, headers=HEADERS, data=None):
    """Create a knowledge base via the KB web-app endpoint."""
    return requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/create", headers=headers, auth=auth, json=payload, data=data).json()
def list_kbs(auth, params=None, payload=None, *, headers=HEADERS, data=None):
    """List knowledge bases; a missing payload defaults to an empty filter."""
    body = {} if payload is None else payload
    return requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/list", headers=headers, auth=auth, params=params, json=body, data=data).json()
def update_kb(auth, payload=None, *, headers=HEADERS, data=None):
    """Update a knowledge base's settings."""
    return requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/update", headers=headers, auth=auth, json=payload, data=data).json()
def rm_kb(auth, payload=None, *, headers=HEADERS, data=None):
    """Delete a knowledge base."""
    return requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/rm", headers=headers, auth=auth, json=payload, data=data).json()
def detail_kb(auth, params=None, *, headers=HEADERS):
    """Fetch one knowledge base's detail record."""
    return requests.get(url=f"{HOST_ADDRESS}{KB_APP_URL}/detail", headers=headers, auth=auth, params=params).json()
def list_tags_from_kbs(auth, params=None, *, headers=HEADERS):
    """List tags aggregated across knowledge bases."""
    return requests.get(url=f"{HOST_ADDRESS}{KB_APP_URL}/tags", headers=headers, auth=auth, params=params).json()
def list_tags(auth, dataset_id, params=None, *, headers=HEADERS):
    """List tags of one dataset."""
    return requests.get(url=f"{HOST_ADDRESS}{KB_APP_URL}/{dataset_id}/tags", headers=headers, auth=auth, params=params).json()
def rm_tags(auth, dataset_id, payload=None, *, headers=HEADERS, data=None):
    """Remove tags from one dataset."""
    return requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/{dataset_id}/rm_tags", headers=headers, auth=auth, json=payload, data=data).json()
def rename_tags(auth, dataset_id, payload=None, *, headers=HEADERS, data=None):
    """Rename a tag within one dataset."""
    return requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/{dataset_id}/rename_tags", headers=headers, auth=auth, json=payload, data=data).json()
def knowledge_graph(auth, dataset_id, params=None, *, headers=HEADERS):
    """Fetch the knowledge graph of one dataset."""
    return requests.get(url=f"{HOST_ADDRESS}{KB_APP_URL}/{dataset_id}/knowledge_graph", headers=headers, auth=auth, params=params).json()
def delete_knowledge_graph(auth, dataset_id, payload=None, *, headers=HEADERS, data=None):
    """Delete the knowledge graph of one dataset."""
    return requests.delete(url=f"{HOST_ADDRESS}{KB_APP_URL}/{dataset_id}/delete_knowledge_graph", headers=headers, auth=auth, json=payload, data=data).json()
def batch_create_datasets(auth, num):
    """Create ``num`` KBs named kb_0..kb_{num-1}; return their ids."""
    return [create_kb(auth, {"name": f"kb_{i}"})["data"]["kb_id"] for i in range(num)]
# DOCUMENT APP
def upload_documents(auth, payload=None, files_path=None):
    """Upload files as multipart/form-data; opened files are always closed."""
    url = f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/upload"
    paths = [] if files_path is None else files_path
    fields = []
    opened_files = []
    try:
        if payload:
            fields.extend((key, str(value)) for key, value in payload.items())
        for raw_path in paths:
            path = Path(raw_path)
            handle = path.open("rb")
            opened_files.append(handle)
            fields.append(("file", (path.name, handle)))
        encoder = MultipartEncoder(fields=fields)
        response = requests.post(
            url=url,
            headers={"Content-Type": encoder.content_type},
            auth=auth,
            data=encoder,
        )
        return response.json()
    finally:
        # close every file we managed to open, even on request failure
        for handle in opened_files:
            handle.close()
def create_document(auth, payload=None, *, headers=HEADERS, data=None):
    """Create an (empty) document record."""
    return requests.post(url=f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/create", headers=headers, auth=auth, json=payload, data=data).json()
def list_documents(auth, params=None, payload=None, *, headers=HEADERS, data=None):
    """List documents; a missing payload defaults to an empty filter."""
    body = {} if payload is None else payload
    return requests.post(url=f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/list", headers=headers, auth=auth, params=params, json=body, data=data).json()
def delete_document(auth, payload=None, *, headers=HEADERS, data=None):
    """Delete documents."""
    return requests.post(url=f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/rm", headers=headers, auth=auth, json=payload, data=data).json()
def parse_documents(auth, payload=None, *, headers=HEADERS, data=None):
    """Start (or control) parsing for documents."""
    return requests.post(url=f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/run", headers=headers, auth=auth, json=payload, data=data).json()
def bulk_upload_documents(auth, kb_id, num, tmp_path):
    """Create ``num`` txt files, upload them into ``kb_id``, return doc ids."""
    paths = [create_txt_file(tmp_path / f"ragflow_test_upload_{i}.txt") for i in range(num)]
    res = upload_documents(auth, {"kb_id": kb_id}, paths)
    return [document["id"] for document in res["data"]]
# CHUNK APP
def add_chunk(auth, payload=None, *, headers=HEADERS, data=None):
    """Create one chunk."""
    return requests.post(url=f"{HOST_ADDRESS}{CHUNK_API_URL}/create", headers=headers, auth=auth, json=payload, data=data).json()
def list_chunks(auth, payload=None, *, headers=HEADERS):
    """List chunks of a document."""
    return requests.post(url=f"{HOST_ADDRESS}{CHUNK_API_URL}/list", headers=headers, auth=auth, json=payload).json()
def get_chunk(auth, params=None, *, headers=HEADERS):
    """Fetch one chunk."""
    return requests.get(url=f"{HOST_ADDRESS}{CHUNK_API_URL}/get", headers=headers, auth=auth, params=params).json()
def update_chunk(auth, payload=None, *, headers=HEADERS):
    """Update one chunk's content/fields."""
    return requests.post(url=f"{HOST_ADDRESS}{CHUNK_API_URL}/set", headers=headers, auth=auth, json=payload).json()
def delete_chunks(auth, payload=None, *, headers=HEADERS):
    """Delete chunks."""
    return requests.post(url=f"{HOST_ADDRESS}{CHUNK_API_URL}/rm", headers=headers, auth=auth, json=payload).json()
def retrieval_chunks(auth, payload=None, *, headers=HEADERS):
    """Run a retrieval test over chunks."""
    return requests.post(url=f"{HOST_ADDRESS}{CHUNK_API_URL}/retrieval_test", headers=headers, auth=auth, json=payload).json()
def batch_add_chunks(auth, doc_id, num):
    """Add ``num`` chunks to ``doc_id``; return the new chunk ids."""
    return [
        add_chunk(auth, {"doc_id": doc_id, "content_with_weight": f"chunk test {i}"})["data"]["chunk_id"]
        for i in range(num)
    ]
# DIALOG APP
def create_dialog(auth, payload=None, *, headers=HEADERS, data=None):
    """Create a dialog (the web app uses /set for both create and update)."""
    return requests.post(url=f"{HOST_ADDRESS}{DIALOG_APP_URL}/set", headers=headers, auth=auth, json=payload, data=data).json()
def update_dialog(auth, payload=None, *, headers=HEADERS, data=None):
    """Update a dialog (same /set endpoint as create_dialog)."""
    return requests.post(url=f"{HOST_ADDRESS}{DIALOG_APP_URL}/set", headers=headers, auth=auth, json=payload, data=data).json()
def get_dialog(auth, params=None, *, headers=HEADERS):
    """Fetch one dialog."""
    return requests.get(url=f"{HOST_ADDRESS}{DIALOG_APP_URL}/get", headers=headers, auth=auth, params=params).json()
def list_dialogs(auth, *, headers=HEADERS):
    """List all dialogs for the authenticated user."""
    return requests.get(url=f"{HOST_ADDRESS}{DIALOG_APP_URL}/list", headers=headers, auth=auth).json()
def delete_dialog(auth, payload=None, *, headers=HEADERS, data=None):
    """Delete dialogs by id."""
    return requests.post(url=f"{HOST_ADDRESS}{DIALOG_APP_URL}/rm", headers=headers, auth=auth, json=payload, data=data).json()
def batch_create_dialogs(auth, num, kb_ids=None):
    """Create ``num`` dialogs bound to ``kb_ids``; return ids of successes."""
    bound_kb_ids = [] if kb_ids is None else kb_ids
    dialog_ids = []
    for index in range(num):
        payload = {
            "name": f"dialog_{index}",
            "description": f"Test dialog {index}",
            "kb_ids": bound_kb_ids,
            "prompt_config": {"system": "You are a helpful assistant. Use the following knowledge to answer questions: {knowledge}", "parameters": [{"key": "knowledge", "optional": False}]},
            "top_n": 6,
            "top_k": 1024,
            "similarity_threshold": 0.1,
            "vector_similarity_weight": 0.3,
            "llm_setting": {"model": "gpt-3.5-turbo", "temperature": 0.7},
        }
        res = create_dialog(auth, payload)
        # only successfully created dialogs are reported back
        if res["code"] == 0:
            dialog_ids.append(res["data"]["id"])
    return dialog_ids
def delete_dialogs(auth):
    """Delete every dialog owned by the authenticated user (best-effort)."""
    listing = list_dialogs(auth)
    if listing["code"] != 0 or not listing["data"]:
        return
    dialog_ids = [dialog["id"] for dialog in listing["data"]]
    if dialog_ids:
        delete_dialog(auth, {"dialog_ids": dialog_ids})
# MEMORY APP
def create_memory(auth, payload=None):
    """Create a memory."""
    return requests.post(url=f"{HOST_ADDRESS}{MEMORY_API_URL}", headers=HEADERS, auth=auth, json=payload).json()
def update_memory(auth, memory_id: str, payload=None):
    """Update memory ``memory_id``."""
    return requests.put(url=f"{HOST_ADDRESS}{MEMORY_API_URL}/{memory_id}", headers=HEADERS, auth=auth, json=payload).json()
def delete_memory(auth, memory_id: str):
    """Delete memory ``memory_id``."""
    return requests.delete(url=f"{HOST_ADDRESS}{MEMORY_API_URL}/{memory_id}", headers=HEADERS, auth=auth).json()
def list_memory(auth, params=None):
    """List memories.

    Query parameters are passed through requests' ``params=``, which
    URL-encodes values and expands list values into repeated keys
    (``?id=a&id=b``) — the same wire shape the previous hand-built
    query string produced, but safe for values containing ``&``,
    spaces, or non-ASCII characters.
    """
    url = f"{HOST_ADDRESS}{MEMORY_API_URL}"
    res = requests.get(url=url, headers=HEADERS, auth=auth, params=params)
    return res.json()
def get_memory_config(auth, memory_id: str):
    """Fetch the configuration of memory ``memory_id``."""
    return requests.get(url=f"{HOST_ADDRESS}{MEMORY_API_URL}/{memory_id}/config", headers=HEADERS, auth=auth).json()
def list_memory_message(auth, memory_id, params=None):
    """List the messages stored in memory ``memory_id``.

    Uses requests' ``params=`` instead of hand-concatenating a query
    string: list values become repeated keys and all values are
    URL-encoded, fixing corruption for values with reserved characters.
    """
    url = f"{HOST_ADDRESS}{MEMORY_API_URL}/{memory_id}"
    res = requests.get(url=url, headers=HEADERS, auth=auth, params=params)
    return res.json()
def add_message(auth, payload=None):
    """Append a message."""
    return requests.post(url=f"{HOST_ADDRESS}{MESSAGE_API_URL}", headers=HEADERS, auth=auth, json=payload).json()
def forget_message(auth, memory_id: str, message_id: int):
    """Delete one message, addressed as ``{memory_id}:{message_id}``."""
    return requests.delete(url=f"{HOST_ADDRESS}{MESSAGE_API_URL}/{memory_id}:{message_id}", headers=HEADERS, auth=auth).json()
def update_message_status(auth, memory_id: str, message_id: int, status: bool):
    """Toggle a message's status flag."""
    return requests.put(
        url=f"{HOST_ADDRESS}{MESSAGE_API_URL}/{memory_id}:{message_id}",
        headers=HEADERS,
        auth=auth,
        json={"status": status},
    ).json()
def search_message(auth, params=None):
    """Search messages.

    Uses requests' ``params=`` instead of hand-concatenating a query
    string: list values become repeated keys and all values are
    URL-encoded, fixing corruption for values with reserved characters.
    """
    url = f"{HOST_ADDRESS}{MESSAGE_API_URL}/search"
    res = requests.get(url=url, headers=HEADERS, auth=auth, params=params)
    return res.json()
def get_recent_message(auth, params=None):
    """Fetch recent messages.

    Uses requests' ``params=`` instead of hand-concatenating a query
    string: list values become repeated keys and all values are
    URL-encoded, fixing corruption for values with reserved characters.
    """
    url = f"{HOST_ADDRESS}{MESSAGE_API_URL}"
    res = requests.get(url=url, headers=HEADERS, auth=auth, params=params)
    return res.json()
def get_message_content(auth, memory_id: str, message_id: int):
    """Fetch the content of one message."""
    return requests.get(url=f"{HOST_ADDRESS}{MESSAGE_API_URL}/{memory_id}:{message_id}/content", headers=HEADERS, auth=auth).json()
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_web_api/conftest.py | test/testcases/test_web_api/conftest.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from time import sleep
import pytest
from common import (
batch_add_chunks,
batch_create_datasets,
bulk_upload_documents,
delete_chunks,
delete_dialogs,
list_chunks,
list_documents,
list_kbs,
parse_documents,
rm_kb,
)
from libs.auth import RAGFlowWebApiAuth
from pytest import FixtureRequest
from utils import wait_for
from utils.file_utils import (
create_docx_file,
create_eml_file,
create_excel_file,
create_html_file,
create_image_file,
create_json_file,
create_md_file,
create_pdf_file,
create_ppt_file,
create_txt_file,
)
@wait_for(30, 1, "Document parsing timeout")
def condition(_auth, _kb_id):
    """True once every document in the KB reports run state "3" (done)."""
    res = list_documents(_auth, {"kb_id": _kb_id})
    return all(doc["run"] == "3" for doc in res["data"]["docs"])
@pytest.fixture
def generate_test_files(request: FixtureRequest, tmp_path):
    """Create the requested test file type(s) under ``tmp_path``.

    ``request.param`` selects a single type, or "" for all of them.
    Returns a mapping of type name -> created file path.
    """
    creators = {
        "docx": ("docx", create_docx_file),
        "excel": ("xlsx", create_excel_file),
        "ppt": ("pptx", create_ppt_file),
        "image": ("png", create_image_file),
        "pdf": ("pdf", create_pdf_file),
        "txt": ("txt", create_txt_file),
        "md": ("md", create_md_file),
        "json": ("json", create_json_file),
        "eml": ("eml", create_eml_file),
        "html": ("html", create_html_file),
    }
    files = {}
    for file_type, (extension, make_file) in creators.items():
        if request.param in ["", file_type]:
            target = tmp_path / f"ragflow_test.{extension}"
            make_file(target)
            files[file_type] = target
    return files
@pytest.fixture(scope="class")
def ragflow_tmp_dir(request, tmp_path_factory):
    """Class-scoped temp dir named after the requesting test class."""
    return tmp_path_factory.mktemp(request.cls.__name__)
@pytest.fixture(scope="session")
def WebApiAuth(auth):
    """Session-scoped web-API auth wrapper around the shared token."""
    return RAGFlowWebApiAuth(auth)
@pytest.fixture(scope="function")
def clear_datasets(request: FixtureRequest, WebApiAuth: RAGFlowWebApiAuth):
    """Remove every KB after the test function finishes."""
    def cleanup():
        for kb in list_kbs(WebApiAuth, params={"page_size": 1000})["data"]["kbs"]:
            rm_kb(WebApiAuth, {"kb_id": kb["id"]})
    request.addfinalizer(cleanup)
@pytest.fixture(scope="function")
def clear_dialogs(request, WebApiAuth):
    """Delete all dialogs once the test function is done."""
    request.addfinalizer(lambda: delete_dialogs(WebApiAuth))
@pytest.fixture(scope="class")
def add_dataset(request: FixtureRequest, WebApiAuth: RAGFlowWebApiAuth) -> str:
    """Create one KB for the test class; remove all KBs on teardown."""
    def cleanup():
        for kb in list_kbs(WebApiAuth, params={"page_size": 1000})["data"]["kbs"]:
            rm_kb(WebApiAuth, {"kb_id": kb["id"]})
    request.addfinalizer(cleanup)
    return batch_create_datasets(WebApiAuth, 1)[0]
@pytest.fixture(scope="function")
def add_dataset_func(request: FixtureRequest, WebApiAuth: RAGFlowWebApiAuth) -> str:
    """Create one KB per test function; remove all KBs on teardown."""
    def cleanup():
        for kb in list_kbs(WebApiAuth, params={"page_size": 1000})["data"]["kbs"]:
            rm_kb(WebApiAuth, {"kb_id": kb["id"]})
    request.addfinalizer(cleanup)
    return batch_create_datasets(WebApiAuth, 1)[0]
@pytest.fixture(scope="class")
def add_document(request, WebApiAuth, add_dataset, ragflow_tmp_dir):
    """Upload a single document into the class-scoped dataset.

    Returns ``(dataset_id, document_id)``.
    """
    # NOTE(review): per-document cleanup is deliberately left disabled —
    # presumably because removing the whole KB on teardown already covers
    # it; confirm before re-enabling.
    # def cleanup():
    #     res = list_documents(WebApiAuth, {"kb_id": dataset_id})
    #     for doc in res["data"]["docs"]:
    #         delete_document(WebApiAuth, {"doc_id": doc["id"]})
    # request.addfinalizer(cleanup)
    dataset_id = add_dataset
    return dataset_id, bulk_upload_documents(WebApiAuth, dataset_id, 1, ragflow_tmp_dir)[0]
@pytest.fixture(scope="class")
def add_chunks(request, WebApiAuth, add_document):
    """Parse the uploaded document, then add 4 chunks to it.

    Returns ``(kb_id, document_id, chunk_ids)``.
    """
    def cleanup():
        # best-effort: remove whatever chunks exist at teardown
        # (document_id is bound later in this fixture; the closure reads
        # it only when the finalizer runs)
        res = list_chunks(WebApiAuth, {"doc_id": document_id})
        if res["code"] == 0:
            chunk_ids = [chunk["chunk_id"] for chunk in res["data"]["chunks"]]
            delete_chunks(WebApiAuth, {"doc_id": document_id, "chunk_ids": chunk_ids})
    request.addfinalizer(cleanup)
    kb_id, document_id = add_document
    # run="1" starts parsing; condition() blocks until the KB reports done
    parse_documents(WebApiAuth, {"doc_ids": [document_id], "run": "1"})
    condition(WebApiAuth, kb_id)
    chunk_ids = batch_add_chunks(WebApiAuth, document_id, 4)
    # issues/6487 — short wait before returning; see the issue for background
    sleep(1)
    return kb_id, document_id, chunk_ids
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_web_api/test_chunk_app/test_retrieval_chunks.py | test/testcases/test_web_api/test_chunk_app/test_retrieval_chunks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import retrieval_chunks
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
@pytest.mark.p1
class TestAuthorization:
    """Retrieval must be rejected without a valid web-API token."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        # kb_id/question values are irrelevant here: auth is checked first
        res = retrieval_chunks(invalid_auth, {"kb_id": "dummy_kb_id", "question": "dummy question"})
        assert res["code"] == expected_code, res
        assert res["message"] == expected_message, res
class TestChunksRetrieval:
    """Parameter-validation and happy-path tests for chunk retrieval."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_code, expected_page_size, expected_message",
        [
            # None placeholders below are replaced with real ids in the test body
            ({"question": "chunk", "kb_id": None}, 0, 4, ""),
            ({"question": "chunk", "doc_ids": None}, 101, 0, "required argument are missing: kb_id; "),
            ({"question": "chunk", "kb_id": None, "doc_ids": None}, 0, 4, ""),
            ({"question": "chunk"}, 101, 0, "required argument are missing: kb_id; "),
        ],
    )
    def test_basic_scenarios(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
        """kb_id is mandatory; with it, all 4 seeded chunks are retrievable."""
        dataset_id, document_id, _ = add_chunks
        # substitute real ids for the None placeholders
        if "kb_id" in payload:
            payload["kb_id"] = [dataset_id]
        if "doc_ids" in payload:
            payload["doc_ids"] = [document_id]
        res = retrieval_chunks(WebApiAuth, payload)
        assert res["code"] == expected_code, res
        if expected_code == 0:
            assert len(res["data"]["chunks"]) == expected_page_size, res
        else:
            assert res["message"] == expected_message, res
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_page_size, expected_message",
        [
            # invalid page values are currently skipped (server behavior unsettled)
            pytest.param(
                {"page": None, "size": 2},
                100,
                0,
                """TypeError("int() argument must be a string, a bytes-like object or a real number, not 'NoneType'")""",
                marks=pytest.mark.skip,
            ),
            pytest.param(
                {"page": 0, "size": 2},
                100,
                0,
                "ValueError('Search does not support negative slicing.')",
                marks=pytest.mark.skip,
            ),
            pytest.param({"page": 2, "size": 2}, 0, 2, "", marks=pytest.mark.skip(reason="issues/6646")),
            ({"page": 3, "size": 2}, 0, 0, ""),
            ({"page": "3", "size": 2}, 0, 0, ""),
            pytest.param(
                {"page": -1, "size": 2},
                100,
                0,
                "ValueError('Search does not support negative slicing.')",
                marks=pytest.mark.skip,
            ),
            pytest.param(
                {"page": "a", "size": 2},
                100,
                0,
                """ValueError("invalid literal for int() with base 10: 'a'")""",
                marks=pytest.mark.skip,
            ),
        ],
    )
    def test_page(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
        """Pagination of retrieval results."""
        dataset_id, _, _ = add_chunks
        payload.update({"question": "chunk", "kb_id": [dataset_id]})
        res = retrieval_chunks(WebApiAuth, payload)
        assert res["code"] == expected_code, res
        if expected_code == 0:
            assert len(res["data"]["chunks"]) == expected_page_size, res
        else:
            assert res["message"] == expected_message, res
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "payload, expected_code, expected_page_size, expected_message",
        [
            pytest.param(
                {"size": None},
                100,
                0,
                """TypeError("int() argument must be a string, a bytes-like object or a real number, not 'NoneType'")""",
                marks=pytest.mark.skip,
            ),
            # ({"size": 0}, 0, 0, ""),
            ({"size": 1}, 0, 1, ""),
            # only 4 chunks exist, so size=5 caps at 4
            ({"size": 5}, 0, 4, ""),
            ({"size": "1"}, 0, 1, ""),
            # ({"size": -1}, 0, 0, ""),
            pytest.param(
                {"size": "a"},
                100,
                0,
                """ValueError("invalid literal for int() with base 10: 'a'")""",
                marks=pytest.mark.skip,
            ),
        ],
    )
    def test_page_size(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
        """Page-size handling, including string coercion."""
        dataset_id, _, _ = add_chunks
        payload.update({"question": "chunk", "kb_id": [dataset_id]})
        res = retrieval_chunks(WebApiAuth, payload)
        assert res["code"] == expected_code, res
        if expected_code == 0:
            assert len(res["data"]["chunks"]) == expected_page_size, res
        else:
            assert res["message"] == expected_message, res
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "payload, expected_code, expected_page_size, expected_message",
        [
            # Any numeric weight (even out-of-range 10) still returns all 4 fixture chunks.
            ({"vector_similarity_weight": 0}, 0, 4, ""),
            ({"vector_similarity_weight": 0.5}, 0, 4, ""),
            ({"vector_similarity_weight": 10}, 0, 4, ""),
            pytest.param(
                {"vector_similarity_weight": "a"},
                100,
                0,
                """ValueError("could not convert string to float: 'a'")""",
                marks=pytest.mark.skip,
            ),
        ],
    )
    def test_vector_similarity_weight(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
        """`vector_similarity_weight` accepts any numeric value without changing the result-set size."""
        dataset_id, _, _ = add_chunks
        payload.update({"question": "chunk", "kb_id": [dataset_id]})
        res = retrieval_chunks(WebApiAuth, payload)
        assert res["code"] == expected_code, res
        if expected_code == 0:
            assert len(res["data"]["chunks"]) == expected_page_size, res
        else:
            assert res["message"] == expected_message, res
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_page_size, expected_message",
        [
            ({"top_k": 10}, 0, 4, ""),
            # The same top_k input is parametrized twice with engine-dependent expectations:
            # one case runs on elasticsearch-family engines, the other on infinity.
            pytest.param(
                {"top_k": 1},
                0,
                4,
                "",
                marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in ["infinity", "opensearch"], reason="Infinity"),
            ),
            pytest.param(
                {"top_k": 1},
                0,
                1,
                "",
                marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in [None, "opensearch", "elasticsearch"], reason="elasticsearch"),
            ),
            pytest.param(
                {"top_k": -1},
                100,
                4,
                "must be greater than 0",
                marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in ["infinity", "opensearch"], reason="Infinity"),
            ),
            pytest.param(
                {"top_k": -1},
                100,
                4,
                "3014",
                marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in [None, "opensearch", "elasticsearch"], reason="elasticsearch"),
            ),
            pytest.param(
                {"top_k": "a"},
                100,
                0,
                """ValueError("invalid literal for int() with base 10: 'a'")""",
                marks=pytest.mark.skip,
            ),
        ],
    )
    def test_top_k(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
        """`top_k` behavior differs per DOC_ENGINE; error messages are matched by substring."""
        dataset_id, _, _ = add_chunks
        payload.update({"question": "chunk", "kb_id": [dataset_id]})
        res = retrieval_chunks(WebApiAuth, payload)
        assert res["code"] == expected_code, res
        if expected_code == 0:
            assert len(res["data"]["chunks"]) == expected_page_size, res
        else:
            # Substring match: backends prefix/augment the message differently.
            assert expected_message in res["message"], res
    # Whole test skipped: depends on an external rerank model being provisioned.
    @pytest.mark.skip
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"rerank_id": "BAAI/bge-reranker-v2-m3"}, 0, ""),
            pytest.param({"rerank_id": "unknown"}, 100, "LookupError('Model(unknown) not authorized')", marks=pytest.mark.skip),
        ],
    )
    def test_rerank_id(self, WebApiAuth, add_chunks, payload, expected_code, expected_message):
        """A valid `rerank_id` still yields results; an unknown model id is rejected."""
        dataset_id, _, _ = add_chunks
        payload.update({"question": "chunk", "kb_id": [dataset_id]})
        res = retrieval_chunks(WebApiAuth, payload)
        assert res["code"] == expected_code, res
        if expected_code == 0:
            assert len(res["data"]["chunks"]) > 0, res
        else:
            assert expected_message in res["message"], res
    # Whole test skipped: keyword extraction needs a chat model not available in CI.
    @pytest.mark.skip
    @pytest.mark.parametrize(
        "payload, expected_code, expected_page_size, expected_message",
        [
            ({"keyword": True}, 0, 5, ""),
            ({"keyword": "True"}, 0, 5, ""),
            ({"keyword": False}, 0, 5, ""),
            ({"keyword": "False"}, 0, 5, ""),
            ({"keyword": None}, 0, 5, ""),
        ],
    )
    def test_keyword(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
        """The `keyword` flag (any truthy/falsy spelling) does not change the result count."""
        dataset_id, _, _ = add_chunks
        payload.update({"question": "chunk test", "kb_id": [dataset_id]})
        res = retrieval_chunks(WebApiAuth, payload)
        assert res["code"] == expected_code, res
        if expected_code == 0:
            assert len(res["data"]["chunks"]) == expected_page_size, res
        else:
            assert res["message"] == expected_message, res
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "payload, expected_code, expected_highlight, expected_message",
        [
            ({"highlight": True}, 0, True, ""),
            ({"highlight": "True"}, 0, True, ""),
            # Falsy values currently still return highlights; see issues/6648.
            pytest.param({"highlight": False}, 0, False, "", marks=pytest.mark.skip(reason="issues/6648")),
            pytest.param({"highlight": "False"}, 0, False, "", marks=pytest.mark.skip(reason="issues/6648")),
            pytest.param({"highlight": None}, 0, False, "", marks=pytest.mark.skip(reason="issues/6648")),
        ],
    )
    def test_highlight(self, WebApiAuth, add_chunks, payload, expected_code, expected_highlight, expected_message):
        """When `highlight` is enabled every returned chunk carries a `highlight` field."""
        dataset_id, _, _ = add_chunks
        payload.update({"question": "chunk", "kb_id": [dataset_id]})
        res = retrieval_chunks(WebApiAuth, payload)
        assert res["code"] == expected_code, res
        if expected_highlight:
            for chunk in res["data"]["chunks"]:
                assert "highlight" in chunk, res
        else:
            for chunk in res["data"]["chunks"]:
                assert "highlight" not in chunk, res
        if expected_code != 0:
            assert res["message"] == expected_message, res
@pytest.mark.p3
def test_invalid_params(self, WebApiAuth, add_chunks):
dataset_id, _, _ = add_chunks
payload = {"question": "chunk", "kb_id": [dataset_id], "a": "b"}
res = retrieval_chunks(WebApiAuth, payload)
assert res["code"] == 0, res
assert len(res["data"]["chunks"]) == 4, res
@pytest.mark.p3
def test_concurrent_retrieval(self, WebApiAuth, add_chunks):
dataset_id, _, _ = add_chunks
count = 100
payload = {"question": "chunk", "kb_id": [dataset_id]}
with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(retrieval_chunks, WebApiAuth, payload) for i in range(count)]
responses = list(as_completed(futures))
assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_web_api/test_chunk_app/test_rm_chunks.py | test/testcases/test_web_api/test_chunk_app/test_rm_chunks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_add_chunks, delete_chunks, list_chunks
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
@pytest.mark.p1
class TestAuthorization:
    """Chunk deletion must reject missing or invalid credentials with HTTP-style 401."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        res = delete_chunks(invalid_auth, {"doc_id": "document_id", "chunk_ids": ["1"]})
        assert res["code"] == expected_code
        assert res["message"] == expected_message
class TestChunksDeletion:
    """Deletion semantics: invalid ids, repeats, duplicates, concurrency, and bulk removal."""

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "doc_id, expected_code, expected_message",
        [
            ("", 102, "Document not found!"),
            ("invalid_document_id", 102, "Document not found!"),
        ],
    )
    def test_invalid_document_id(self, WebApiAuth, add_chunks_func, doc_id, expected_code, expected_message):
        """Deleting chunks against a missing/blank doc_id fails with code 102."""
        _, _, chunk_ids = add_chunks_func
        res = delete_chunks(WebApiAuth, {"doc_id": doc_id, "chunk_ids": chunk_ids})
        assert res["code"] == expected_code, res
        assert res["message"] == expected_message, res

    @pytest.mark.parametrize(
        "payload",
        [
            # Lambdas receive the real chunk-id list and splice an invalid id at
            # the head, middle, and tail respectively.
            pytest.param(lambda r: {"chunk_ids": ["invalid_id"] + r}, marks=pytest.mark.p3),
            pytest.param(lambda r: {"chunk_ids": r[:1] + ["invalid_id"] + r[1:4]}, marks=pytest.mark.p1),
            pytest.param(lambda r: {"chunk_ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
        ],
    )
    def test_delete_partial_invalid_id(self, WebApiAuth, add_chunks_func, payload):
        """An invalid id mixed into the list does not prevent the valid ids from being deleted."""
        _, doc_id, chunk_ids = add_chunks_func
        if callable(payload):
            payload = payload(chunk_ids)
        payload["doc_id"] = doc_id
        res = delete_chunks(WebApiAuth, payload)
        assert res["code"] == 0, res

        res = list_chunks(WebApiAuth, {"doc_id": doc_id})
        assert res["code"] == 0, res
        assert len(res["data"]["chunks"]) == 0, res
        assert res["data"]["total"] == 0, res

    @pytest.mark.p3
    def test_repeated_deletion(self, WebApiAuth, add_chunks_func):
        """Deleting the same chunk ids twice: first call succeeds, second fails with 102."""
        _, doc_id, chunk_ids = add_chunks_func
        payload = {"chunk_ids": chunk_ids, "doc_id": doc_id}

        res = delete_chunks(WebApiAuth, payload)
        assert res["code"] == 0, res
        res = delete_chunks(WebApiAuth, payload)
        assert res["code"] == 102, res
        assert res["message"] == "Index updating failure", res

    @pytest.mark.p3
    def test_duplicate_deletion(self, WebApiAuth, add_chunks_func):
        """Duplicated ids within a single request are tolerated; all chunks end up deleted."""
        _, doc_id, chunk_ids = add_chunks_func
        payload = {"chunk_ids": chunk_ids * 2, "doc_id": doc_id}
        res = delete_chunks(WebApiAuth, payload)
        assert res["code"] == 0, res

        res = list_chunks(WebApiAuth, {"doc_id": doc_id})
        assert res["code"] == 0, res
        assert len(res["data"]["chunks"]) == 0, res
        assert res["data"]["total"] == 0, res

    @pytest.mark.p3
    def test_concurrent_deletion(self, WebApiAuth, add_document):
        """100 single-chunk deletions issued concurrently must all succeed."""
        count = 100
        _, doc_id = add_document
        chunk_ids = batch_add_chunks(WebApiAuth, doc_id, count)

        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [
                executor.submit(
                    delete_chunks,
                    WebApiAuth,
                    {"doc_id": doc_id, "chunk_ids": chunk_ids[i : i + 1]},
                )
                for i in range(count)
            ]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures)

    @pytest.mark.p3
    def test_delete_1k(self, WebApiAuth, add_document):
        """Bulk-delete 1000 chunks in one request and verify the document is empty."""
        chunks_num = 1_000
        _, doc_id = add_document
        chunk_ids = batch_add_chunks(WebApiAuth, doc_id, chunks_num)

        from time import sleep

        # Give the index time to absorb the bulk insert before deleting.
        sleep(1)

        res = delete_chunks(WebApiAuth, {"doc_id": doc_id, "chunk_ids": chunk_ids})
        assert res["code"] == 0

        res = list_chunks(WebApiAuth, {"doc_id": doc_id})
        if res["code"] != 0:
            assert False, res
        assert len(res["data"]["chunks"]) == 0, res
        assert res["data"]["total"] == 0, res

    @pytest.mark.parametrize(
        "payload, expected_code, expected_message, remaining",
        [
            pytest.param(None, 100, """TypeError("argument of type \'NoneType\' is not iterable")""", 5, marks=pytest.mark.skip),
            pytest.param({"chunk_ids": ["invalid_id"]}, 102, "Index updating failure", 4, marks=pytest.mark.p3),
            pytest.param("not json", 100, """UnboundLocalError("local variable \'duplicate_messages\' referenced before assignment")""", 5, marks=pytest.mark.skip(reason="pull/6376")),
            pytest.param(lambda r: {"chunk_ids": r[:1]}, 0, "", 3, marks=pytest.mark.p3),
            pytest.param(lambda r: {"chunk_ids": r}, 0, "", 0, marks=pytest.mark.p1),
            pytest.param({"chunk_ids": []}, 0, "", 0, marks=pytest.mark.p3),
        ],
    )
    def test_basic_scenarios(self, WebApiAuth, add_chunks_func, payload, expected_code, expected_message, remaining):
        """Happy-path and edge-case deletions; `remaining` is the chunk count left afterwards.

        NOTE(review): an empty chunk_ids list apparently deletes everything
        (remaining == 0) — confirm this is the intended API contract.
        """
        _, doc_id, chunk_ids = add_chunks_func
        if callable(payload):
            payload = payload(chunk_ids)
        payload["doc_id"] = doc_id
        res = delete_chunks(WebApiAuth, payload)
        assert res["code"] == expected_code, res
        if res["code"] != 0:
            assert res["message"] == expected_message, res

        res = list_chunks(WebApiAuth, {"doc_id": doc_id})
        if res["code"] != 0:
            assert False, res
        assert len(res["data"]["chunks"]) == remaining, res
        assert res["data"]["total"] == remaining, res
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_web_api/test_chunk_app/test_update_chunk.py | test/testcases/test_web_api/test_chunk_app/test_update_chunk.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from random import randint
from time import sleep
import pytest
from common import delete_document, list_chunks, update_chunk
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
@pytest.mark.p1
class TestAuthorization:
    """Chunk update must reject missing or invalid credentials with HTTP-style 401."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        res = update_chunk(invalid_auth, {"doc_id": "doc_id", "chunk_id": "chunk_id", "content_with_weight": "test"})
        assert res["code"] == expected_code, res
        assert res["message"] == expected_message, res
class TestUpdateChunk:
    """Updating chunk fields: content, keywords, questions, availability, and error paths."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"content_with_weight": None}, 100, "TypeError('expected string or bytes-like object')"),
            ({"content_with_weight": ""}, 100, """Exception('Error: 413 - {"error":"Input validation error: `inputs` cannot be empty","error_type":"Validation"}')"""),
            ({"content_with_weight": 1}, 100, "TypeError('expected string or bytes-like object')"),
            ({"content_with_weight": "update chunk"}, 0, ""),
            ({"content_with_weight": " "}, 0, ""),
            ({"content_with_weight": "\n!?。;!?\"'"}, 0, ""),
        ],
    )
    def test_content(self, WebApiAuth, add_chunks, payload, expected_code, expected_message):
        """Content updates: non-string values fail; whitespace/punctuation-only strings succeed."""
        _, doc_id, chunk_ids = add_chunks
        chunk_id = chunk_ids[0]
        update_payload = {"doc_id": doc_id, "chunk_id": chunk_id}
        if payload:
            update_payload.update(payload)

        res = update_chunk(WebApiAuth, update_payload)
        assert res["code"] == expected_code, res
        if expected_code != 0:
            assert res["message"] == expected_message, res
        else:
            # Give the search index a moment to reflect the update.
            sleep(1)
            res = list_chunks(WebApiAuth, {"doc_id": doc_id})
            for chunk in res["data"]["chunks"]:
                if chunk["chunk_id"] == chunk_id:
                    assert chunk["content_with_weight"] == payload["content_with_weight"]

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"important_kwd": ["a", "b", "c"]}, 0, ""),
            ({"important_kwd": [""]}, 0, ""),
            ({"important_kwd": [1]}, 100, "TypeError('sequence item 0: expected str instance, int found')"),
            ({"important_kwd": ["a", "a"]}, 0, ""),
            ({"important_kwd": "abc"}, 102, "`important_kwd` should be a list"),
            ({"important_kwd": 123}, 102, "`important_kwd` should be a list"),
        ],
    )
    def test_important_keywords(self, WebApiAuth, add_chunks, payload, expected_code, expected_message):
        """`important_kwd` must be a list of strings; duplicates and empties are accepted."""
        _, doc_id, chunk_ids = add_chunks
        chunk_id = chunk_ids[0]
        update_payload = {"doc_id": doc_id, "chunk_id": chunk_id, "content_with_weight": "unchanged content"}  # Add content_with_weight as it's required
        if payload:
            update_payload.update(payload)

        res = update_chunk(WebApiAuth, update_payload)
        assert res["code"] == expected_code, res
        if expected_code != 0:
            assert res["message"] == expected_message, res
        else:
            sleep(1)
            res = list_chunks(WebApiAuth, {"doc_id": doc_id})
            for chunk in res["data"]["chunks"]:
                if chunk["chunk_id"] == chunk_id:
                    assert chunk["important_kwd"] == payload["important_kwd"]

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"question_kwd": ["a", "b", "c"]}, 0, ""),
            ({"question_kwd": [""]}, 100, """Exception('Error: 413 - {"error":"Input validation error: `inputs` cannot be empty","error_type":"Validation"}')"""),
            ({"question_kwd": [1]}, 100, "TypeError('sequence item 0: expected str instance, int found')"),
            ({"question_kwd": ["a", "a"]}, 0, ""),
            ({"question_kwd": "abc"}, 102, "`question_kwd` should be a list"),
            ({"question_kwd": 123}, 102, "`question_kwd` should be a list"),
        ],
    )
    def test_questions(self, WebApiAuth, add_chunks, payload, expected_code, expected_message):
        """`question_kwd` must be a list of non-empty strings (empties fail at embedding)."""
        _, doc_id, chunk_ids = add_chunks
        chunk_id = chunk_ids[0]
        update_payload = {"doc_id": doc_id, "chunk_id": chunk_id, "content_with_weight": "unchanged content"}  # Add content_with_weight as it's required
        if payload:
            update_payload.update(payload)

        res = update_chunk(WebApiAuth, update_payload)
        assert res["code"] == expected_code, res
        if expected_code != 0:
            assert res["message"] == expected_message, res
        else:
            sleep(1)
            res = list_chunks(WebApiAuth, {"doc_id": doc_id})
            for chunk in res["data"]["chunks"]:
                if chunk["chunk_id"] == chunk_id:
                    assert chunk["question_kwd"] == payload["question_kwd"]

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"available_int": 1}, 0, ""),
            ({"available_int": 0}, 0, ""),
        ],
    )
    def test_available(self, WebApiAuth, add_chunks, payload, expected_code, expected_message):
        """Toggling `available_int` (1 = visible, 0 = hidden) round-trips through list_chunks."""
        _, doc_id, chunk_ids = add_chunks
        chunk_id = chunk_ids[0]
        update_payload = {"doc_id": doc_id, "chunk_id": chunk_id, "content_with_weight": "unchanged content"}
        if payload:
            update_payload.update(payload)

        res = update_chunk(WebApiAuth, update_payload)
        assert res["code"] == expected_code, res
        if expected_code != 0:
            assert res["message"] == expected_message, res
        else:
            sleep(1)
            res = list_chunks(WebApiAuth, {"doc_id": doc_id})
            for chunk in res["data"]["chunks"]:
                if chunk["chunk_id"] == chunk_id:
                    assert chunk["available_int"] == payload["available_int"]

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "doc_id_param, expected_code, expected_message",
        [
            ("", 102, "Tenant not found!"),
            ("invalid_doc_id", 102, "Tenant not found!"),
        ],
    )
    def test_invalid_document_id_for_update(self, WebApiAuth, add_chunks, doc_id_param, expected_code, expected_message):
        """A bad doc_id surfaces as "Tenant not found!" (tenant is resolved from the document)."""
        _, _, chunk_ids = add_chunks
        chunk_id = chunk_ids[0]
        payload = {"doc_id": doc_id_param, "chunk_id": chunk_id, "content_with_weight": "test content"}
        res = update_chunk(WebApiAuth, payload)
        assert res["code"] == expected_code
        assert expected_message in res["message"]

    @pytest.mark.p3
    def test_repeated_update_chunk(self, WebApiAuth, add_chunks):
        """Back-to-back updates of the same chunk both succeed."""
        _, doc_id, chunk_ids = add_chunks
        payload1 = {"doc_id": doc_id, "chunk_id": chunk_ids[0], "content_with_weight": "chunk test 1"}
        res = update_chunk(WebApiAuth, payload1)
        assert res["code"] == 0

        payload2 = {"doc_id": doc_id, "chunk_id": chunk_ids[0], "content_with_weight": "chunk test 2"}
        res = update_chunk(WebApiAuth, payload2)
        assert res["code"] == 0

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"unknown_key": "unknown_value"}, 0, ""),
            ({}, 0, ""),
            pytest.param(None, 100, """TypeError("int() argument must be a string, a bytes-like object or a real number, not 'NoneType'")""", marks=pytest.mark.skip),
        ],
    )
    def test_invalid_params(self, WebApiAuth, add_chunks, payload, expected_code, expected_message):
        """Unknown/extra keys are ignored; a completely missing body is a known failure (skipped)."""
        _, doc_id, chunk_ids = add_chunks
        chunk_id = chunk_ids[0]
        update_payload = {"doc_id": doc_id, "chunk_id": chunk_id, "content_with_weight": "unchanged content"}
        if payload is not None:
            update_payload.update(payload)

        res = update_chunk(WebApiAuth, update_payload)
        assert res["code"] == expected_code, res
        if expected_code != 0:
            assert res["message"] == expected_message, res

    @pytest.mark.p3
    @pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6554")
    def test_concurrent_update_chunk(self, WebApiAuth, add_chunks):
        """50 concurrent updates hitting random chunks must all succeed (flaky on infinity)."""
        count = 50
        _, doc_id, chunk_ids = add_chunks

        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [
                executor.submit(
                    update_chunk,
                    WebApiAuth,
                    {"doc_id": doc_id, "chunk_id": chunk_ids[randint(0, 3)], "content_with_weight": f"update chunk test {i}"},
                )
                for i in range(count)
            ]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures)

    @pytest.mark.p3
    def test_update_chunk_to_deleted_document(self, WebApiAuth, add_chunks):
        """Updating a chunk of a deleted document fails with the tenant-not-found error."""
        _, doc_id, chunk_ids = add_chunks
        delete_document(WebApiAuth, {"doc_id": doc_id})
        payload = {"doc_id": doc_id, "chunk_id": chunk_ids[0], "content_with_weight": "test content"}
        res = update_chunk(WebApiAuth, payload)
        assert res["code"] == 102, res
        assert res["message"] == "Tenant not found!", res
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_web_api/test_chunk_app/conftest.py | test/testcases/test_web_api/test_chunk_app/conftest.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from time import sleep
import pytest
from common import batch_add_chunks, delete_chunks, list_chunks, list_documents, parse_documents
from utils import wait_for
@wait_for(30, 1, "Document parsing timeout")
def condition(_auth, _kb_id):
    """Polled predicate: True once every document in the KB reports run status "3"."""
    res = list_documents(_auth, {"kb_id": _kb_id})
    return all(doc["run"] == "3" for doc in res["data"]["docs"])
@pytest.fixture(scope="function")
def add_chunks_func(request, WebApiAuth, add_document):
    """Parse the fixture document, add 4 chunks, and return (kb_id, doc_id, chunk_ids).

    A finalizer deletes whatever chunks remain on the document after the test.
    """

    def cleanup():
        # Re-list instead of capturing chunk_ids: the test may have added or removed chunks.
        res = list_chunks(WebApiAuth, {"doc_id": document_id})
        chunk_ids = [chunk["chunk_id"] for chunk in res["data"]["chunks"]]
        delete_chunks(WebApiAuth, {"doc_id": document_id, "chunk_ids": chunk_ids})

    request.addfinalizer(cleanup)

    kb_id, document_id = add_document
    parse_documents(WebApiAuth, {"doc_ids": [document_id], "run": "1"})
    condition(WebApiAuth, kb_id)

    chunk_ids = batch_add_chunks(WebApiAuth, document_id, 4)
    # issues/6487
    sleep(1)
    return kb_id, document_id, chunk_ids
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_web_api/test_chunk_app/test_list_chunks.py | test/testcases/test_web_api/test_chunk_app/test_list_chunks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_add_chunks, list_chunks
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
@pytest.mark.p1
class TestAuthorization:
    """Chunk listing must reject missing or invalid credentials with HTTP-style 401."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        res = list_chunks(invalid_auth, {"doc_id": "document_id"})
        assert res["code"] == expected_code, res
        assert res["message"] == expected_message, res
class TestChunksList:
    """Listing chunks: pagination, keyword filtering, concurrency, and default page size."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_code, expected_page_size, expected_message",
        [
            pytest.param({"page": None, "size": 2}, 100, 0, """TypeError("int() argument must be a string, a bytes-like object or a real number, not 'NoneType'")""", marks=pytest.mark.skip),
            pytest.param({"page": 0, "size": 2}, 100, 0, "ValueError('Search does not support negative slicing.')", marks=pytest.mark.skip),
            ({"page": 2, "size": 2}, 0, 2, ""),
            ({"page": 3, "size": 2}, 0, 1, ""),
            ({"page": "3", "size": 2}, 0, 1, ""),
            pytest.param({"page": -1, "size": 2}, 100, 0, "ValueError('Search does not support negative slicing.')", marks=pytest.mark.skip),
            pytest.param({"page": "a", "size": 2}, 100, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_page(self, WebApiAuth, add_chunks, params, expected_code, expected_page_size, expected_message):
        """Paging over the 5 fixture chunks (4 added + 1 from parsing)."""
        _, doc_id, _ = add_chunks
        payload = {"doc_id": doc_id}
        if params:
            payload.update(params)
        res = list_chunks(WebApiAuth, payload)
        assert res["code"] == expected_code, res
        if expected_code == 0:
            assert len(res["data"]["chunks"]) == expected_page_size, res
        else:
            assert res["message"] == expected_message, res

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_code, expected_page_size, expected_message",
        [
            ({"size": None}, 100, 0, """TypeError("int() argument must be a string, a bytes-like object or a real number, not 'NoneType'")"""),
            pytest.param({"size": 0}, 0, 5, ""),
            ({"size": 1}, 0, 1, ""),
            ({"size": 6}, 0, 5, ""),
            ({"size": "1"}, 0, 1, ""),
            pytest.param({"size": -1}, 0, 5, "", marks=pytest.mark.skip),
            pytest.param({"size": "a"}, 100, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_page_size(self, WebApiAuth, add_chunks, params, expected_code, expected_page_size, expected_message):
        """`size` caps the page; 0 means "no limit" and returns all 5 chunks."""
        _, doc_id, _ = add_chunks
        payload = {"doc_id": doc_id}
        if params:
            payload.update(params)
        res = list_chunks(WebApiAuth, payload)
        assert res["code"] == expected_code, res
        if expected_code == 0:
            assert len(res["data"]["chunks"]) == expected_page_size, res
        else:
            assert res["message"] == expected_message, res

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "params, expected_page_size",
        [
            ({"keywords": None}, 5),
            ({"keywords": ""}, 5),
            ({"keywords": "1"}, 1),
            pytest.param({"keywords": "chunk"}, 4, marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6509")),
            ({"keywords": "content"}, 1),
            ({"keywords": "unknown"}, 0),
        ],
    )
    def test_keywords(self, WebApiAuth, add_chunks, params, expected_page_size):
        """Keyword filtering narrows the result set; empty/None keywords match everything."""
        _, doc_id, _ = add_chunks
        payload = {"doc_id": doc_id}
        if params:
            payload.update(params)
        res = list_chunks(WebApiAuth, payload)
        assert res["code"] == 0, res
        assert len(res["data"]["chunks"]) == expected_page_size, res

    @pytest.mark.p3
    def test_invalid_params(self, WebApiAuth, add_chunks):
        """Unknown keys in the list payload are ignored rather than rejected."""
        _, doc_id, _ = add_chunks
        payload = {"doc_id": doc_id, "a": "b"}
        res = list_chunks(WebApiAuth, payload)
        assert res["code"] == 0, res
        assert len(res["data"]["chunks"]) == 5, res

    @pytest.mark.p3
    def test_concurrent_list(self, WebApiAuth, add_chunks):
        """100 concurrent list calls all return the full 5-chunk page."""
        _, doc_id, _ = add_chunks
        count = 100
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(list_chunks, WebApiAuth, {"doc_id": doc_id}) for i in range(count)]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(len(future.result()["data"]["chunks"]) == 5 for future in futures)

    @pytest.mark.p1
    def test_default(self, WebApiAuth, add_document):
        """Default page size is 30 even when more chunks exist; chunk_num counts them all."""
        _, doc_id = add_document

        res = list_chunks(WebApiAuth, {"doc_id": doc_id})
        chunks_count = res["data"]["doc"]["chunk_num"]

        batch_add_chunks(WebApiAuth, doc_id, 31)
        # issues/6487
        from time import sleep

        sleep(3)

        res = list_chunks(WebApiAuth, {"doc_id": doc_id})
        assert res["code"] == 0
        assert len(res["data"]["chunks"]) == 30
        assert res["data"]["doc"]["chunk_num"] == chunks_count + 31
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_web_api/test_chunk_app/test_create_chunk.py | test/testcases/test_web_api/test_chunk_app/test_create_chunk.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import add_chunk, delete_document, get_chunk, list_chunks
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
def validate_chunk_details(auth, kb_id, doc_id, payload, res):
    """Fetch the chunk just created (per `res`) and verify it matches the add payload."""
    created_id = res["data"]["chunk_id"]
    detail = get_chunk(auth, {"chunk_id": created_id})
    assert detail["code"] == 0, detail
    chunk = detail["data"]
    assert chunk["doc_id"] == doc_id
    assert chunk["kb_id"] == kb_id
    assert chunk["content_with_weight"] == payload["content_with_weight"]
    if "important_kwd" in payload:
        assert chunk["important_kwd"] == payload["important_kwd"]
    if "question_kwd" in payload:
        # Questions are stripped server-side; compare against the normalized form.
        normalized = [str(q).strip() for q in payload.get("question_kwd", [])]
        assert chunk["question_kwd"] == normalized
@pytest.mark.p1
class TestAuthorization:
    """Chunk creation must reject missing or invalid credentials with HTTP-style 401."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        res = add_chunk(invalid_auth)
        assert res["code"] == expected_code, res
        assert res["message"] == expected_message, res
class TestAddChunk:
    """Web-API tests for the add-chunk endpoint.

    Each test records the document's chunk count before the call so a
    successful add can be verified to grow the count by exactly one.
    """

    @staticmethod
    def _chunk_count(auth, doc_id):
        """Return the document's current chunk count.

        Falls back to 0 when listing fails (e.g. the document has not been
        parsed yet, so no chunk listing exists).
        """
        res = list_chunks(auth, {"doc_id": doc_id})
        return res["data"]["doc"]["chunk_num"] if res["code"] == 0 else 0

    def _add_and_verify(self, auth, kb_id, doc_id, payload, expected_code, expected_message):
        """Add a chunk and assert the outcome.

        On success, validate the stored chunk details and check that the
        document's chunk count increased by one; on failure, check the
        returned error message.
        """
        before = self._chunk_count(auth, doc_id)
        res = add_chunk(auth, {**payload, "doc_id": doc_id})
        assert res["code"] == expected_code, res
        if expected_code == 0:
            validate_chunk_details(auth, kb_id, doc_id, payload, res)
            res = list_chunks(auth, {"doc_id": doc_id})
            assert res["code"] == 0, res
            assert res["data"]["doc"]["chunk_num"] == before + 1, res
        else:
            assert res["message"] == expected_message, res

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"content_with_weight": None}, 100, """TypeError("unsupported operand type(s) for +: 'NoneType' and 'str'")"""),
            ({"content_with_weight": ""}, 100, """Exception('Error: 413 - {"error":"Input validation error: `inputs` cannot be empty","error_type":"Validation"}')"""),
            pytest.param(
                {"content_with_weight": 1},
                100,
                """TypeError("unsupported operand type(s) for +: 'int' and 'str'")""",
                marks=pytest.mark.skip,
            ),
            ({"content_with_weight": "a"}, 0, ""),
            ({"content_with_weight": " "}, 0, ""),
            ({"content_with_weight": "\n!?。;!?\"'"}, 0, ""),
        ],
    )
    def test_content(self, WebApiAuth, add_document, payload, expected_code, expected_message):
        """Content values are validated; valid ones create exactly one chunk."""
        kb_id, doc_id = add_document
        self._add_and_verify(WebApiAuth, kb_id, doc_id, payload, expected_code, expected_message)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"content_with_weight": "chunk test", "important_kwd": ["a", "b", "c"]}, 0, ""),
            ({"content_with_weight": "chunk test", "important_kwd": [""]}, 0, ""),
            (
                {"content_with_weight": "chunk test", "important_kwd": [1]},
                100,
                "TypeError('sequence item 0: expected str instance, int found')",
            ),
            ({"content_with_weight": "chunk test", "important_kwd": ["a", "a"]}, 0, ""),
            ({"content_with_weight": "chunk test", "important_kwd": "abc"}, 102, "`important_kwd` is required to be a list"),
            ({"content_with_weight": "chunk test", "important_kwd": 123}, 102, "`important_kwd` is required to be a list"),
        ],
    )
    def test_important_keywords(self, WebApiAuth, add_document, payload, expected_code, expected_message):
        """`important_kwd` must be a list of strings; other shapes are rejected."""
        kb_id, doc_id = add_document
        self._add_and_verify(WebApiAuth, kb_id, doc_id, payload, expected_code, expected_message)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"content_with_weight": "chunk test", "question_kwd": ["a", "b", "c"]}, 0, ""),
            ({"content_with_weight": "chunk test", "question_kwd": [""]}, 100, """Exception('Error: 413 - {"error":"Input validation error: `inputs` cannot be empty","error_type":"Validation"}')"""),
            ({"content_with_weight": "chunk test", "question_kwd": [1]}, 100, "TypeError('sequence item 0: expected str instance, int found')"),
            ({"content_with_weight": "chunk test", "question_kwd": ["a", "a"]}, 0, ""),
            ({"content_with_weight": "chunk test", "question_kwd": "abc"}, 102, "`question_kwd` is required to be a list"),
            ({"content_with_weight": "chunk test", "question_kwd": 123}, 102, "`question_kwd` is required to be a list"),
        ],
    )
    def test_questions(self, WebApiAuth, add_document, payload, expected_code, expected_message):
        """`question_kwd` must be a list of strings; other shapes are rejected."""
        kb_id, doc_id = add_document
        self._add_and_verify(WebApiAuth, kb_id, doc_id, payload, expected_code, expected_message)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "doc_id, expected_code, expected_message",
        [
            ("", 102, "Document not found!"),
            ("invalid_document_id", 102, "Document not found!"),
        ],
    )
    def test_invalid_document_id(self, WebApiAuth, add_document, doc_id, expected_code, expected_message):
        """Empty or unknown document ids are rejected with code 102."""
        _, _ = add_document
        res = add_chunk(WebApiAuth, {"doc_id": doc_id, "content_with_weight": "chunk test"})
        assert res["code"] == expected_code, res
        assert res["message"] == expected_message, res

    @pytest.mark.p3
    def test_repeated_add_chunk(self, WebApiAuth, add_document):
        """Adding identical content twice creates two distinct chunks."""
        payload = {"content_with_weight": "chunk test"}
        kb_id, doc_id = add_document
        res = list_chunks(WebApiAuth, {"doc_id": doc_id})
        assert res["code"] == 0, res
        chunks_count = res["data"]["doc"]["chunk_num"]
        for added in (1, 2):
            res = add_chunk(WebApiAuth, {**payload, "doc_id": doc_id})
            assert res["code"] == 0, res
            validate_chunk_details(WebApiAuth, kb_id, doc_id, payload, res)
            res = list_chunks(WebApiAuth, {"doc_id": doc_id})
            assert res["code"] == 0, res
            assert res["data"]["doc"]["chunk_num"] == chunks_count + added, res

    @pytest.mark.p2
    def test_add_chunk_to_deleted_document(self, WebApiAuth, add_document):
        """Chunks cannot be added to a document that has been deleted."""
        _, doc_id = add_document
        delete_document(WebApiAuth, {"doc_id": doc_id})
        res = add_chunk(WebApiAuth, {"doc_id": doc_id, "content_with_weight": "chunk test"})
        assert res["code"] == 102, res
        assert res["message"] == "Document not found!", res

    @pytest.mark.skip(reason="issues/6411")
    @pytest.mark.p3
    def test_concurrent_add_chunk(self, WebApiAuth, add_document):
        """Concurrent adds should all succeed and grow the count by `count`."""
        count = 50
        _, doc_id = add_document
        chunks_count = self._chunk_count(WebApiAuth, doc_id)
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [
                executor.submit(
                    add_chunk,
                    WebApiAuth,
                    {"doc_id": doc_id, "content_with_weight": f"chunk test {i}"},
                )
                for i in range(count)
            ]
        # Collect every response so a failure message shows all results.
        responses = [future.result() for future in as_completed(futures)]
        assert len(responses) == count, responses
        assert all(r["code"] == 0 for r in responses), responses
        res = list_chunks(WebApiAuth, {"doc_id": doc_id})
        assert res["code"] == 0, res
        assert res["data"]["doc"]["chunk_num"] == chunks_count + count, res
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_web_api/test_message_app/test_get_message_content.py | test/testcases/test_web_api/test_message_app/test_get_message_content.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import pytest
from test_web_api.common import get_message_content, get_recent_message
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
class TestAuthorization:
    """Authentication checks for the get-message-content endpoint."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
        """Missing or bogus credentials must be rejected with a 401 response."""
        response = get_message_content(invalid_auth, "empty_memory_id", 0)
        assert response["code"] == expected_code, response
        assert response["message"] == expected_message, response
@pytest.mark.usefixtures("add_memory_with_multiple_type_message_func")
class TestGetMessageContent:
    """Content retrieval for individual memory messages."""

    @pytest.mark.p1
    def test_get_message_content(self, WebApiAuth):
        """A randomly chosen recent message exposes non-null content fields."""
        memory_id = self.memory_id
        recent = get_recent_message(WebApiAuth, {"memory_id": memory_id})
        assert len(recent["data"]) > 0, recent
        picked = random.choice(recent["data"])
        detail = get_message_content(WebApiAuth, memory_id, picked["message_id"])
        for key in ("content", "content_embed"):
            assert key in detail["data"]
            assert detail["data"][key] is not None, detail
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_web_api/test_message_app/test_get_recent_message.py | test/testcases/test_web_api/test_message_app/test_get_recent_message.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import pytest
from test_web_api.common import get_recent_message
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
class TestAuthorization:
    """Authentication checks for the get-recent-message endpoint."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
        """Missing or bogus credentials must be rejected with a 401 response."""
        response = get_recent_message(invalid_auth)
        assert response["code"] == expected_code, response
        assert response["message"] == expected_message, response
@pytest.mark.usefixtures("add_memory_with_5_raw_message_func")
class TestGetRecentMessage:
    """Listing and filtering of recent memory messages."""

    @pytest.mark.p1
    def test_get_recent_messages(self, WebApiAuth):
        """All five seeded messages are returned for the memory."""
        response = get_recent_message(WebApiAuth, params={"memory_id": self.memory_id})
        assert response["code"] == 0, response
        assert len(response["data"]) == 5, response

    @pytest.mark.p2
    def test_filter_recent_messages_by_agent(self, WebApiAuth):
        """Filtering by agent_id returns only that agent's messages."""
        chosen_agent = random.choice(self.agent_ids)
        response = get_recent_message(WebApiAuth, params={"agent_id": chosen_agent, "memory_id": self.memory_id})
        assert response["code"] == 0, response
        for msg in response["data"]:
            assert msg["agent_id"] == chosen_agent, msg

    @pytest.mark.p2
    def test_filter_recent_messages_by_session(self, WebApiAuth):
        """Filtering by session_id returns only that session's messages."""
        chosen_session = random.choice(self.session_ids)
        response = get_recent_message(WebApiAuth, params={"session_id": chosen_session, "memory_id": self.memory_id})
        assert response["code"] == 0, response
        for msg in response["data"]:
            assert msg["session_id"] == chosen_session, msg
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/test/testcases/test_web_api/test_message_app/test_update_message_status.py | test/testcases/test_web_api/test_message_app/test_update_message_status.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import pytest
from test_web_api.common import update_message_status, list_memory_message, get_message_content
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
class TestAuthorization:
    """Authentication checks for the update-message-status endpoint."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
        """Missing or bogus credentials must be rejected with a 401 response."""
        response = update_message_status(invalid_auth, "empty_memory_id", 0, False)
        assert response["code"] == expected_code, response
        assert response["message"] == expected_message, response
@pytest.mark.usefixtures("add_memory_with_5_raw_message_func")
class TestUpdateMessageStatus:
    """Tests for toggling a memory message's status flag.

    Fixes misleading assertion messages in the originals, which bound the
    wrong response variable (`res` / `set_to_false_res`) to asserts that
    checked a different response.
    """

    def _random_message(self, WebApiAuth, memory_id):
        """Pick a random message from the memory's message list."""
        list_res = list_memory_message(WebApiAuth, memory_id)
        assert list_res["code"] == 0, list_res
        messages = list_res["data"]["messages"]["message_list"]
        assert len(messages) > 0
        return random.choice(messages)

    def _set_status_and_verify(self, WebApiAuth, memory_id, message_id, status):
        """Update a message's status and verify the change is persisted."""
        res = update_message_status(WebApiAuth, memory_id, message_id, status)
        assert res["code"] == 0, res
        detail = get_message_content(WebApiAuth, memory_id, message_id)
        assert detail["code"] == 0, detail
        # The stored flag may be a truthy int; compare by truthiness.
        assert bool(detail["data"]["status"]) is status, detail

    @pytest.mark.p1
    def test_update_to_false(self, WebApiAuth):
        """A message can be disabled (status -> False)."""
        memory_id = self.memory_id
        message = self._random_message(WebApiAuth, memory_id)
        self._set_status_and_verify(WebApiAuth, memory_id, message["message_id"], False)

    @pytest.mark.p1
    def test_update_to_true(self, WebApiAuth):
        """A disabled message can be re-enabled (status -> True)."""
        memory_id = self.memory_id
        message = self._random_message(WebApiAuth, memory_id)
        # Disable first so the enable path actually changes state.
        self._set_status_and_verify(WebApiAuth, memory_id, message["message_id"], False)
        self._set_status_and_verify(WebApiAuth, memory_id, message["message_id"], True)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.