sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
infiniflow/ragflow:common/token_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tiktoken
from common.file_utils import get_project_base_directory
tiktoken_cache_dir = get_project_base_directory()
os.environ["TIKTOKEN_CACHE_DIR"] = tiktoken_cache_dir
# encoder = tiktoken.encoding_for_model("gpt-3.5-turbo")
encoder = tiktoken.get_encoding("cl100k_base")
def num_tokens_from_string(string: str) -> int:
    """Return how many tokens the shared tiktoken encoder produces for *string*.

    Any failure inside the encoder is treated as "no tokens" and yields 0.
    """
    try:
        return len(encoder.encode(string))
    except Exception:
        return 0
def total_token_count_from_response(resp):
    """
    Extract the total token count from an LLM response in various formats.

    Handles None responses and the shapes used by different providers:
    attribute-style objects (``usage.total_tokens``,
    ``usage_metadata.total_tokens``, ``meta.billed_units.input_tokens``)
    and plain dicts with the analogous keys.
    Returns 0 if the token count cannot be determined.
    """
    if resp is None:
        return 0
    # Attribute-style responses, probed in priority order.  Each probe is
    # individually guarded so an exotic object raising on attribute access
    # cannot break the remaining lookups.
    attribute_paths = (
        ("usage", "total_tokens"),
        ("usage_metadata", "total_tokens"),
        ("meta", "billed_units", "input_tokens"),
    )
    for path in attribute_paths:
        try:
            node = resp
            for name in path:
                if not hasattr(node, name):
                    break
                node = getattr(node, name)
            else:
                # Whole path resolved: return whatever count it holds.
                return node
        except Exception:
            pass
    # Dict-style responses, same priority order as above.
    if isinstance(resp, dict):
        if "usage" in resp and "total_tokens" in resp["usage"]:
            try:
                return resp["usage"]["total_tokens"]
            except Exception:
                pass
        if "usage" in resp and "input_tokens" in resp["usage"] and "output_tokens" in resp["usage"]:
            try:
                return resp["usage"]["input_tokens"] + resp["usage"]["output_tokens"]
            except Exception:
                pass
        if (
            "meta" in resp
            and "tokens" in resp["meta"]
            and "input_tokens" in resp["meta"]["tokens"]
            and "output_tokens" in resp["meta"]["tokens"]
        ):
            try:
                counts = resp["meta"]["tokens"]
                return counts["input_tokens"] + counts["output_tokens"]
            except Exception:
                pass
    return 0
def truncate(string: str, max_len: int) -> str:
    """Cut *string* down so that it encodes to at most *max_len* tokens."""
    token_ids = encoder.encode(string)
    return encoder.decode(token_ids[:max_len])
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/token_utils.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:test/unit_test/common/test_token_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from common.token_utils import num_tokens_from_string, total_token_count_from_response, truncate, encoder
import pytest
class TestNumTokensFromString:
    """Unit tests for num_tokens_from_string."""

    def test_empty_string(self):
        """An empty string encodes to zero tokens."""
        assert num_tokens_from_string("") == 0

    def test_single_word(self):
        """A single short word is one cl100k_base token."""
        assert num_tokens_from_string("hello") == 1

    def test_multiple_words(self):
        """Two plain words encode to two tokens."""
        assert num_tokens_from_string("hello world") == 2

    def test_special_characters(self):
        """Punctuation marks produce their own tokens."""
        assert num_tokens_from_string("hello, world!") == 4

    def test_hanzi_characters(self):
        """Non-Latin (CJK) text still yields at least one token."""
        assert num_tokens_from_string("ไธ็") > 0

    def test_unicode_characters(self):
        """Mixed ASCII / CJK / emoji input yields a positive count."""
        assert num_tokens_from_string("Hello ไธ็ ๐") > 0

    def test_long_text(self):
        """A multi-sentence paragraph yields well over ten tokens."""
        paragraph = (
            "This is a longer piece of text that should contain multiple sentences. "
            "It will help verify that the token counting works correctly for substantial input."
        )
        assert num_tokens_from_string(paragraph) > 10

    def test_whitespace_only(self):
        """Whitespace-only input never yields a negative count."""
        assert num_tokens_from_string(" \n\t ") >= 0

    def test_numbers(self):
        """Numeric text yields a positive count."""
        assert num_tokens_from_string("12345 678.90") > 0

    def test_mixed_content(self):
        """Text mixing words, digits, currency and emoji yields tokens."""
        assert num_tokens_from_string("Hello! 123 Main St. Price: $19.99 ๐") > 0

    def test_encoding_error_handling(self):
        """Placeholder: exercising the 0-on-failure path needs encoder mocking."""
        pass
# Additional parameterized coverage: minimum token counts for small samples.
@pytest.mark.parametrize("input_string,expected_min_tokens", [
    ("a", 1),                    # single character
    ("test", 1),                 # single word
    ("hello world", 2),          # two words
    ("This is a sentence.", 4),  # short sentence
    # ("A" * 100, 100),          # repeated characters
])
def test_token_count_ranges(input_string, expected_min_tokens):
    """Each sample must produce at least the listed number of tokens."""
    assert num_tokens_from_string(input_string) >= expected_min_tokens
def test_consistency():
    """Repeated calls on identical input must agree and be positive."""
    sample = "Consistent token counting"
    counts = [num_tokens_from_string(sample) for _ in range(2)]
    assert counts[0] == counts[1]
    assert counts[0] > 0
class TestTotalTokenCountFromResponse:
    """Unit tests for total_token_count_from_response."""

    def test_dict_with_usage_total_tokens(self):
        """A dict carrying usage.total_tokens reports it directly."""
        assert total_token_count_from_response({"usage": {"total_tokens": 175}}) == 175

    def test_dict_with_usage_input_output_tokens(self):
        """Input and output counts under 'usage' are summed."""
        resp = {"usage": {"input_tokens": 100, "output_tokens": 50}}
        assert total_token_count_from_response(resp) == 150

    def test_dict_with_meta_tokens_input_output(self):
        """Counts nested under meta.tokens are summed."""
        resp = {"meta": {"tokens": {"input_tokens": 80, "output_tokens": 40}}}
        assert total_token_count_from_response(resp) == 120

    def test_priority_order_dict_usage_total_tokens_third(self):
        """usage.total_tokens wins over every other dict field."""
        resp = {
            "usage": {"total_tokens": 180, "input_tokens": 100, "output_tokens": 80},
            "meta": {"tokens": {"input_tokens": 200, "output_tokens": 100}},
        }
        assert total_token_count_from_response(resp) == 180

    def test_priority_order_dict_usage_input_output_fourth(self):
        """usage input+output beats the meta.tokens fallback."""
        resp = {
            "usage": {"input_tokens": 120, "output_tokens": 60},
            "meta": {"tokens": {"input_tokens": 200, "output_tokens": 100}},
        }
        assert total_token_count_from_response(resp) == 180

    def test_priority_order_meta_tokens_last(self):
        """meta.tokens is consulted only when nothing else matches."""
        resp = {"meta": {"tokens": {"input_tokens": 90, "output_tokens": 30}}}
        assert total_token_count_from_response(resp) == 120

    def test_no_token_info_returns_zero(self):
        """An empty dict carries no token info and yields 0."""
        assert total_token_count_from_response({}) == 0

    def test_partial_dict_usage_missing_output_tokens(self):
        """usage with only input_tokens does not match any rule: 0."""
        assert total_token_count_from_response({"usage": {"input_tokens": 100}}) == 0

    def test_partial_meta_tokens_missing_input_tokens(self):
        """meta.tokens lacking input_tokens does not match any rule: 0."""
        resp = {"meta": {"tokens": {"output_tokens": 50}}}
        assert total_token_count_from_response(resp) == 0

    def test_none_response(self):
        """None is tolerated and yields 0."""
        assert total_token_count_from_response(None) == 0

    def test_invalid_response_type(self):
        """A plain string has no usable token info and yields 0."""
        assert total_token_count_from_response("invalid response") == 0
        # assert total_token_count_from_response(123) == 0
class TestTruncate:
    """Unit tests for truncate."""

    def test_empty_string(self):
        """Truncating an empty string is a no-op and returns a str."""
        out = truncate("", 5)
        assert out == ""
        assert isinstance(out, str)

    def test_string_shorter_than_max_len(self):
        """Input below the token budget is returned unchanged."""
        out = truncate("hello", 10)
        assert out == "hello"
        assert len(encoder.encode(out)) <= 10

    def test_string_equal_to_max_len(self):
        """Input exactly at the token budget is returned unchanged."""
        text = "hello world test"
        budget = len(encoder.encode(text))
        out = truncate(text, budget)
        assert out == text
        assert len(encoder.encode(out)) == budget

    def test_string_longer_than_max_len(self):
        """Over-budget input is shortened to exactly max_len tokens."""
        text = "This is a longer string that will be truncated"
        out = truncate(text, 5)
        assert len(encoder.encode(out)) == 5
        assert out != text

    def test_truncation_preserves_beginning(self):
        """The kept tokens are a prefix of the original encoding."""
        text = "The quick brown fox jumps over the lazy dog"
        out = truncate(text, 3)
        assert encoder.encode(out) == encoder.encode(text)[:3]

    def test_unicode_characters(self):
        """Unicode input truncates to the exact token budget."""
        out = truncate("Hello ไธ็ ๐ ๆต่ฏ", 4)
        assert len(encoder.encode(out)) == 4
        assert isinstance(out, str)

    def test_special_characters(self):
        """Symbol-heavy input truncates to the exact token budget."""
        out = truncate("Hello, world! @#$%^&*()", 3)
        assert len(encoder.encode(out)) == 3

    def test_whitespace_string(self):
        """Whitespace-only input stays within the token budget."""
        out = truncate(" \n\t ", 2)
        assert len(encoder.encode(out)) <= 2
        assert isinstance(out, str)

    def test_max_len_zero(self):
        """A zero budget yields the empty string."""
        out = truncate("hello world", 0)
        assert out == ""
        assert len(encoder.encode(out)) == 0

    def test_max_len_one(self):
        """A budget of one yields exactly one token."""
        assert len(encoder.encode(truncate("hello world", 1))) == 1

    def test_preserves_decoding_encoding_consistency(self):
        """Re-encoding the truncated text reproduces the budgeted count."""
        out = truncate("This is a test string for encoding consistency", 6)
        assert len(encoder.encode(out)) == 6

    def test_multibyte_characters_truncation(self):
        """Emoji spanning several tokens still truncate to the budget."""
        out = truncate("๐๐๐โจ๐ฅ๐ซ", 3)
        assert len(encoder.encode(out)) == 3

    def test_mixed_english_chinese_text(self):
        """Mixed-script input truncates to the exact token budget."""
        out = truncate("Hello ไธ็, this is a test ๆต่ฏ", 5)
        assert len(encoder.encode(out)) == 5

    def test_numbers_and_symbols(self):
        """Digits and symbols truncate to the exact token budget."""
        out = truncate("12345 678.90 $100.00 @username #tag", 4)
        assert len(encoder.encode(out)) == 4
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/unit_test/common/test_token_utils.py",
"license": "Apache License 2.0",
"lines": 305,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/unit_test/common/test_file_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pytest
from unittest.mock import patch
from common import file_utils
from common.file_utils import get_project_base_directory
class TestGetProjectBaseDirectory:
    """Unit tests for get_project_base_directory."""

    def test_returns_project_base_when_no_args(self):
        """With no components the function yields an absolute base path."""
        base = get_project_base_directory()
        assert base is not None
        assert isinstance(base, str)
        assert os.path.isabs(base)  # must be an absolute path

    def test_returns_path_with_single_argument(self):
        """A single component is appended to the base path."""
        path = get_project_base_directory("subfolder")
        assert path is not None
        assert "subfolder" in path
        assert path.endswith("subfolder")

    def test_returns_path_with_multiple_arguments(self):
        """Several components are joined onto the base path in order."""
        path = get_project_base_directory("folder1", "folder2", "file.txt")
        assert path is not None
        for part in ("folder1", "folder2", "file.txt"):
            assert part in path
        assert os.path.basename(path) == "file.txt"

    def test_uses_environment_variable_when_available(self):
        """A pre-seeded PROJECT_BASE module global is returned verbatim."""
        custom_base = "/custom/project/path"
        file_utils.PROJECT_BASE = custom_base
        assert get_project_base_directory() == custom_base

    def test_calculates_default_path_when_no_env_vars(self):
        """Without environment variables a valid absolute path is produced."""
        with patch.dict(os.environ, {}, clear=True):  # drop all env vars
            path = get_project_base_directory()
            assert path is not None
            assert os.path.isabs(path)
            assert os.path.basename(path) != ""  # must not be the fs root

    def test_caches_project_base_value(self):
        """The computed base is cached in file_utils.PROJECT_BASE."""
        first = get_project_base_directory()
        cached = file_utils.PROJECT_BASE
        second = get_project_base_directory()
        assert first == second
        assert file_utils.PROJECT_BASE == cached

    def test_path_components_joined_correctly(self):
        """Joining matches an explicit os.path.join on the base."""
        base = get_project_base_directory()
        expected = os.path.join(base, "data", "files", "document.txt")
        assert get_project_base_directory("data", "files", "document.txt") == expected

    def test_handles_empty_string_arguments(self):
        """An empty component still yields a valid absolute path."""
        path = get_project_base_directory("")
        assert path is not None
        assert os.path.isabs(path)
# Parameterized coverage for assorted path-component combinations.
@pytest.mark.parametrize("path_args,expected_suffix", [
    ((), ""),  # no extra components -> the base directory itself
    (("src",), "src"),
    (("data", "models"), os.path.join("data", "models")),
    (("config", "app", "settings.json"), os.path.join("config", "app", "settings.json")),
])
def test_various_path_combinations(path_args, expected_suffix):
    """Joined paths end with the expected suffix; no args yields the base."""
    base = get_project_base_directory()
    joined = get_project_base_directory(*path_args)
    if expected_suffix:
        assert joined.endswith(expected_suffix)
    else:
        assert joined == base
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/unit_test/common/test_file_utils.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/unit_test/common/test_decorator.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from common.decorator import singleton
# Test class for demonstration: a minimal stateful class wrapped by the
# project's @singleton decorator, so every TestClass() call should return
# the same shared instance.
@singleton
class TestClass:
    def __init__(self):
        # Shared mutable state; persists across TestClass() calls because the
        # singleton wrapper hands back one cached instance.
        self.counter = 0

    def increment(self):
        """Increase the shared counter by one and return its new value."""
        self.counter += 1
        return self.counter
# Test cases for singleton semantics.
class TestSingleton:
    def test_state_persistence(self):
        """Mutations made through one reference are visible through another."""
        first = TestClass()
        first.increment()
        first.increment()
        second = TestClass()
        assert second.counter == 2  # same underlying object

    def test_multiple_calls_consistency(self):
        """Every construction returns the identical instance."""
        handles = [TestClass() for _ in range(5)]
        reference = handles[0]
        for handle in handles:
            assert handle is reference

    def test_instance_methods_work(self):
        """Methods keep operating on the shared state.

        NOTE(review): relies on test_state_persistence having run first so the
        shared counter already sits at 2 — order-dependent by design here.
        """
        shared = TestClass()
        assert shared.increment() == 3
        assert shared.increment() == 4
        assert shared.counter == 4
# Exercise the decorator directly rather than through a decorated class.
def test_singleton_decorator_returns_callable():
    """singleton() wraps a class into a callable that yields its instances."""
    class Undecorated:
        pass

    wrapped = singleton(Undecorated)
    assert callable(wrapped)
    assert isinstance(wrapped(), Undecorated)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/unit_test/common/test_decorator.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/unit_test/common/test_misc_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
import hashlib
from common.misc_utils import get_uuid, download_img, hash_str2int, convert_bytes
class TestGetUuid:
    """Unit tests for get_uuid."""

    def test_returns_string(self):
        """The generated id is a str."""
        assert isinstance(get_uuid(), str)

    def test_hex_format(self):
        """The id is 32 hex characters (a dash-free UUID)."""
        value = get_uuid()
        assert len(value) == 32
        assert all(ch in '0123456789abcdef' for ch in value)

    def test_no_dashes_in_result(self):
        """No dash separators survive the hex rendering."""
        assert '-' not in get_uuid()

    def test_unique_results(self):
        """Ten consecutive ids are pairwise distinct and well-formed."""
        values = [get_uuid() for _ in range(10)]
        assert len(set(values)) == len(values)
        for value in values:
            assert len(value) == 32
            assert all(ch in '0123456789abcdef' for ch in value)

    def test_valid_uuid_structure(self):
        """The hex string round-trips through uuid.UUID."""
        value = get_uuid()
        rebuilt = uuid.UUID(hex=value)
        assert isinstance(rebuilt, uuid.UUID)
        assert rebuilt.hex == value

    def test_uuid1_specific_characteristics(self):
        """The id is a version-1, RFC 4122-variant UUID."""
        parsed = uuid.UUID(hex=get_uuid())
        assert parsed.version == 1
        assert parsed.variant == 'specified in RFC 4122'

    def test_result_length_consistency(self):
        """Length stays 32 across many generations."""
        for _ in range(100):
            assert len(get_uuid()) == 32

    def test_hex_characters_only(self):
        """Only lowercase hex digits ever appear."""
        for _ in range(100):
            value = get_uuid()
            assert value.islower()
            assert all(ch in '0123456789abcdef' for ch in value)
class TestDownloadImg:
    """Unit tests for download_img (network-free paths only)."""

    def test_empty_url_returns_empty_string(self):
        """An empty URL short-circuits to the empty string."""
        assert download_img("") == ""

    def test_none_url_returns_empty_string(self):
        """A None URL short-circuits to the empty string."""
        assert download_img(None) == ""
class TestHashStr2Int:
    """Unit tests for hash_str2int (SHA-1 of UTF-8 bytes, reduced mod N)."""

    def test_basic_hashing(self):
        """Hashing yields an int inside the default 10**8 range."""
        value = hash_str2int("hello")
        assert isinstance(value, int)
        assert 0 <= value < 10 ** 8

    def test_default_mod_value(self):
        """The implicit modulus is 10**8."""
        assert 0 <= hash_str2int("test") < 10 ** 8

    def test_custom_mod_value(self):
        """An explicit modulus bounds the result."""
        value = hash_str2int("test", mod=1000)
        assert isinstance(value, int)
        assert 0 <= value < 1000

    def test_same_input_same_output(self):
        """The hash is deterministic across repeated calls."""
        outputs = {hash_str2int("consistent") for _ in range(3)}
        assert len(outputs) == 1

    def test_different_input_different_output(self):
        """Distinct inputs hash apart (collisions are vanishingly rare here)."""
        values = [hash_str2int(s) for s in ("hello", "world", "hello world")]
        assert len(set(values)) == len(values)

    def test_empty_string(self):
        """The empty string hashes into the default range."""
        value = hash_str2int("")
        assert isinstance(value, int)
        assert 0 <= value < 10 ** 8

    def test_unicode_string(self):
        """Non-ASCII input is accepted and stays in range."""
        samples = [
            "ไธญๆ",
            "๐็ซ็ฎญ",
            "cafรฉ",
            "๐",
            "Hello ไธ็",
        ]
        for sample in samples:
            value = hash_str2int(sample)
            assert isinstance(value, int)
            assert 0 <= value < 10 ** 8

    def test_special_characters(self):
        """Punctuation, newlines and tabs are accepted."""
        samples = [
            "hello@world.com",
            "test#123",
            "line\nwith\nnewlines",
            "tab\tcharacter",
            "space in string",
        ]
        for sample in samples:
            value = hash_str2int(sample)
            assert isinstance(value, int)
            assert 0 <= value < 10 ** 8

    def test_large_string(self):
        """A 10k-character input hashes within the default range."""
        value = hash_str2int("x" * 10000)
        assert isinstance(value, int)
        assert 0 <= value < 10 ** 8

    def test_mod_value_1(self):
        """Modulus 1 forces every hash to 0."""
        assert hash_str2int("any string", mod=1) == 0

    def test_mod_value_2(self):
        """Modulus 2 yields a single bit."""
        assert hash_str2int("test", mod=2) in (0, 1)

    def test_very_large_mod(self):
        """A huge modulus still bounds the result correctly."""
        value = hash_str2int("test", mod=10 ** 12)
        assert isinstance(value, int)
        assert 0 <= value < 10 ** 12

    def test_hash_algorithm_sha1(self):
        """The implementation matches SHA-1 of the UTF-8 bytes mod 10**8."""
        digest = hashlib.sha1("hello".encode("utf-8")).hexdigest()
        assert hash_str2int("hello") == int(digest, 16) % (10 ** 8)

    def test_utf8_encoding(self):
        """UTF-8 encoding handles accents and emoji without errors."""
        assert isinstance(hash_str2int("cafรฉ ๐"), int)

    def test_range_with_different_mods(self):
        """The result is in [0, mod) for assorted moduli."""
        for sample, modulus in (("test1", 100), ("test2", 1000),
                                ("test3", 10000), ("test4", 999999)):
            assert 0 <= hash_str2int(sample, mod=modulus) < modulus

    def test_hexdigest_conversion(self):
        """hexdigest-to-int conversion matches a manual computation."""
        manual = int(hashlib.sha1("hello".encode("utf-8")).hexdigest(), 16) % (10 ** 8)
        assert hash_str2int("hello") == manual

    def test_consistent_with_direct_calculation(self):
        """Several inputs agree with direct hashlib usage."""
        for sample in ("a", "b", "abc", "hello world", "12345"):
            manual = int(hashlib.sha1(sample.encode("utf-8")).hexdigest(), 16) % (10 ** 8)
            assert hash_str2int(sample) == manual

    def test_numeric_strings(self):
        """Digit-only and numeric-looking strings are accepted."""
        for sample in ("123", "0", "999999", "3.14159", "-42"):
            value = hash_str2int(sample)
            assert isinstance(value, int)
            assert 0 <= value < 10 ** 8

    def test_whitespace_strings(self):
        """Leading/trailing/internal whitespace variants are accepted."""
        samples = [
            " leading",
            "trailing ",
            " both ",
            "\ttab",
            "new\nline",
            "\r\nwindows",
        ]
        for sample in samples:
            value = hash_str2int(sample)
            assert isinstance(value, int)
            assert 0 <= value < 10 ** 8
class TestConvertBytes:
    """Unit tests for convert_bytes human-readable size formatting."""

    def test_zero_bytes(self):
        """Zero renders as '0 B'."""
        assert convert_bytes(0) == "0 B"

    def test_single_byte(self):
        """Sub-kilobyte values render as whole bytes."""
        assert convert_bytes(1) == "1 B"
        assert convert_bytes(999) == "999 B"

    def test_kilobyte_range(self):
        """Kilobyte values pick a precision matching their magnitude."""
        assert convert_bytes(1024) == "1.00 KB"   # exactly 1 KB
        assert convert_bytes(15360) == "15.0 KB"  # 10-99.9 range -> 1 decimal
        assert convert_bytes(10752) == "10.5 KB"
        assert convert_bytes(2048) == "2.00 KB"   # 1-9.99 range -> 2 decimals
        assert convert_bytes(3072) == "3.00 KB"
        assert convert_bytes(5120) == "5.00 KB"

    def test_megabyte_range(self):
        """Megabyte values follow the same precision rules."""
        assert convert_bytes(1048576) == "1.00 MB"
        assert convert_bytes(15728640) == "15.0 MB"
        assert convert_bytes(11010048) == "10.5 MB"

    def test_gigabyte_range(self):
        """Gigabyte values render with two decimals below 10 GB."""
        assert convert_bytes(1073741824) == "1.00 GB"
        assert convert_bytes(3221225472) == "3.00 GB"

    def test_terabyte_range(self):
        """One terabyte renders as '1.00 TB'."""
        assert convert_bytes(1099511627776) == "1.00 TB"

    def test_petabyte_range(self):
        """One petabyte renders as '1.00 PB'."""
        assert convert_bytes(1125899906842624) == "1.00 PB"

    def test_boundary_values(self):
        """Unit and precision switch exactly at the boundaries."""
        assert convert_bytes(1023) == "1023 B"    # just below 1 KB
        assert convert_bytes(1025) == "1.00 KB"   # just above 1 KB
        assert convert_bytes(102400) == "100 KB"  # 100 KB -> 0 decimals
        assert convert_bytes(102300) == "99.9 KB"

    def test_precision_transitions(self):
        """Decimal places drop as the mantissa grows: 9.00 -> 10.0 -> 100."""
        assert convert_bytes(9216) == "9.00 KB"
        assert convert_bytes(10240) == "10.0 KB"
        assert convert_bytes(102400) == "100 KB"

    def test_large_values_no_overflow(self):
        """Multi-PB values stay in PB rather than overflowing the unit list."""
        assert "PB" in convert_bytes(10 * 1125899906842624)
        assert "PB" in convert_bytes(100 * 1125899906842624)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/unit_test/common/test_misc_utils.py",
"license": "Apache License 2.0",
"lines": 284,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/unit_test/common/test_float_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
from common.float_utils import get_float
class TestGetFloat:
def test_valid_float_string(self):
"""Test conversion of valid float strings"""
assert get_float("3.14") == 3.14
assert get_float("-2.5") == -2.5
assert get_float("0.0") == 0.0
assert get_float("123.456") == 123.456
def test_valid_integer_string(self):
"""Test conversion of valid integer strings"""
assert get_float("42") == 42.0
assert get_float("-100") == -100.0
assert get_float("0") == 0.0
def test_valid_numbers(self):
"""Test conversion of actual number types"""
assert get_float(3.14) == 3.14
assert get_float(-2.5) == -2.5
assert get_float(42) == 42.0
assert get_float(0) == 0.0
def test_none_input(self):
"""Test handling of None input"""
result = get_float(None)
assert math.isinf(result)
assert result < 0 # Should be negative infinity
def test_invalid_strings(self):
"""Test handling of invalid string inputs"""
result = get_float("invalid")
assert math.isinf(result)
assert result < 0
result = get_float("12.34.56")
assert math.isinf(result)
assert result < 0
result = get_float("")
assert math.isinf(result)
assert result < 0
def test_boolean_input(self):
"""Test conversion of boolean values"""
assert get_float(True) == 1.0
assert get_float(False) == 0.0
def test_special_float_strings(self):
"""Test handling of special float strings"""
assert get_float("inf") == float('inf')
assert get_float("-inf") == float('-inf')
# NaN should return -inf according to our function's design
result = get_float("nan")
assert math.isnan(result)
def test_very_large_numbers(self):
"""Test very large number strings"""
assert get_float("1e308") == 1e308
# This will become inf in Python, but let's test it
large_result = get_float("1e500")
assert math.isinf(large_result)
def test_whitespace_strings(self):
"""Test strings with whitespace"""
assert get_float(" 3.14 ") == 3.14
result = get_float(" invalid ")
assert math.isinf(result)
assert result < 0 | {
"repo_id": "infiniflow/ragflow",
"file_path": "test/unit_test/common/test_float_utils.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/unit_test/common/test_string_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common.string_utils import remove_redundant_spaces, clean_markdown_block
class TestRemoveRedundantSpaces:
# Basic punctuation tests
@pytest.mark.skip(reason="Failed")
def test_remove_spaces_before_commas(self):
"""Test removing spaces before commas"""
input_text = "Hello , world"
expected = "Hello, world"
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_remove_spaces_before_periods(self):
"""Test removing spaces before periods"""
input_text = "This is a test ."
expected = "This is a test."
assert remove_redundant_spaces(input_text) == expected
def test_remove_spaces_before_exclamation(self):
"""Test removing spaces before exclamation marks"""
input_text = "Amazing !"
expected = "Amazing!"
assert remove_redundant_spaces(input_text) == expected
def test_remove_spaces_after_opening_parenthesis(self):
"""Test removing spaces after opening parenthesis"""
input_text = "This is ( test)"
expected = "This is (test)"
assert remove_redundant_spaces(input_text) == expected
def test_remove_spaces_before_closing_parenthesis(self):
"""Test removing spaces before closing parenthesis"""
input_text = "This is (test )"
expected = "This is (test)"
assert remove_redundant_spaces(input_text) == expected
def test_keep_spaces_between_words(self):
"""Test preserving normal spaces between words"""
input_text = "This should remain unchanged"
expected = "This should remain unchanged"
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_mixed_punctuation(self):
"""Test mixed punctuation scenarios"""
input_text = "Hello , world ! This is ( test ) ."
expected = "Hello, world! This is (test)."
assert remove_redundant_spaces(input_text) == expected
# Numbers and special formats
@pytest.mark.skip(reason="Failed")
def test_with_numbers(self):
"""Test handling of numbers"""
input_text = "I have 100 , 000 dollars ."
expected = "I have 100, 000 dollars."
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_decimal_numbers(self):
"""Test decimal numbers"""
input_text = "The value is 3 . 14 ."
expected = "The value is 3.14."
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_time_format(self):
"""Test time format handling"""
input_text = "Time is 12 : 30 PM ."
expected = "Time is 12:30 PM."
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_currency_symbols(self):
"""Test currency symbols"""
input_text = "Price : โฌ 100 , ยฃ 50 , ยฅ 1000 ."
expected = "Price: โฌ100, ยฃ50, ยฅ1000."
assert remove_redundant_spaces(input_text) == expected
# Edge cases and special characters
def test_empty_string(self):
"""Test empty string input"""
assert remove_redundant_spaces("") == ""
def test_only_spaces(self):
"""Test input with only spaces"""
input_text = " "
expected = " "
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_no_redundant_spaces(self):
"""Test text without redundant spaces"""
input_text = "Hello, world! This is (test)."
expected = "Hello, world! This is (test)."
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_multiple_spaces(self):
"""Test multiple consecutive spaces"""
input_text = "Hello , world !"
expected = "Hello, world!"
assert remove_redundant_spaces(input_text) == expected
def test_angle_brackets(self):
"""Test angle brackets handling"""
input_text = "This is < test >"
expected = "This is <test>"
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_case_insensitive(self):
"""Test case insensitivity"""
input_text = "HELLO , World !"
expected = "HELLO, World!"
assert remove_redundant_spaces(input_text) == expected
# Additional punctuation marks
@pytest.mark.skip(reason="Failed")
def test_semicolon_and_colon(self):
"""Test semicolon and colon handling"""
input_text = "Items : apple ; banana ; orange ."
expected = "Items: apple; banana; orange."
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_quotation_marks(self):
"""Test quotation marks handling"""
input_text = 'He said , " Hello " .'
expected = 'He said, "Hello".'
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_abbreviations(self):
"""Test abbreviations"""
input_text = "Dr . Smith and Mr . Jones ."
expected = "Dr. Smith and Mr. Jones."
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_multiple_punctuation(self):
"""Test multiple consecutive punctuation marks"""
input_text = "Wow !! ... Really ??"
expected = "Wow!! ... Really??"
assert remove_redundant_spaces(input_text) == expected
# Special text formats
@pytest.mark.skip(reason="Failed")
def test_email_addresses(self):
"""Test email addresses (should not be modified ideally)"""
input_text = "Contact me at test @ example . com ."
expected = "Contact me at test@example.com."
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_urls(self):
"""Test URLs (might be modified by current function)"""
input_text = "Visit https : //example.com / path ."
expected = "Visit https://example.com/path."
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_hashtags_and_mentions(self):
"""Test hashtags and mentions"""
input_text = "Check out # topic and @ user ."
expected = "Check out #topic and @user."
assert remove_redundant_spaces(input_text) == expected
# Complex structures
@pytest.mark.skip(reason="Failed")
def test_nested_parentheses(self):
"""Test nested parentheses"""
input_text = "Outer ( inner ( deep ) ) ."
expected = "Outer (inner (deep))."
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_math_expressions(self):
"""Test mathematical expressions"""
input_text = "Calculate 2 + 2 = 4 ."
expected = "Calculate 2 + 2 = 4."
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_html_tags(self):
"""Test HTML tags"""
input_text = "< p > This is a paragraph . < / p >"
expected = "<p> This is a paragraph. </p>"
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_programming_code(self):
"""Test programming code snippets"""
input_text = "Code : if ( x > 0 ) { print ( 'hello' ) ; }"
expected = "Code: if (x > 0) {print ('hello');}"
assert remove_redundant_spaces(input_text) == expected
# Unicode and special symbols
@pytest.mark.skip(reason="Failed")
def test_unicode_and_special_symbols(self):
"""Test Unicode characters and special symbols"""
input_text = "Copyright ยฉ 2023 , All rights reserved ."
expected = "Copyright ยฉ 2023, All rights reserved."
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_mixed_chinese_english(self):
"""Test mixed Chinese and English text"""
input_text = "ไฝ ๅฅฝ , world ! ่ฟๆฏ ( ๆต่ฏ ) ."
expected = "ไฝ ๅฅฝ, world! ่ฟๆฏ (ๆต่ฏ)."
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_special_characters_in_pattern(self):
"""Test special characters in the pattern"""
input_text = "Price is $ 100 . 00 , tax included ."
expected = "Price is $100.00, tax included."
assert remove_redundant_spaces(input_text) == expected
@pytest.mark.skip(reason="Failed")
def test_tabs_and_newlines(self):
"""Test tabs and newlines handling"""
input_text = "Hello ,\tworld !\nThis is ( test ) ."
expected = "Hello,\tworld!\nThis is (test)."
assert remove_redundant_spaces(input_text) == expected
class TestCleanMarkdownBlock:
def test_standard_markdown_block(self):
"""Test standard Markdown code block syntax"""
input_text = "```markdown\nHello world\n```"
expected = "Hello world"
assert clean_markdown_block(input_text) == expected
def test_with_whitespace_variations(self):
"""Test markdown blocks with various whitespace patterns"""
input_text = " ```markdown \n Content here \n ``` "
expected = "Content here"
assert clean_markdown_block(input_text) == expected
def test_multiline_content(self):
"""Test markdown blocks with multiple lines of content"""
input_text = "```markdown\nLine 1\nLine 2\nLine 3\n```"
expected = "Line 1\nLine 2\nLine 3"
assert clean_markdown_block(input_text) == expected
def test_no_opening_newline(self):
"""Test markdown block without newline after opening tag"""
input_text = "```markdownHello world\n```"
expected = "Hello world"
assert clean_markdown_block(input_text) == expected
def test_no_closing_newline(self):
"""Test markdown block without newline before closing tag"""
input_text = "```markdown\nHello world```"
expected = "Hello world"
assert clean_markdown_block(input_text) == expected
def test_empty_markdown_block(self):
"""Test empty Markdown code block"""
input_text = "```markdown\n```"
expected = ""
assert clean_markdown_block(input_text) == expected
def test_only_whitespace_content(self):
"""Test markdown block containing only whitespace"""
input_text = "```markdown\n \n\t\n\n```"
expected = ""
assert clean_markdown_block(input_text) == expected
def test_plain_text_without_markdown(self):
"""Test text that doesn't contain markdown block syntax"""
input_text = "This is plain text without any code blocks"
expected = "This is plain text without any code blocks"
assert clean_markdown_block(input_text) == expected
def test_partial_markdown_syntax(self):
"""Test text with only opening or closing tags"""
input_text = "```markdown\nUnclosed block"
expected = "Unclosed block"
assert clean_markdown_block(input_text) == expected
input_text = "Unopened block\n```"
expected = "Unopened block"
assert clean_markdown_block(input_text) == expected
def test_mixed_whitespace_characters(self):
"""Test with tabs, spaces, and mixed whitespace"""
input_text = "\t```markdown\t\n\tContent with tabs\n\t```\t"
expected = "Content with tabs"
assert clean_markdown_block(input_text) == expected
def test_preserves_internal_whitespace(self):
"""Test that internal whitespace is preserved"""
input_text = "```markdown\n Preserve internal \n whitespace \n```"
expected = "Preserve internal \n whitespace"
assert clean_markdown_block(input_text) == expected
def test_special_characters_content(self):
"""Test markdown block with special characters"""
input_text = "```markdown\n# Header\n**Bold** and *italic*\n```"
expected = "# Header\n**Bold** and *italic*"
assert clean_markdown_block(input_text) == expected
def test_empty_string(self):
"""Test empty string input"""
input_text = ""
expected = ""
assert clean_markdown_block(input_text) == expected
def test_only_markdown_tags(self):
"""Test input containing only Markdown tags"""
input_text = "```markdown```"
expected = ""
assert clean_markdown_block(input_text) == expected
def test_windows_line_endings(self):
"""Test markdown block with Windows line endings"""
input_text = "```markdown\r\nHello world\r\n```"
expected = "Hello world"
assert clean_markdown_block(input_text) == expected
def test_unix_line_endings(self):
"""Test markdown block with Unix line endings"""
input_text = "```markdown\nHello world\n```"
expected = "Hello world"
assert clean_markdown_block(input_text) == expected
def test_nested_code_blocks_preserved(self):
"""Test that nested code blocks within content are preserved"""
input_text = "```markdown\nText with ```nested``` blocks\n```"
expected = "Text with ```nested``` blocks"
assert clean_markdown_block(input_text) == expected
def test_multiple_markdown_blocks(self):
"""Test behavior with multiple markdown blocks (takes first and last)"""
input_text = "```markdown\nFirst line\n```\n```markdown\nSecond line\n```"
expected = "First line\n```\n```markdown\nSecond line"
assert clean_markdown_block(input_text) == expected
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/unit_test/common/test_string_utils.py",
"license": "Apache License 2.0",
"lines": 302,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:deepdoc/parser/tcadp_parser.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import json
import logging
import os
import re
import shutil
import tempfile
import time
import traceback
import types
import zipfile
from datetime import datetime
from io import BytesIO
from os import PathLike
from pathlib import Path
from typing import Any, Callable, Optional
import requests
from tencentcloud.common import credential
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.lkeap.v20240522 import lkeap_client, models
from common.config_utils import get_base_config
from deepdoc.parser.pdf_parser import RAGFlowPdfParser
class TencentCloudAPIClient:
"""Tencent Cloud API client using official SDK"""
def __init__(self, secret_id, secret_key, region):
self.secret_id = secret_id
self.secret_key = secret_key
self.region = region
self.outlines = []
# Create credentials
self.cred = credential.Credential(secret_id, secret_key)
# Instantiate an http option, optional, can be skipped if no special requirements
self.httpProfile = HttpProfile()
self.httpProfile.endpoint = "lkeap.tencentcloudapi.com"
# Instantiate a client option, optional, can be skipped if no special requirements
self.clientProfile = ClientProfile()
self.clientProfile.httpProfile = self.httpProfile
# Instantiate the client object for the product to be requested, clientProfile is optional
self.client = lkeap_client.LkeapClient(self.cred, region, self.clientProfile)
def reconstruct_document_sse(self, file_type, file_url=None, file_base64=None, file_start_page=1, file_end_page=1000, config=None):
"""Call document parsing API using official SDK"""
try:
# Instantiate a request object, each interface corresponds to a request object
req = models.ReconstructDocumentSSERequest()
# Build request parameters
params = {
"FileType": file_type,
"FileStartPageNumber": file_start_page,
"FileEndPageNumber": file_end_page,
}
# According to Tencent Cloud API documentation, either FileUrl or FileBase64 parameter must be provided, if both are provided only FileUrl will be used
if file_url:
params["FileUrl"] = file_url
logging.info(f"[TCADP] Using file URL: {file_url}")
elif file_base64:
params["FileBase64"] = file_base64
logging.info(f"[TCADP] Using Base64 data, length: {len(file_base64)} characters")
else:
raise ValueError("Must provide either FileUrl or FileBase64 parameter")
if config:
params["Config"] = config
req.from_json_string(json.dumps(params))
# The returned resp is an instance of ReconstructDocumentSSEResponse, corresponding to the request object
resp = self.client.ReconstructDocumentSSE(req)
parser_result = {}
# Output json format string response
if isinstance(resp, types.GeneratorType): # Streaming response
logging.info("[TCADP] Detected streaming response")
for event in resp:
logging.info(f"[TCADP] Received event: {event}")
if event.get('data'):
try:
data_dict = json.loads(event['data'])
logging.info(f"[TCADP] Parsed data: {data_dict}")
if data_dict.get('Progress') == "100":
parser_result = data_dict
logging.info("[TCADP] Document parsing completed!")
logging.info(f"[TCADP] Task ID: {data_dict.get('TaskId')}")
logging.info(f"[TCADP] Success pages: {data_dict.get('SuccessPageNum')}")
logging.info(f"[TCADP] Failed pages: {data_dict.get('FailPageNum')}")
# Print failed page information
failed_pages = data_dict.get("FailedPages", [])
if failed_pages:
logging.warning("[TCADP] Failed parsing pages:")
for page in failed_pages:
logging.warning(f"[TCADP] Page number: {page.get('PageNumber')}, Error: {page.get('ErrorMsg')}")
# Check if there is a download link
download_url = data_dict.get("DocumentRecognizeResultUrl")
if download_url:
logging.info(f"[TCADP] Got download link: {download_url}")
else:
logging.warning("[TCADP] No download link obtained")
break # Found final result, exit loop
else:
# Print progress information
progress = data_dict.get("Progress", "0")
logging.info(f"[TCADP] Progress: {progress}%")
except json.JSONDecodeError as e:
logging.error(f"[TCADP] Failed to parse JSON data: {e}")
logging.error(f"[TCADP] Raw data: {event.get('data')}")
continue
else:
logging.info(f"[TCADP] Event without data: {event}")
else: # Non-streaming response
logging.info("[TCADP] Detected non-streaming response")
if hasattr(resp, 'data') and resp.data:
try:
data_dict = json.loads(resp.data)
parser_result = data_dict
logging.info(f"[TCADP] JSON parsing successful: {parser_result}")
except json.JSONDecodeError as e:
logging.error(f"[TCADP] JSON parsing failed: {e}")
return None
else:
logging.error("[TCADP] No data in response")
return None
return parser_result
except TencentCloudSDKException as err:
logging.error(f"[TCADP] Tencent Cloud SDK error: {err}")
return None
except Exception as e:
logging.error(f"[TCADP] Unknown error: {e}")
logging.error(f"[TCADP] Error stack trace: {traceback.format_exc()}")
return None
def download_result_file(self, download_url, output_dir):
"""Download parsing result file"""
if not download_url:
logging.warning("[TCADP] No downloadable result file")
return None
try:
# Ensure output directory exists
os.makedirs(output_dir, exist_ok=True)
# Generate filename
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"tcadp_result_{timestamp}.zip"
file_path = os.path.join(output_dir, filename)
with requests.get(download_url, stream=True) as response:
response.raise_for_status()
with open(file_path, "wb") as f:
response.raw.decode_content = True
shutil.copyfileobj(response.raw, f)
logging.info(f"[TCADP] Document parsing result downloaded to: {os.path.basename(file_path)}")
return file_path
except Exception as e:
logging.error(f"[TCADP] Failed to download file: {e}")
try:
if "file_path" in locals() and os.path.exists(file_path):
os.unlink(file_path)
except Exception:
pass
return None
class TCADPParser(RAGFlowPdfParser):
def __init__(self, secret_id: str = None, secret_key: str = None, region: str = "ap-guangzhou",
table_result_type: str = None, markdown_image_response_type: str = None):
super().__init__()
# First initialize logger
self.logger = logging.getLogger(self.__class__.__name__)
# Log received parameters
self.logger.info(f"[TCADP] Initializing with parameters - table_result_type: {table_result_type}, markdown_image_response_type: {markdown_image_response_type}")
# Priority: read configuration from RAGFlow configuration system (service_conf.yaml)
try:
tcadp_parser = get_base_config("tcadp_config", {})
if isinstance(tcadp_parser, dict) and tcadp_parser:
self.secret_id = secret_id or tcadp_parser.get("secret_id")
self.secret_key = secret_key or tcadp_parser.get("secret_key")
self.region = region or tcadp_parser.get("region", "ap-guangzhou")
# Set table_result_type and markdown_image_response_type from config or parameters
self.table_result_type = table_result_type if table_result_type is not None else tcadp_parser.get("table_result_type", "1")
self.markdown_image_response_type = markdown_image_response_type if markdown_image_response_type is not None else tcadp_parser.get("markdown_image_response_type", "1")
else:
self.logger.error("[TCADP] Please configure tcadp_config in service_conf.yaml first")
# If config file is empty, use provided parameters or defaults
self.secret_id = secret_id
self.secret_key = secret_key
self.region = region or "ap-guangzhou"
self.table_result_type = table_result_type if table_result_type is not None else "1"
self.markdown_image_response_type = markdown_image_response_type if markdown_image_response_type is not None else "1"
except ImportError:
self.logger.info("[TCADP] Configuration module import failed")
# If config file is not available, use provided parameters or defaults
self.secret_id = secret_id
self.secret_key = secret_key
self.region = region or "ap-guangzhou"
self.table_result_type = table_result_type if table_result_type is not None else "1"
self.markdown_image_response_type = markdown_image_response_type if markdown_image_response_type is not None else "1"
# Log final values
self.logger.info(f"[TCADP] Final values - table_result_type: {self.table_result_type}, markdown_image_response_type: {self.markdown_image_response_type}")
if not self.secret_id or not self.secret_key:
raise ValueError("[TCADP] Please set Tencent Cloud API keys, configure tcadp_config in service_conf.yaml")
@staticmethod
def _is_zipinfo_symlink(member: zipfile.ZipInfo) -> bool:
return (member.external_attr >> 16) & 0o170000 == 0o120000
def check_installation(self) -> bool:
"""Check if Tencent Cloud API configuration is correct"""
try:
# Check necessary configuration parameters
if not self.secret_id or not self.secret_key:
self.logger.error("[TCADP] Tencent Cloud API configuration incomplete")
return False
# Try to create client to verify configuration
TencentCloudAPIClient(self.secret_id, self.secret_key, self.region)
self.logger.info("[TCADP] Tencent Cloud API configuration check passed")
return True
except Exception as e:
self.logger.error(f"[TCADP] Tencent Cloud API configuration check failed: {e}")
return False
def _file_to_base64(self, file_path: str, binary: bytes = None) -> str:
"""Convert file to Base64 format"""
if binary:
# If binary data is directly available, convert directly
return base64.b64encode(binary).decode('utf-8')
else:
# Read from file path and convert
with open(file_path, 'rb') as f:
file_data = f.read()
return base64.b64encode(file_data).decode('utf-8')
def _extract_content_from_zip(self, zip_path: str) -> list[dict[str, Any]]:
"""Extract parsing results from downloaded ZIP file"""
results = []
try:
with zipfile.ZipFile(zip_path, "r") as zip_file:
members = zip_file.infolist()
for member in members:
name = member.filename.replace("\\", "/")
if member.is_dir():
continue
if member.flag_bits & 0x1:
raise RuntimeError(f"[TCADP] Encrypted zip entry not supported: {member.filename}")
if self._is_zipinfo_symlink(member):
raise RuntimeError(f"[TCADP] Symlink zip entry not supported: {member.filename}")
if name.startswith("/") or name.startswith("//") or re.match(r"^[A-Za-z]:", name):
raise RuntimeError(f"[TCADP] Unsafe zip path (absolute): {member.filename}")
parts = [p for p in name.split("/") if p not in ("", ".")]
if any(p == ".." for p in parts):
raise RuntimeError(f"[TCADP] Unsafe zip path (traversal): {member.filename}")
if not (name.endswith(".json") or name.endswith(".md")):
continue
with zip_file.open(member) as f:
if name.endswith(".json"):
data = json.load(f)
if isinstance(data, list):
results.extend(data)
else:
results.append(data)
else:
content = f.read().decode("utf-8")
results.append({"type": "text", "content": content, "file": name})
except Exception as e:
self.logger.error(f"[TCADP] Failed to extract ZIP file content: {e}")
return results
def _parse_content_to_sections(self, content_data: list[dict[str, Any]]) -> list[tuple[str, str]]:
"""Convert parsing results to sections format"""
sections = []
for item in content_data:
content_type = item.get("type", "text")
content = item.get("content", "")
if not content:
continue
# Process based on content type
if content_type == "text" or content_type == "paragraph":
section_text = content
elif content_type == "table":
# Handle table content
table_data = item.get("table_data", {})
if isinstance(table_data, dict):
# Convert table data to text
rows = table_data.get("rows", [])
section_text = "\n".join([" | ".join(row) for row in rows])
else:
section_text = str(table_data)
elif content_type == "image":
# Handle image content
caption = item.get("caption", "")
section_text = f"[Image] {caption}" if caption else "[Image]"
elif content_type == "equation":
# Handle equation content
section_text = f"$${content}$$"
else:
section_text = content
if section_text.strip():
# Generate position tag (simplified version)
position_tag = "@@1\t0.0\t1000.0\t0.0\t100.0##"
sections.append((section_text, position_tag))
return sections
def _parse_content_to_tables(self, content_data: list[dict[str, Any]]) -> list:
"""Convert parsing results to tables format"""
tables = []
for item in content_data:
if item.get("type") == "table":
table_data = item.get("table_data", {})
if isinstance(table_data, dict):
rows = table_data.get("rows", [])
if rows:
# Convert to table format
table_html = "<table>\n"
for i, row in enumerate(rows):
table_html += " <tr>\n"
for cell in row:
tag = "th" if i == 0 else "td"
table_html += f" <{tag}>{cell}</{tag}>\n"
table_html += " </tr>\n"
table_html += "</table>"
tables.append(table_html)
return tables
def parse_pdf(
self,
filepath: str | PathLike[str],
binary: BytesIO | bytes,
callback: Optional[Callable] = None,
*,
output_dir: Optional[str] = None,
file_type: str = "PDF",
file_start_page: Optional[int] = 1,
file_end_page: Optional[int] = 1000,
delete_output: Optional[bool] = True,
max_retries: Optional[int] = 1,
) -> tuple:
"""Parse PDF document"""
temp_file = None
created_tmp_dir = False
try:
# Handle input file
if binary:
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
temp_file.write(binary)
temp_file.close()
file_path = temp_file.name
self.logger.info(f"[TCADP] Received binary PDF -> {os.path.basename(file_path)}")
if callback:
callback(0.1, f"[TCADP] Received binary PDF -> {os.path.basename(file_path)}")
else:
file_path = str(filepath)
if not os.path.exists(file_path):
if callback:
callback(-1, f"[TCADP] PDF file does not exist: {file_path}")
raise FileNotFoundError(f"[TCADP] PDF file does not exist: {file_path}")
# Convert file to Base64 format
if callback:
callback(0.2, "[TCADP] Converting file to Base64 format")
file_base64 = self._file_to_base64(file_path, binary)
if callback:
callback(0.25, f"[TCADP] File converted to Base64, size: {len(file_base64)} characters")
# Create Tencent Cloud API client
client = TencentCloudAPIClient(self.secret_id, self.secret_key, self.region)
# Call document parsing API (with retry mechanism)
if callback:
callback(0.3, "[TCADP] Starting to call Tencent Cloud document parsing API")
result = None
for attempt in range(max_retries):
try:
if attempt > 0:
self.logger.info(f"[TCADP] Retry attempt {attempt + 1}")
if callback:
callback(0.3 + attempt * 0.1, f"[TCADP] Retry attempt {attempt + 1}")
time.sleep(2 ** attempt) # Exponential backoff
config = {
"TableResultType": self.table_result_type,
"MarkdownImageResponseType": self.markdown_image_response_type
}
self.logger.info(f"[TCADP] API request config - TableResultType: {self.table_result_type}, MarkdownImageResponseType: {self.markdown_image_response_type}")
result = client.reconstruct_document_sse(
file_type=file_type,
file_base64=file_base64,
file_start_page=file_start_page,
file_end_page=file_end_page,
config=config
)
if result:
self.logger.info(f"[TCADP] Attempt {attempt + 1} successful")
break
else:
self.logger.warning(f"[TCADP] Attempt {attempt + 1} failed, result is None")
except Exception as e:
self.logger.error(f"[TCADP] Attempt {attempt + 1} exception: {e}")
if attempt == max_retries - 1:
raise
if not result:
error_msg = f"[TCADP] Document parsing failed, retried {max_retries} times"
self.logger.error(error_msg)
if callback:
callback(-1, error_msg)
raise RuntimeError(error_msg)
# Get download link
download_url = result.get("DocumentRecognizeResultUrl")
if not download_url:
if callback:
callback(-1, "[TCADP] No parsing result download link obtained")
raise RuntimeError("[TCADP] No parsing result download link obtained")
if callback:
callback(0.6, f"[TCADP] Parsing result download link: {download_url}")
# Set output directory
if output_dir:
out_dir = Path(output_dir)
out_dir.mkdir(parents=True, exist_ok=True)
else:
out_dir = Path(tempfile.mkdtemp(prefix="adp_pdf_"))
created_tmp_dir = True
# Download result file
zip_path = client.download_result_file(download_url, str(out_dir))
if not zip_path:
if callback:
callback(-1, "[TCADP] Failed to download parsing result")
raise RuntimeError("[TCADP] Failed to download parsing result")
if callback:
# Shorten file path display, only show filename
zip_filename = os.path.basename(zip_path)
callback(0.8, f"[TCADP] Parsing result downloaded: {zip_filename}")
# Extract ZIP file content
content_data = self._extract_content_from_zip(zip_path)
self.logger.info(f"[TCADP] Extracted {len(content_data)} content blocks")
if callback:
callback(0.9, f"[TCADP] Extracted {len(content_data)} content blocks")
# Convert to sections and tables format
sections = self._parse_content_to_sections(content_data)
tables = self._parse_content_to_tables(content_data)
self.logger.info(f"[TCADP] Parsing completed: {len(sections)} sections, {len(tables)} tables")
if callback:
callback(1.0, f"[TCADP] Parsing completed: {len(sections)} sections, {len(tables)} tables")
return sections, tables
finally:
# Clean up temporary files
if temp_file and os.path.exists(temp_file.name):
try:
os.unlink(temp_file.name)
except Exception:
pass
if delete_output and created_tmp_dir and out_dir.exists():
try:
shutil.rmtree(out_dir)
except Exception:
pass
if __name__ == "__main__":
# Test ADP parser
parser = TCADPParser()
print("ADP available:", parser.check_installation())
# Test parsing
filepath = ""
if filepath and os.path.exists(filepath):
with open(filepath, "rb") as file:
sections, tables = parser.parse_pdf(filepath=filepath, binary=file.read())
print(f"Parsing result: {len(sections)} sections, {len(tables)} tables")
for i, (section, tag) in enumerate(sections[:3]): # Only print first 3
print(f"Section {i + 1}: {section[:100]}...")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "deepdoc/parser/tcadp_parser.py",
"license": "Apache License 2.0",
"lines": 460,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:deepdoc/parser/docling_parser.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import logging
import re
from dataclasses import dataclass
from enum import Enum
from io import BytesIO
from os import PathLike
from pathlib import Path
from typing import Any, Callable, Iterable, Optional
import pdfplumber
from PIL import Image
# docling is an optional dependency: keep this module importable without it
# and let check_installation() report the missing package at call time.
try:
    from docling.document_converter import DocumentConverter
except Exception:
    DocumentConverter = None
# Outside the RAGFlow tree the real base class may be unavailable; fall back
# to an empty stub so DoclingParser can still be defined.
try:
    from deepdoc.parser.pdf_parser import RAGFlowPdfParser
except Exception:
    class RAGFlowPdfParser:
        pass
class DoclingContentType(str, Enum):
    """Content categories a docling document item is mapped to."""
    IMAGE = "image"
    TABLE = "table"
    TEXT = "text"
    EQUATION = "equation"
@dataclass
class _BBox:
    """A bounding box anchored to a page.

    page_no is 1-based (consumers index ``page_images[page_no - 1]``);
    the coordinate origin follows docling's convention — presumably
    bottom-left, since consumers flip y with ``page_height - y``.
    """
    page_no: int
    x0: float
    y0: float
    x1: float
    y1: float
def _extract_bbox_from_prov(item, prov_attr: str = "prov") -> Optional[_BBox]:
    """Turn the first provenance entry of *item* into a ``_BBox``.

    Returns None when provenance is absent, or when the page number, the
    bbox object, or any of its l/t/r/b coordinates is missing.
    """
    prov = getattr(item, prov_attr, None)
    if not prov:
        return None
    first = prov[0] if isinstance(prov, list) else prov
    page_no = getattr(first, "page_no", None)
    box = getattr(first, "bbox", None)
    if page_no is None or box is None:
        return None
    corners = [getattr(box, name) for name in ("l", "t", "r", "b")]
    if any(value is None for value in corners):
        return None
    left, top, right, bottom = corners
    return _BBox(page_no=int(page_no), x0=left, y0=top, x1=right, y1=bottom)
class DoclingParser(RAGFlowPdfParser):
    """PDF parser built on docling's ``DocumentConverter``.

    Converts a PDF into RAGFlow's ``(sections, tables)`` shape and, using
    page images rendered by pdfplumber, tags sections with
    ``@@page<TAB>x0<TAB>x1<TAB>top<TAB>bottom##`` markers that
    :meth:`crop` can later turn back into stitched image snippets.
    """

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        # One rendered PIL image per page, filled by __images__.
        self.page_images: list[Image.Image] = []
        self.page_from = 0
        self.page_to = 10_000
        self.outlines = []

    def check_installation(self) -> bool:
        """Return True when docling is importable and a converter can be built."""
        if DocumentConverter is None:
            self.logger.warning("[Docling] 'docling' is not importable, please: pip install docling")
            return False
        try:
            # Instantiating the converter is the cheapest end-to-end sanity check.
            _ = DocumentConverter()
            return True
        except Exception as e:
            self.logger.error(f"[Docling] init DocumentConverter failed: {e}")
            return False

    def __images__(self, fnm, zoomin: int = 1, page_from=0, page_to=600, callback=None):
        """Render pages ``[page_from, page_to)`` of *fnm* to PIL images.

        *fnm* may be a path or raw PDF bytes.  Failures are logged and leave
        ``self.page_images`` empty instead of raising.
        """
        self.page_from = page_from
        self.page_to = page_to
        bytes_io = None
        try:
            if not isinstance(fnm, (str, PathLike)):
                bytes_io = BytesIO(fnm)
            opener = pdfplumber.open(fnm) if isinstance(fnm, (str, PathLike)) else pdfplumber.open(bytes_io)
            with opener as pdf:
                pages = pdf.pages[page_from:page_to]
                # 72 dpi * zoomin keeps pixel coordinates proportional to PDF points.
                self.page_images = [p.to_image(resolution=72 * zoomin, antialias=True).original for p in pages]
        except Exception as e:
            self.page_images = []
            self.logger.exception(e)
        finally:
            if bytes_io:
                bytes_io.close()

    def _make_line_tag(self,bbox: _BBox) -> str:
        """Serialize *bbox* as an ``@@page...##`` position marker.

        When page images are available the y coordinates are flipped
        (``page_height - y``) — docling bboxes presumably use a bottom-left
        origin while crop() expects top-left; TODO confirm.
        """
        if bbox is None:
            return ""
        x0,x1, top, bott = bbox.x0, bbox.x1, bbox.y0, bbox.y1
        # NOTE(review): indexes page_images by absolute page number and ignores
        # self.page_from — only correct when rendering starts at page 0.
        if hasattr(self, "page_images") and self.page_images and len(self.page_images) >= bbox.page_no:
            _, page_height = self.page_images[bbox.page_no-1].size
            top, bott = page_height-top ,page_height-bott
        return "@@{}\t{:.1f}\t{:.1f}\t{:.1f}\t{:.1f}##".format(
            bbox.page_no, x0,x1, top, bott
        )

    @staticmethod
    def extract_positions(txt: str) -> list[tuple[list[int], float, float, float, float]]:
        """Parse every position marker out of *txt*.

        Returns ``(page_indices, left, right, top, bottom)`` tuples with
        page numbers converted to 0-based indices.
        """
        poss = []
        for tag in re.findall(r"@@[0-9-]+\t[0-9.\t]+##", txt):
            pn, left, right, top, bottom = tag.strip("#").strip("@").split("\t")
            left, right, top, bottom = float(left), float(right), float(top), float(bottom)
            poss.append(([int(p) - 1 for p in pn.split("-")], left, right, top, bottom))
        return poss

    def crop(self, text: str, ZM: int = 1, need_position: bool = False):
        """Cut the page regions referenced by *text*'s markers and stitch them
        vertically into one image.

        Returns the stitched image (plus per-strip positions when
        *need_position* is True) or ``None``/``(None, None)`` when nothing
        can be cropped.
        """
        imgs = []
        poss = self.extract_positions(text)
        if not poss:
            return (None, None) if need_position else None
        GAP = 6
        # Prepend/append ~120px of surrounding context; these two context
        # strips are dimmed below and excluded from the returned positions.
        pos = poss[0]
        poss.insert(0, ([pos[0][0]], pos[1], pos[2], max(0, pos[3] - 120), max(pos[3] - GAP, 0)))
        pos = poss[-1]
        poss.append(([pos[0][-1]], pos[1], pos[2], min(self.page_images[pos[0][-1]].size[1], pos[4] + GAP), min(self.page_images[pos[0][-1]].size[1], pos[4] + 120)))
        positions = []
        for ii, (pns, left, right, top, bottom) in enumerate(poss):
            if bottom <= top:
                bottom = top + 4  # guarantee a visible strip
            img0 = self.page_images[pns[0]]
            x0, y0, x1, y1 = int(left), int(top), int(right), int(min(bottom, img0.size[1]))
            crop0 = img0.crop((x0, y0, x1, y1))
            imgs.append(crop0)
            if 0 < ii < len(poss)-1:
                # Only real (non-context) strips contribute to positions.
                positions.append((pns[0] + self.page_from, x0, x1, y0, y1))
            # A region may continue onto following pages.
            remain_bottom = bottom - img0.size[1]
            for pn in pns[1:]:
                if remain_bottom <= 0:
                    break
                page = self.page_images[pn]
                x0, y0, x1, y1 = int(left), 0, int(right), int(min(remain_bottom, page.size[1]))
                cimgp = page.crop((x0, y0, x1, y1))
                imgs.append(cimgp)
                if 0 < ii < len(poss) - 1:
                    positions.append((pn + self.page_from, x0, x1, y0, y1))
                remain_bottom -= page.size[1]
        if not imgs:
            return (None, None) if need_position else None
        height = sum(i.size[1] + GAP for i in imgs)
        width = max(i.size[0] for i in imgs)
        pic = Image.new("RGB", (width, int(height)), (245, 245, 245))
        h = 0
        for ii, img in enumerate(imgs):
            if ii == 0 or ii + 1 == len(imgs):
                # Dim the leading/trailing context strips with a 50% black overlay.
                img = img.convert("RGBA")
                overlay = Image.new("RGBA", img.size, (0, 0, 0, 0))
                overlay.putalpha(128)
                img = Image.alpha_composite(img, overlay).convert("RGB")
            pic.paste(img, (0, int(h)))
            h += img.size[1] + GAP
        return (pic, positions) if need_position else pic

    def _iter_doc_items(self, doc) -> Iterable[tuple[str, Any, Optional[_BBox]]]:
        """Yield ``(content_type, text, bbox)`` triples from a docling document."""
        # Body-level headers/paragraphs plus list items, in document order.
        for t in getattr(doc, "texts", []):
            parent = getattr(t, "parent", "")
            ref = getattr(parent, "cref", "")
            label = getattr(t, "label", "")
            if (label in ("section_header", "text") and ref in ("#/body",)) or label in ("list_item",):
                text = getattr(t, "text", "") or ""
                bbox = _extract_bbox_from_prov(t)
                yield (DoclingContentType.TEXT.value, text, bbox)
        # NOTE(review): docling labels are normally lowercase, so "FORMULA"
        # may never match and equations could be silently dropped — confirm.
        for item in getattr(doc, "texts", []):
            if getattr(item, "label", "") in ("FORMULA",):
                text = getattr(item, "text", "") or ""
                bbox = _extract_bbox_from_prov(item)
                yield (DoclingContentType.EQUATION.value, text, bbox)

    def _transfer_to_sections(self, doc, parse_method: str) -> list[tuple[str, ...]]:
        """Shape document items into section tuples.

        Tuple layout depends on *parse_method*:
        "manual" -> (text, type, tag); "paper" -> (text + tag, type);
        anything else -> (text, tag).
        """
        sections: list[tuple[str, ...]] = []
        for typ, payload, bbox in self._iter_doc_items(doc):
            if typ == DoclingContentType.TEXT.value:
                section = payload.strip()
                if not section:
                    continue
            elif typ == DoclingContentType.EQUATION.value:
                section = payload.strip()
            else:
                continue
            tag = self._make_line_tag(bbox) if isinstance(bbox,_BBox) else ""
            if parse_method == "manual":
                sections.append((section, typ, tag))
            elif parse_method == "paper":
                sections.append((section + tag, typ))
            else:
                sections.append((section, tag))
        return sections

    def cropout_docling_table(self, page_no: int, bbox: tuple[float, float, float, float], zoomin: int = 1):
        """Crop docling's (left, top, right, bottom) box out of the rendered page.

        Returns ``(image, [position])`` or ``(None, "")`` when the page image
        is missing or cropping fails.
        """
        if not getattr(self, "page_images", None):
            return None, ""
        # page_no is 1-based; page_images starts at self.page_from.
        idx = (page_no - 1) - getattr(self, "page_from", 0)
        if idx < 0 or idx >= len(self.page_images):
            return None, ""
        page_img = self.page_images[idx]
        W, H = page_img.size
        left, top, right, bott = bbox
        # Flip y: docling coordinates presumably have a bottom-left origin.
        x0 = float(left)
        y0 = float(H-top)
        x1 = float(right)
        y1 = float(H-bott)
        # Clamp to the page and keep the box at least 1px wide/tall.
        x0, y0 = max(0.0, min(x0, W - 1)), max(0.0, min(y0, H - 1))
        x1, y1 = max(x0 + 1.0, min(x1, W)), max(y0 + 1.0, min(y1, H))
        try:
            crop = page_img.crop((int(x0), int(y0), int(x1), int(y1))).convert("RGB")
        except Exception:
            return None, ""
        pos = (page_no-1 if page_no>0 else 0, x0, x1, y0, y1)
        return crop, [pos]

    def _transfer_to_tables(self, doc):
        """Collect tables and pictures as ``((image, html_or_captions), positions)``."""
        tables = []
        for tab in getattr(doc, "tables", []):
            img = None
            positions = ""
            bbox = _extract_bbox_from_prov(tab)
            if bbox:
                img, positions = self.cropout_docling_table(bbox.page_no, (bbox.x0, bbox.y0, bbox.x1, bbox.y1))
            html = ""
            try:
                html = tab.export_to_html(doc=doc)
            except Exception:
                pass  # best effort: keep the image even when HTML export fails
            tables.append(((img, html), positions if positions else ""))
        # Pictures travel through the same channel, with captions instead of HTML.
        for pic in getattr(doc, "pictures", []):
            img = None
            positions = ""
            bbox = _extract_bbox_from_prov(pic)
            if bbox:
                img, positions = self.cropout_docling_table(bbox.page_no, (bbox.x0, bbox.y0, bbox.x1, bbox.y1))
            captions = ""
            try:
                captions = pic.caption_text(doc=doc)
            except Exception:
                pass
            tables.append(((img, [captions]), positions if positions else ""))
        return tables

    def parse_pdf(
        self,
        filepath: str | PathLike[str],
        binary: BytesIO | bytes | None = None,
        callback: Optional[Callable] = None,
        *,
        output_dir: Optional[str] = None,
        lang: Optional[str] = None,
        method: str = "auto",
        delete_output: bool = True,
        parse_method: str = "raw"
    ):
        """Convert a PDF with docling and return ``(sections, tables)``.

        When *binary* is given it is first written under *output_dir* (or
        ``./.docling_tmp``) and removed afterwards when *delete_output* is
        True.  *callback(progress, message)* reports progress in [0, 1].
        *lang* and *method* are accepted for interface parity but unused here.

        Raises RuntimeError when docling is unavailable and
        FileNotFoundError when the input path does not exist.
        """
        if not self.check_installation():
            raise RuntimeError("Docling not available, please install `docling`")
        if binary is not None:
            tmpdir = Path(output_dir) if output_dir else Path.cwd() / ".docling_tmp"
            tmpdir.mkdir(parents=True, exist_ok=True)
            name = Path(filepath).name or "input.pdf"
            tmp_pdf = tmpdir / name
            with open(tmp_pdf, "wb") as f:
                if isinstance(binary, (bytes, bytearray)):
                    f.write(binary)
                else:
                    f.write(binary.getbuffer())
            src_path = tmp_pdf
        else:
            src_path = Path(filepath)
        if not src_path.exists():
            raise FileNotFoundError(f"PDF not found: {src_path}")
        if callback:
            callback(0.1, f"[Docling] Converting: {src_path}")
        try:
            # Page images are only needed for position tags/crops; conversion
            # can proceed without them.
            self.__images__(str(src_path), zoomin=1)
        except Exception as e:
            self.logger.warning(f"[Docling] render pages failed: {e}")
        conv = DocumentConverter()
        conv_res = conv.convert(str(src_path))
        doc = conv_res.document
        if callback:
            callback(0.7, f"[Docling] Parsed doc: {getattr(doc, 'num_pages', 'n/a')} pages")
        sections = self._transfer_to_sections(doc, parse_method=parse_method)
        tables = self._transfer_to_tables(doc)
        if callback:
            callback(0.95, f"[Docling] Sections: {len(sections)}, Tables: {len(tables)}")
        if binary is not None and delete_output:
            try:
                Path(src_path).unlink(missing_ok=True)
            except Exception:
                pass
        if callback:
            callback(1.0, "[Docling] Done.")
        return sections, tables
def _demo() -> None:
    """Manual smoke test: report docling availability and parse a sample PDF."""
    logging.basicConfig(level=logging.INFO)
    parser = DoclingParser()
    print("Docling available:", parser.check_installation())
    sections, tables = parser.parse_pdf(filepath="test_docling/toc.pdf", binary=None)
    print(len(sections), len(tables))


if __name__ == "__main__":
    _demo()
| {
"repo_id": "infiniflow/ragflow",
"file_path": "deepdoc/parser/docling_parser.py",
"license": "Apache License 2.0",
"lines": 297,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:admin/server/roles.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from typing import Dict, Any
from api.common.exceptions import AdminException
class RoleMgr:
    """Placeholder role-management facade.

    Every operation is still unimplemented: each call logs the request and
    raises ``AdminException`` carrying the same message.
    """

    @staticmethod
    def _reject(error_msg: str):
        # Shared "not implemented" behaviour: log, then surface as AdminException.
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def create_role(role_name: str, description: str):
        RoleMgr._reject(f"not implement: create role: {role_name}, description: {description}")

    @staticmethod
    def update_role_description(role_name: str, description: str) -> Dict[str, Any]:
        RoleMgr._reject(f"not implement: update role: {role_name} with description: {description}")

    @staticmethod
    def delete_role(role_name: str) -> Dict[str, Any]:
        RoleMgr._reject(f"not implement: drop role: {role_name}")

    @staticmethod
    def list_roles() -> Dict[str, Any]:
        RoleMgr._reject("not implement: list roles")

    @staticmethod
    def get_role_permission(role_name: str) -> Dict[str, Any]:
        RoleMgr._reject(f"not implement: show role {role_name}")

    @staticmethod
    def grant_role_permission(role_name: str, actions: list, resource: str) -> Dict[str, Any]:
        RoleMgr._reject(f"not implement: grant role {role_name} actions: {actions} on {resource}")

    @staticmethod
    def revoke_role_permission(role_name: str, actions: list, resource: str) -> Dict[str, Any]:
        RoleMgr._reject(f"not implement: revoke role {role_name} actions: {actions} on {resource}")

    @staticmethod
    def update_user_role(user_name: str, role_name: str) -> Dict[str, Any]:
        RoleMgr._reject(f"not implement: update user role: {user_name} to role {role_name}")

    @staticmethod
    def get_user_permission(user_name: str) -> Dict[str, Any]:
        RoleMgr._reject(f"not implement: get user permission: {user_name}")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "admin/server/roles.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:deepdoc/parser/mineru_parser.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import re
import shutil
import sys
import tempfile
import threading
import zipfile
from dataclasses import dataclass
from io import BytesIO
from os import PathLike
from pathlib import Path
from typing import Any, Callable, Optional
import numpy as np
import pdfplumber
import requests
from PIL import Image
from strenum import StrEnum
from deepdoc.parser.pdf_parser import RAGFlowPdfParser
# Cross-module singleton lock: stash a single threading.Lock in sys.modules so
# every importer of this module (even under re-import) shares one lock,
# presumably to serialise pdfplumber use, which is not thread-safe — confirm.
LOCK_KEY_pdfplumber = "global_shared_lock_pdfplumber"
if LOCK_KEY_pdfplumber not in sys.modules:
    sys.modules[LOCK_KEY_pdfplumber] = threading.Lock()
class MinerUContentType(StrEnum):
    """Block types found in MinerU's ``*_content_list.json`` output."""
    IMAGE = "image"
    TABLE = "table"
    TEXT = "text"
    EQUATION = "equation"
    CODE = "code"
    LIST = "list"
    DISCARDED = "discarded"
# Mapping from RAGFlow language names to MinerU OCR language codes.
# Languages missing from this map fall back to 'ch' (see parse_pdf).
LANGUAGE_TO_MINERU_MAP = {
    'English': 'en',
    'Chinese': 'ch',
    'Traditional Chinese': 'chinese_cht',
    'Russian': 'east_slavic',
    'Ukrainian': 'east_slavic',
    'Indonesian': 'latin',
    'Spanish': 'latin',
    'Vietnamese': 'latin',
    'Japanese': 'japan',
    'Korean': 'korean',
    'Portuguese BR': 'latin',
    'German': 'latin',
    'French': 'latin',
    'Italian': 'latin',
    'Tamil': 'ta',
    'Telugu': 'te',
    'Kannada': 'ka',
    'Thai': 'th',
    'Greek': 'el',
    'Hindi': 'devanagari',
    'Bulgarian': 'cyrillic',
}
class MinerUBackend(StrEnum):
    """MinerU processing backend options.

    Values must match the backend names accepted by the MinerU API (see
    ``check_installation``'s valid_backends list).
    """
    PIPELINE = "pipeline"  # Traditional multimodel pipeline (default)
    VLM_TRANSFORMERS = "vlm-transformers"  # Vision-language model using HuggingFace Transformers
    VLM_MLX_ENGINE = "vlm-mlx-engine"  # Faster, requires Apple Silicon and macOS 13.5+
    VLM_VLLM_ENGINE = "vlm-vllm-engine"  # Local vLLM engine, requires local GPU
    VLM_VLLM_ASYNC_ENGINE = "vlm-vllm-async-engine"  # Asynchronous vLLM engine, new in MinerU API
    VLM_LMDEPLOY_ENGINE = "vlm-lmdeploy-engine"  # LMDeploy engine
    VLM_HTTP_CLIENT = "vlm-http-client"  # HTTP client for remote vLLM server (CPU only)
class MinerULanguage(StrEnum):
    """MinerU supported languages for OCR (pipeline backend only).

    See LANGUAGE_TO_MINERU_MAP for the RAGFlow-name -> code mapping.
    """
    CH = "ch"  # Chinese
    CH_SERVER = "ch_server"  # Chinese (server)
    CH_LITE = "ch_lite"  # Chinese (lite)
    EN = "en"  # English
    KOREAN = "korean"  # Korean
    JAPAN = "japan"  # Japanese
    CHINESE_CHT = "chinese_cht"  # Chinese Traditional
    TA = "ta"  # Tamil
    TE = "te"  # Telugu
    KA = "ka"  # Kannada
    TH = "th"  # Thai
    EL = "el"  # Greek
    LATIN = "latin"  # Latin
    ARABIC = "arabic"  # Arabic
    EAST_SLAVIC = "east_slavic"  # East Slavic
    CYRILLIC = "cyrillic"  # Cyrillic
    DEVANAGARI = "devanagari"  # Devanagari
class MinerUParseMethod(StrEnum):
    """MinerU PDF parsing methods (pipeline backend only)."""
    AUTO = "auto"  # Automatically determine the method based on the file type
    TXT = "txt"  # Use text extraction method
    OCR = "ocr"  # Use OCR method for image-based PDFs
@dataclass
class MinerUParseOptions:
    """Options for MinerU PDF parsing (forwarded to the /file_parse API)."""
    backend: MinerUBackend = MinerUBackend.PIPELINE
    lang: Optional[MinerULanguage] = None  # language for OCR (pipeline backend only)
    method: MinerUParseMethod = MinerUParseMethod.AUTO
    server_url: Optional[str] = None  # remote vLLM server (vlm-http-client backend)
    delete_output: bool = True  # remove MinerU's output dir after parsing
    parse_method: str = "raw"  # section tuple layout: "raw" | "manual" | "paper"
    formula_enable: bool = True
    table_enable: bool = True
class MinerUParser(RAGFlowPdfParser):
def __init__(self, mineru_path: str = "mineru", mineru_api: str = "", mineru_server_url: str = ""):
self.mineru_api = mineru_api.rstrip("/")
self.mineru_server_url = mineru_server_url.rstrip("/")
self.outlines = []
self.logger = logging.getLogger(self.__class__.__name__)
    @staticmethod
    def _is_zipinfo_symlink(member: zipfile.ZipInfo) -> bool:
        """True when the zip entry is a Unix symlink.

        The upper 16 bits of ``external_attr`` hold the Unix ``st_mode``;
        masking with S_IFMT (0o170000) and comparing to S_IFLNK (0o120000)
        identifies symlinks.
        """
        return (member.external_attr >> 16) & 0o170000 == 0o120000
    def _extract_zip_no_root(self, zip_path, extract_to, root_dir):
        """Safely extract *zip_path* into *extract_to*, dropping the top-level folder.

        *root_dir* is the expected root folder inside the archive; when falsy
        it is inferred from the first entry.  Encrypted entries, symlinks,
        absolute/drive-letter paths, ``..`` traversal and any destination
        resolving outside *extract_to* are rejected (zip-slip defense).
        """
        self.logger.info(f"[MinerU] Extract zip: zip_path={zip_path}, extract_to={extract_to}, root_hint={root_dir}")
        base_dir = Path(extract_to).resolve()
        with zipfile.ZipFile(zip_path, "r") as zip_ref:
            members = zip_ref.infolist()
            if not root_dir:
                # Infer the root folder from the archive's first entry, if any.
                if members and members[0].filename.endswith("/"):
                    root_dir = members[0].filename
                else:
                    root_dir = None
            if root_dir:
                # Normalise to forward slashes with a trailing "/" for prefix tests.
                root_dir = root_dir.replace("\\", "/")
                if not root_dir.endswith("/"):
                    root_dir += "/"
            for member in members:
                # Bit 0 of flag_bits marks an encrypted entry.
                if member.flag_bits & 0x1:
                    raise RuntimeError(f"[MinerU] Encrypted zip entry not supported: {member.filename}")
                if self._is_zipinfo_symlink(member):
                    raise RuntimeError(f"[MinerU] Symlink zip entry not supported: {member.filename}")
                name = member.filename.replace("\\", "/")
                if root_dir and name == root_dir:
                    self.logger.info("[MinerU] Ignore root folder...")
                    continue
                if root_dir and name.startswith(root_dir):
                    name = name[len(root_dir) :]
                if not name:
                    continue
                if name.startswith("/") or name.startswith("//") or re.match(r"^[A-Za-z]:", name):
                    raise RuntimeError(f"[MinerU] Unsafe zip path (absolute): {member.filename}")
                parts = [p for p in name.split("/") if p not in ("", ".")]
                if any(p == ".." for p in parts):
                    raise RuntimeError(f"[MinerU] Unsafe zip path (traversal): {member.filename}")
                rel_path = os.path.join(*parts) if parts else ""
                dest_path = (Path(extract_to) / rel_path).resolve(strict=False)
                # Belt-and-braces: the resolved destination must stay under base_dir.
                if dest_path != base_dir and base_dir not in dest_path.parents:
                    raise RuntimeError(f"[MinerU] Unsafe zip path (escape): {member.filename}")
                if member.is_dir():
                    os.makedirs(dest_path, exist_ok=True)
                    continue
                os.makedirs(dest_path.parent, exist_ok=True)
                # Stream contents manually (instead of ZipFile.extract) so the
                # sanitised destination path is honoured.
                with zip_ref.open(member) as src, open(dest_path, "wb") as dst:
                    shutil.copyfileobj(src, dst)
@staticmethod
def _is_http_endpoint_valid(url, timeout=5):
try:
response = requests.head(url, timeout=timeout, allow_redirects=True)
return response.status_code in [200, 301, 302, 307, 308]
except Exception:
return False
    def check_installation(self, backend: str = "pipeline", server_url: Optional[str] = None) -> tuple[bool, str]:
        """Validate the MinerU configuration for *backend*.

        Returns ``(ok, reason)``.  ``ok`` is False when the backend name is
        unknown, the API endpoint is unset or unreachable, or the
        vlm-http-client server URL is missing.  An unreachable
        vlm-http-client server is only logged — not treated as fatal.
        """
        reason = ""
        valid_backends = ["pipeline", "vlm-http-client", "vlm-transformers", "vlm-vllm-engine", "vlm-mlx-engine", "vlm-vllm-async-engine", "vlm-lmdeploy-engine"]
        if backend not in valid_backends:
            reason = f"[MinerU] Invalid backend '{backend}'. Valid backends are: {valid_backends}"
            self.logger.warning(reason)
            return False, reason
        if not self.mineru_api:
            reason = "[MinerU] MINERU_APISERVER not configured."
            self.logger.warning(reason)
            return False, reason
        # Probe the FastAPI schema endpoint as a cheap liveness check.
        api_openapi = f"{self.mineru_api}/openapi.json"
        try:
            api_ok = self._is_http_endpoint_valid(api_openapi)
            self.logger.info(f"[MinerU] API openapi.json reachable={api_ok} url={api_openapi}")
            if not api_ok:
                reason = f"[MinerU] MinerU API not accessible: {api_openapi}"
                return False, reason
        except Exception as exc:
            reason = f"[MinerU] MinerU API check failed: {exc}"
            self.logger.warning(reason)
            return False, reason
        if backend == "vlm-http-client":
            # Per-call server_url overrides the instance-level default.
            resolved_server = server_url or self.mineru_server_url
            if not resolved_server:
                reason = "[MinerU] MINERU_SERVER_URL required for vlm-http-client backend."
                self.logger.warning(reason)
                return False, reason
            try:
                server_ok = self._is_http_endpoint_valid(resolved_server)
                self.logger.info(f"[MinerU] vlm-http-client server check reachable={server_ok} url={resolved_server}")
            except Exception as exc:
                self.logger.warning(f"[MinerU] vlm-http-client server probe failed: {resolved_server}: {exc}")
        return True, reason
    def _run_mineru(
        self, input_path: Path, output_dir: Path, options: MinerUParseOptions, callback: Optional[Callable] = None
    ) -> Path:
        """Run MinerU on *input_path*; currently always delegates to the HTTP API."""
        return self._run_mineru_api(input_path, output_dir, options, callback)
    def _run_mineru_api(
        self, input_path: Path, output_dir: Path, options: MinerUParseOptions, callback: Optional[Callable] = None
    ) -> Path:
        """POST the PDF to ``{mineru_api}/file_parse`` and unpack the reply.

        The API is asked to respond with a zip archive, which is streamed to
        disk and extracted into a fresh temp dir under *output_dir*.  Returns
        that directory.  Raises RuntimeError when the request fails.
        """
        pdf_file_path = str(input_path)
        if not os.path.exists(pdf_file_path):
            raise RuntimeError(f"[MinerU] PDF file not exists: {pdf_file_path}")
        pdf_file_name = Path(pdf_file_path).stem.strip()
        output_path = tempfile.mkdtemp(prefix=f"{pdf_file_name}_{options.method}_", dir=str(output_dir))
        output_zip_path = os.path.join(str(output_dir), f"{Path(output_path).name}.zip")
        data = {
            "output_dir": "./output",
            "lang_list": options.lang,
            "backend": options.backend,
            "parse_method": options.method,
            "formula_enable": options.formula_enable,
            "table_enable": options.table_enable,
            "server_url": None,
            "return_md": True,
            "return_middle_json": True,
            "return_model_output": True,
            "return_content_list": True,
            "return_images": True,
            "response_format_zip": True,  # ask for a single zip instead of inline JSON
            "start_page_id": 0,
            "end_page_id": 99999,
        }
        # Per-call server_url wins over the instance-level default.
        if options.server_url:
            data["server_url"] = options.server_url
        elif self.mineru_server_url:
            data["server_url"] = self.mineru_server_url
        self.logger.info(f"[MinerU] request {data=}")
        self.logger.info(f"[MinerU] request {options=}")
        headers = {"Accept": "application/json"}
        try:
            self.logger.info(f"[MinerU] invoke api: {self.mineru_api}/file_parse backend={options.backend} server_url={data.get('server_url')}")
            if callback:
                callback(0.20, f"[MinerU] invoke api: {self.mineru_api}/file_parse")
            with open(pdf_file_path, "rb") as pdf_file:
                files = {"files": (pdf_file_name + ".pdf", pdf_file, "application/pdf")}
                with requests.post(
                    url=f"{self.mineru_api}/file_parse",
                    files=files,
                    data=data,
                    headers=headers,
                    timeout=1800,
                    stream=True,
                ) as response:
                    response.raise_for_status()
                    content_type = response.headers.get("Content-Type", "")
                    if content_type.startswith("application/zip"):
                        self.logger.info(f"[MinerU] zip file returned, saving to {output_zip_path}...")
                        if callback:
                            callback(0.30, f"[MinerU] zip file returned, saving to {output_zip_path}...")
                        with open(output_zip_path, "wb") as f:
                            # Decode any transfer encoding while streaming to disk.
                            response.raw.decode_content = True
                            shutil.copyfileobj(response.raw, f)
                        self.logger.info(f"[MinerU] Unzip to {output_path}...")
                        self._extract_zip_no_root(output_zip_path, output_path, pdf_file_name + "/")
                        if callback:
                            callback(0.40, f"[MinerU] Unzip to {output_path}...")
                    else:
                        # NOTE(review): a non-zip reply is only logged; the empty
                        # output dir is still returned and _read_output fails later.
                        self.logger.warning(f"[MinerU] not zip returned from api: {content_type}")
        except Exception as e:
            raise RuntimeError(f"[MinerU] api failed with exception {e}")
        self.logger.info("[MinerU] Api completed successfully.")
        return Path(output_path)
    def __images__(self, fnm, zoomin: int = 1, page_from=0, page_to=600, callback=None):
        """Render pages ``[page_from, page_to)`` of *fnm* (path or bytes) to PIL images.

        On failure ``page_images`` becomes None and ``total_page`` 0; callers
        test the attribute's truthiness before using it.
        """
        self.page_from = page_from
        self.page_to = page_to
        try:
            with pdfplumber.open(fnm) if isinstance(fnm, (str, PathLike)) else pdfplumber.open(BytesIO(fnm)) as pdf:
                # NOTE(review): self.pdf outlives the with-block, so it points to
                # a closed document afterwards — confirm nothing reads it later.
                self.pdf = pdf
                # 72 dpi * zoomin keeps pixel coordinates proportional to PDF points.
                self.page_images = [p.to_image(resolution=72 * zoomin, antialias=True).original for _, p in
                                    enumerate(self.pdf.pages[page_from:page_to])]
        except Exception as e:
            self.page_images = None
            self.total_page = 0
            self.logger.exception(e)
    def _line_tag(self, bx):
        """Build an ``@@page<TAB>x0<TAB>x1<TAB>top<TAB>bottom##`` tag for a MinerU block.

        MinerU bbox values appear to be normalised to a 0–1000 range per page
        dimension (hence the /1000 scaling) — TODO confirm; they are mapped to
        rendered-pixel coordinates when page images are available.
        """
        pn = [bx["page_idx"] + 1]  # 1-based page numbers inside the tag
        positions = bx.get("bbox", (0, 0, 0, 0))
        x0, top, x1, bott = positions
        if hasattr(self, "page_images") and self.page_images and len(self.page_images) > bx["page_idx"]:
            page_width, page_height = self.page_images[bx["page_idx"]].size
            x0 = (x0 / 1000.0) * page_width
            x1 = (x1 / 1000.0) * page_width
            top = (top / 1000.0) * page_height
            bott = (bott / 1000.0) * page_height
        return "@@{}\t{:.1f}\t{:.1f}\t{:.1f}\t{:.1f}##".format("-".join([str(p) for p in pn]), x0, x1, top, bott)
    def crop(self, text, ZM=1, need_position=False):
        """Crop the page regions referenced by *text*'s position tags and
        stitch them vertically into one image.

        Returns the stitched PIL image (and, when *need_position* is True,
        the list of (page, x0, x1, top, bottom) strips actually used), or
        None/(None, None) when nothing can be cropped.
        """
        imgs = []
        poss = self.extract_positions(text)
        if not poss:
            if need_position:
                return None, None
            return
        if not getattr(self, "page_images", None):
            self.logger.warning("[MinerU] crop called without page images; skipping image generation.")
            if need_position:
                return None, None
            return
        page_count = len(self.page_images)
        # Drop positions whose page indices fall outside the rendered range.
        filtered_poss = []
        for pns, left, right, top, bottom in poss:
            if not pns:
                self.logger.warning("[MinerU] Empty page index list in crop; skipping this position.")
                continue
            valid_pns = [p for p in pns if 0 <= p < page_count]
            if not valid_pns:
                self.logger.warning(f"[MinerU] All page indices {pns} out of range for {page_count} pages; skipping.")
                continue
            filtered_poss.append((valid_pns, left, right, top, bottom))
        poss = filtered_poss
        if not poss:
            self.logger.warning("[MinerU] No valid positions after filtering; skip cropping.")
            if need_position:
                return None, None
            return
        max_width = max(np.max([right - left for (_, left, right, _, _) in poss]), 6)
        GAP = 6
        # Prepend/append ~120px context strips; they are dimmed below and
        # excluded from the returned positions.
        pos = poss[0]
        first_page_idx = pos[0][0]
        poss.insert(0, ([first_page_idx], pos[1], pos[2], max(0, pos[3] - 120), max(pos[3] - GAP, 0)))
        pos = poss[-1]
        last_page_idx = pos[0][-1]
        if not (0 <= last_page_idx < page_count):
            self.logger.warning(
                f"[MinerU] Last page index {last_page_idx} out of range for {page_count} pages; skipping crop.")
            if need_position:
                return None, None
            return
        last_page_height = self.page_images[last_page_idx].size[1]
        poss.append(
            (
                [last_page_idx],
                pos[1],
                pos[2],
                min(last_page_height, pos[4] + GAP),
                min(last_page_height, pos[4] + 120),
            )
        )
        positions = []
        for ii, (pns, left, right, top, bottom) in enumerate(poss):
            right = left + max_width  # uniform strip width across all crops
            if bottom <= top:
                bottom = top + 2  # keep the strip visible
            # Regions spanning several pages: extend bottom by the heights of
            # the pages preceding each continuation page.
            for pn in pns[1:]:
                if 0 <= pn - 1 < page_count:
                    bottom += self.page_images[pn - 1].size[1]
                else:
                    self.logger.warning(
                        f"[MinerU] Page index {pn}-1 out of range for {page_count} pages during crop; skipping height accumulation.")
            if not (0 <= pns[0] < page_count):
                self.logger.warning(
                    f"[MinerU] Base page index {pns[0]} out of range for {page_count} pages during crop; skipping this segment.")
                continue
            img0 = self.page_images[pns[0]]
            x0, y0, x1, y1 = int(left), int(top), int(right), int(min(bottom, img0.size[1]))
            crop0 = img0.crop((x0, y0, x1, y1))
            imgs.append(crop0)
            if 0 < ii < len(poss) - 1:
                # Only real strips (not the two context strips) report positions.
                positions.append((pns[0] + self.page_from, x0, x1, y0, y1))
            bottom -= img0.size[1]
            for pn in pns[1:]:
                if not (0 <= pn < page_count):
                    self.logger.warning(
                        f"[MinerU] Page index {pn} out of range for {page_count} pages during crop; skipping this page.")
                    continue
                page = self.page_images[pn]
                x0, y0, x1, y1 = int(left), 0, int(right), int(min(bottom, page.size[1]))
                cimgp = page.crop((x0, y0, x1, y1))
                imgs.append(cimgp)
                if 0 < ii < len(poss) - 1:
                    positions.append((pn + self.page_from, x0, x1, y0, y1))
                bottom -= page.size[1]
        if not imgs:
            if need_position:
                return None, None
            return
        height = 0
        for img in imgs:
            height += img.size[1] + GAP
        height = int(height)
        width = int(np.max([i.size[0] for i in imgs]))
        pic = Image.new("RGB", (width, height), (245, 245, 245))
        height = 0
        for ii, img in enumerate(imgs):
            if ii == 0 or ii + 1 == len(imgs):
                # Dim the leading/trailing context strips with a 50% black overlay.
                img = img.convert("RGBA")
                overlay = Image.new("RGBA", img.size, (0, 0, 0, 0))
                overlay.putalpha(128)
                img = Image.alpha_composite(img, overlay).convert("RGB")
            pic.paste(img, (0, int(height)))
            height += img.size[1] + GAP
        if need_position:
            return pic, positions
        return pic
@staticmethod
def extract_positions(txt: str):
poss = []
for tag in re.findall(r"@@[0-9-]+\t[0-9.\t]+##", txt):
pn, left, right, top, bottom = tag.strip("#").strip("@").split("\t")
left, right, top, bottom = float(left), float(right), float(top), float(bottom)
poss.append(([int(p) - 1 for p in pn.split("-")], left, right, top, bottom))
return poss
    def _read_output(self, output_dir: Path, file_stem: str, method: str = "auto", backend: str = "pipeline") -> list[
        dict[str, Any]]:
        """Locate and load the ``*_content_list.json`` MinerU produced.

        Tries, in order: the raw stem, the sanitized stem, then a nested
        ``<sanitized>/<sanitized>_content_list.json`` layout.  Image paths in
        the JSON are rewritten to absolute paths.  Raises FileNotFoundError
        listing every attempted path when nothing matches.
        """
        json_file = None
        subdir = None
        attempted = []
        # mirror MinerU's sanitize_filename to align ZIP naming
        def _sanitize_filename(name: str) -> str:
            sanitized = re.sub(r"[/\\\.]{2,}|[/\\]", "", name)
            sanitized = re.sub(r"[^\w.-]", "_", sanitized, flags=re.UNICODE)
            if sanitized.startswith("."):
                sanitized = "_" + sanitized[1:]
            return sanitized or "unnamed"
        safe_stem = _sanitize_filename(file_stem)
        allowed_names = {f"{file_stem}_content_list.json", f"{safe_stem}_content_list.json"}
        self.logger.info(f"[MinerU] Expected output files: {', '.join(sorted(allowed_names))}")
        self.logger.info(f"[MinerU] Searching output in: {output_dir}")
        jf = output_dir / f"{file_stem}_content_list.json"
        self.logger.info(f"[MinerU] Trying original path: {jf}")
        attempted.append(jf)
        if jf.exists():
            subdir = output_dir
            json_file = jf
        else:
            alt = output_dir / f"{safe_stem}_content_list.json"
            self.logger.info(f"[MinerU] Trying sanitized filename: {alt}")
            attempted.append(alt)
            if alt.exists():
                subdir = output_dir
                json_file = alt
            else:
                nested_alt = output_dir / safe_stem / f"{safe_stem}_content_list.json"
                self.logger.info(f"[MinerU] Trying sanitized nested path: {nested_alt}")
                attempted.append(nested_alt)
                if nested_alt.exists():
                    subdir = nested_alt.parent
                    json_file = nested_alt
        if not json_file:
            raise FileNotFoundError(f"[MinerU] Missing output file, tried: {', '.join(str(p) for p in attempted)}")
        with open(json_file, "r", encoding="utf-8") as f:
            data = json.load(f)
        # Rewrite relative image paths to absolute so downstream consumers can
        # load them regardless of the current working directory.
        for item in data:
            for key in ("img_path", "table_img_path", "equation_img_path"):
                if key in item and item[key]:
                    item[key] = str((subdir / item[key]).resolve())
        return data
def _transfer_to_sections(self, outputs: list[dict[str, Any]], parse_method: str = None):
sections = []
for output in outputs:
match output["type"]:
case MinerUContentType.TEXT:
section = output.get("text", "")
case MinerUContentType.TABLE:
section = output.get("table_body", "") + "\n".join(output.get("table_caption", [])) + "\n".join(
output.get("table_footnote", []))
if not section.strip():
section = "FAILED TO PARSE TABLE"
case MinerUContentType.IMAGE:
section = "".join(output.get("image_caption", [])) + "\n" + "".join(
output.get("image_footnote", []))
case MinerUContentType.EQUATION:
section = output.get("text", "")
case MinerUContentType.CODE:
section = output.get("code_body", "") + "\n".join(output.get("code_caption", []))
case MinerUContentType.LIST:
section = "\n".join(output.get("list_items", []))
case MinerUContentType.DISCARDED:
continue # Skip discarded blocks entirely
if section and parse_method == "manual":
sections.append((section, output["type"], self._line_tag(output)))
elif section and parse_method == "paper":
sections.append((section + self._line_tag(output), output["type"]))
else:
sections.append((section, self._line_tag(output)))
return sections
    def _transfer_to_tables(self, outputs: list[dict[str, Any]]):
        """Return the tables extracted from *outputs* — always empty here.

        NOTE(review): table content is already emitted as text sections by
        _transfer_to_sections' TABLE case, which is presumably why no
        separate table tuples are produced — confirm this is intentional.
        """
        return []
    def parse_pdf(
        self,
        filepath: str | PathLike[str],
        binary: BytesIO | bytes,
        callback: Optional[Callable] = None,
        *,
        output_dir: Optional[str] = None,
        backend: str = "pipeline",
        server_url: Optional[str] = None,
        delete_output: bool = True,
        parse_method: str = "raw",
        **kwargs,
    ) -> tuple:
        """Parse a PDF with MinerU and return (sections, tables).

        :param filepath: path to the PDF; its name is used even when `binary` is given.
        :param binary: raw PDF bytes; when truthy, the bytes are written to a
            temp file and `filepath` is only used for naming.
        :param callback: optional progress callback(progress_float, message).
        :param output_dir: where MinerU writes results; a temp dir is created
            (and later removed when `delete_output`) if omitted.
        :param backend: MinerU backend name (e.g. "pipeline").
        :param server_url: remote MinerU server, falls back to self.mineru_server_url.
        :param delete_output: remove the auto-created output dir afterwards.
        :param parse_method: downstream sectioning flavor passed to _transfer_to_sections.
        :param kwargs: may carry `parser_config` (mineru_* options) and `lang`.
        :raises FileNotFoundError: when the resolved PDF path does not exist.
        """
        import shutil
        temp_pdf = None
        created_tmp_dir = False
        parser_cfg = kwargs.get('parser_config', {})
        lang = parser_cfg.get('mineru_lang') or kwargs.get('lang', 'English')
        mineru_lang_code = LANGUAGE_TO_MINERU_MAP.get(lang, 'ch')  # Defaults to Chinese if not matched
        mineru_method_raw_str = parser_cfg.get('mineru_parse_method', 'auto')
        enable_formula = parser_cfg.get('mineru_formula_enable', True)
        enable_table = parser_cfg.get('mineru_table_enable', True)
        # remove spaces, or mineru crash, and _read_output fail too
        file_path = Path(filepath)
        pdf_file_name = file_path.stem.replace(" ", "") + ".pdf"
        pdf_file_path_valid = os.path.join(file_path.parent, pdf_file_name)
        if binary:
            # Binary payload wins: spill it to a dedicated temp dir so MinerU
            # can read a real file; cleaned up in the finally block below.
            temp_dir = Path(tempfile.mkdtemp(prefix="mineru_bin_pdf_"))
            temp_pdf = temp_dir / pdf_file_name
            with open(temp_pdf, "wb") as f:
                f.write(binary)
            pdf = temp_pdf
            self.logger.info(f"[MinerU] Received binary PDF -> {temp_pdf}")
            if callback:
                callback(0.15, f"[MinerU] Received binary PDF -> {temp_pdf}")
        else:
            if pdf_file_path_valid != filepath:
                # NOTE: moves (renames) the caller's file on disk to the
                # space-free name.
                self.logger.info(f"[MinerU] Remove all space in file name: {pdf_file_path_valid}")
                shutil.move(filepath, pdf_file_path_valid)
            pdf = Path(pdf_file_path_valid)
        if not pdf.exists():
            if callback:
                callback(-1, f"[MinerU] PDF not found: {pdf}")
            raise FileNotFoundError(f"[MinerU] PDF not found: {pdf}")
        if output_dir:
            out_dir = Path(output_dir)
            out_dir.mkdir(parents=True, exist_ok=True)
        else:
            out_dir = Path(tempfile.mkdtemp(prefix="mineru_pdf_"))
            created_tmp_dir = True
        self.logger.info(f"[MinerU] Output directory: {out_dir} backend={backend} api={self.mineru_api} server_url={server_url or self.mineru_server_url}")
        if callback:
            callback(0.15, f"[MinerU] Output directory: {out_dir}")
        self.__images__(pdf, zoomin=1)
        try:
            options = MinerUParseOptions(
                backend=MinerUBackend(backend),
                lang=MinerULanguage(mineru_lang_code),
                method=MinerUParseMethod(mineru_method_raw_str),
                server_url=server_url,
                delete_output=delete_output,
                parse_method=parse_method,
                formula_enable=enable_formula,
                table_enable=enable_table,
            )
            final_out_dir = self._run_mineru(pdf, out_dir, options, callback=callback)
            outputs = self._read_output(final_out_dir, pdf.stem, method=mineru_method_raw_str, backend=backend)
            self.logger.info(f"[MinerU] Parsed {len(outputs)} blocks from PDF.")
            if callback:
                callback(0.75, f"[MinerU] Parsed {len(outputs)} blocks from PDF.")
            return self._transfer_to_sections(outputs, parse_method), self._transfer_to_tables(outputs)
        finally:
            # Best-effort cleanup of the binary temp file/dir and (optionally)
            # the auto-created output dir; never mask the real result/exception.
            if temp_pdf and temp_pdf.exists():
                try:
                    temp_pdf.unlink()
                    temp_pdf.parent.rmdir()
                except Exception:
                    pass
            if delete_output and created_tmp_dir and out_dir.exists():
                try:
                    shutil.rmtree(out_dir)
                except Exception:
                    pass
if __name__ == "__main__":
    # Ad-hoc smoke test for local development; not exercised in production.
    parser = MinerUParser("mineru")
    ok, reason = parser.check_installation()
    print("MinerU available:", ok)
    filepath = ""  # TODO: set to a real PDF path before running; open("") fails as-is
    with open(filepath, "rb") as file:
        outputs = parser.parse_pdf(filepath=filepath, binary=file.read())
    # parse_pdf returns a (sections, tables) tuple, so this prints both parts.
    for output in outputs:
        print(output)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "deepdoc/parser/mineru_parser.py",
"license": "Apache License 2.0",
"lines": 583,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:api/utils/email_templates.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Reusable HTML email templates and registry.
"""
# Invitation email template
INVITE_EMAIL_TMPL = """
Hi {{email}},
{{inviter}} has invited you to join their team (ID: {{tenant_id}}).
Click the link below to complete your registration:
{{invite_url}}
If you did not request this, please ignore this email.
"""
# Password reset code template
RESET_CODE_EMAIL_TMPL = """
Hello,
Your password reset code is: {{ code }}
This code will expire in {{ ttl_min }} minutes.
"""
# Template registry
EMAIL_TEMPLATES = {
"invite": INVITE_EMAIL_TMPL,
"reset_code": RESET_CODE_EMAIL_TMPL,
}
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/utils/email_templates.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
infiniflow/ragflow:api/db/services/pipeline_operation_log_service.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
from datetime import datetime, timedelta
from peewee import fn
from api.db import VALID_PIPELINE_TASK_TYPES, PipelineTaskType
from api.db.db_models import DB, Document, PipelineOperationLog
from api.db.services.canvas_service import UserCanvasService
from api.db.services.common_service import CommonService
from api.db.services.document_service import DocumentService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.task_service import GRAPH_RAPTOR_FAKE_DOC_ID
from common.misc_utils import get_uuid
from common.time_utils import current_timestamp, datetime_format
class PipelineOperationLogService(CommonService):
    """Persistence and query helpers for pipeline operation logs.

    Each log row snapshots one finished document-processing run (progress,
    duration, task type, originating pipeline), so the operation history
    survives later changes to the document itself.
    """

    model = PipelineOperationLog

    @classmethod
    def get_file_logs_fields(cls):
        """Columns selected for per-document (file) log listings."""
        return [
            cls.model.id,
            cls.model.document_id,
            cls.model.tenant_id,
            cls.model.kb_id,
            cls.model.pipeline_id,
            cls.model.pipeline_title,
            cls.model.parser_id,
            cls.model.document_name,
            cls.model.document_suffix,
            cls.model.document_type,
            cls.model.source_from,
            cls.model.progress,
            cls.model.progress_msg,
            cls.model.process_begin_at,
            cls.model.process_duration,
            cls.model.dsl,
            cls.model.task_type,
            cls.model.operation_status,
            cls.model.avatar,
            cls.model.status,
            cls.model.create_time,
            cls.model.create_date,
            cls.model.update_time,
            cls.model.update_date,
        ]

    @classmethod
    def get_dataset_logs_fields(cls):
        """Columns selected for dataset-level log listings (no document columns)."""
        return [
            cls.model.id,
            cls.model.tenant_id,
            cls.model.kb_id,
            cls.model.progress,
            cls.model.progress_msg,
            cls.model.process_begin_at,
            cls.model.process_duration,
            cls.model.task_type,
            cls.model.operation_status,
            cls.model.avatar,
            cls.model.status,
            cls.model.create_time,
            cls.model.create_date,
            cls.model.update_time,
            cls.model.update_date,
        ]

    @classmethod
    def save(cls, **kwargs):
        """
        Insert one log row.

        Callers must wrap this function in a transaction (see `create`, which
        runs it inside DB.atomic()).
        """
        sample_obj = cls.model(**kwargs).save(force_insert=True)
        return sample_obj

    @classmethod
    @DB.connection_context()
    def create(cls, document_id, pipeline_id, task_type, fake_document_ids=[], dsl: str = "{}"):
        """Create a log row for a finished (or failed) processing run.

        Returns the saved row, or None when the referred document is missing
        or still in progress.

        NOTE(review): `fake_document_ids=[]` is a mutable default argument;
        it is only read here, but consider defaulting to None.

        :param document_id: real document id, or GRAPH_RAPTOR_FAKE_DOC_ID for
            dataset-level tasks (then fake_document_ids[0] is the real doc).
        :param pipeline_id: originating pipeline (canvas) id; empty for
            built-in parser runs.
        :param task_type: one of VALID_PIPELINE_TASK_TYPES.
        :param dsl: pipeline DSL as a JSON string; stored parsed.
        :raises RuntimeError: when the pipeline or dataset cannot be found.
        :raises ValueError: on an invalid task_type.
        """
        referred_document_id = document_id
        if referred_document_id == GRAPH_RAPTOR_FAKE_DOC_ID and fake_document_ids:
            referred_document_id = fake_document_ids[0]
        ok, document = DocumentService.get_by_id(referred_document_id)
        if not ok:
            logging.warning(f"Document for referred_document_id {referred_document_id} not found")
            return None
        # Flush pending progress, then re-read so the snapshot below is current.
        DocumentService.update_progress_immediately([document.to_dict()])
        ok, document = DocumentService.get_by_id(referred_document_id)
        if not ok:
            logging.warning(f"Document for referred_document_id {referred_document_id} not found")
            return None
        # Only log terminal states: 1 == done, -1 == failed.
        if document.progress not in [1, -1]:
            return None
        operation_status = document.run
        if pipeline_id:
            ok, user_pipeline = UserCanvasService.get_by_id(pipeline_id)
            if not ok:
                raise RuntimeError(f"Pipeline {pipeline_id} not found")
            tenant_id = user_pipeline.user_id
            title = user_pipeline.title
            avatar = user_pipeline.avatar
        else:
            # Built-in parser run: attribute the log to the dataset owner.
            ok, kb_info = KnowledgebaseService.get_by_id(document.kb_id)
            if not ok:
                raise RuntimeError(f"Cannot find dataset {document.kb_id} for referred_document {referred_document_id}")
            tenant_id = kb_info.tenant_id
            title = document.parser_id
            avatar = document.thumbnail
        if task_type not in VALID_PIPELINE_TASK_TYPES:
            raise ValueError(f"Invalid task type: {task_type}")
        if task_type in [PipelineTaskType.GRAPH_RAG, PipelineTaskType.RAPTOR, PipelineTaskType.MINDMAP]:
            # Dataset-level tasks also stamp their finish time on the dataset.
            finish_at = document.process_begin_at + timedelta(seconds=document.process_duration)
            if task_type == PipelineTaskType.GRAPH_RAG:
                KnowledgebaseService.update_by_id(
                    document.kb_id,
                    {"graphrag_task_finish_at": finish_at},
                )
            elif task_type == PipelineTaskType.RAPTOR:
                KnowledgebaseService.update_by_id(
                    document.kb_id,
                    {"raptor_task_finish_at": finish_at},
                )
            elif task_type == PipelineTaskType.MINDMAP:
                KnowledgebaseService.update_by_id(
                    document.kb_id,
                    {"mindmap_task_finish_at": finish_at},
                )
        log = dict(
            id=get_uuid(),
            document_id=document_id,  # GRAPH_RAPTOR_FAKE_DOC_ID or real document_id
            tenant_id=tenant_id,
            kb_id=document.kb_id,
            pipeline_id=pipeline_id,
            pipeline_title=title,
            parser_id=document.parser_id,
            document_name=document.name,
            document_suffix=document.suffix,
            document_type=document.type,
            source_from=document.source_type.split("/")[0],
            progress=document.progress,
            progress_msg=document.progress_msg,
            process_begin_at=document.process_begin_at,
            process_duration=document.process_duration,
            dsl=json.loads(dsl),
            task_type=task_type,
            operation_status=operation_status,
            avatar=avatar,
        )
        timestamp = current_timestamp()
        datetime_now = datetime_format(datetime.now())
        log["create_time"] = timestamp
        log["create_date"] = datetime_now
        log["update_time"] = timestamp
        log["update_date"] = datetime_now
        with DB.atomic():
            obj = cls.save(**log)
            # Retention: keep only the newest PIPELINE_OPERATION_LOG_LIMIT rows
            # per dataset, trimming inside the same transaction as the insert.
            limit = int(os.getenv("PIPELINE_OPERATION_LOG_LIMIT", 1000))
            total = cls.model.select().where(cls.model.kb_id == document.kb_id).count()
            if total > limit:
                keep_ids = [m.id for m in cls.model.select(cls.model.id).where(cls.model.kb_id == document.kb_id).order_by(cls.model.create_time.desc()).limit(limit)]
                deleted = cls.model.delete().where(cls.model.kb_id == document.kb_id, cls.model.id.not_in(keep_ids)).execute()
                logging.info(f"[PipelineOperationLogService] Cleaned {deleted} old logs, kept latest {limit} for {document.kb_id}")
        return obj

    @classmethod
    @DB.connection_context()
    def record_pipeline_operation(cls, document_id, pipeline_id, task_type, fake_document_ids=[]):
        """Thin public wrapper around `create` (default DSL)."""
        return cls.create(document_id=document_id, pipeline_id=pipeline_id, task_type=task_type, fake_document_ids=fake_document_ids)

    @classmethod
    @DB.connection_context()
    def get_file_logs_by_kb_id(cls, kb_id, page_number, items_per_page, orderby, desc, keywords, operation_status, types, suffix, create_date_from=None, create_date_to=None):
        """Paginated per-document logs for a dataset, with optional filters.

        Dataset-level rows (GRAPH_RAPTOR_FAKE_DOC_ID) are excluded.
        Returns (rows_as_dicts, total_count_before_pagination).
        """
        fields = cls.get_file_logs_fields()
        if keywords:
            logs = cls.model.select(*fields).where((cls.model.kb_id == kb_id), (fn.LOWER(cls.model.document_name).contains(keywords.lower())))
        else:
            logs = cls.model.select(*fields).where(cls.model.kb_id == kb_id)
        logs = logs.where(cls.model.document_id != GRAPH_RAPTOR_FAKE_DOC_ID)
        if operation_status:
            logs = logs.where(cls.model.operation_status.in_(operation_status))
        if types:
            logs = logs.where(cls.model.document_type.in_(types))
        if suffix:
            logs = logs.where(cls.model.document_suffix.in_(suffix))
        if create_date_from:
            logs = logs.where(cls.model.create_date >= create_date_from)
        if create_date_to:
            logs = logs.where(cls.model.create_date <= create_date_to)
        count = logs.count()
        if desc:
            logs = logs.order_by(cls.model.getter_by(orderby).desc())
        else:
            logs = logs.order_by(cls.model.getter_by(orderby).asc())
        if page_number and items_per_page:
            logs = logs.paginate(page_number, items_per_page)
        return list(logs.dicts()), count

    @classmethod
    @DB.connection_context()
    def get_documents_info(cls, id):
        """Join a log row to its document; returns peewee dicts of basic doc info."""
        fields = [Document.id, Document.name, Document.progress, Document.kb_id]
        return (
            cls.model.select(*fields)
            .join(Document, on=(cls.model.document_id == Document.id))
            .where(
                cls.model.id == id
            )
            .dicts()
        )

    @classmethod
    @DB.connection_context()
    def get_dataset_logs_by_kb_id(cls, kb_id, page_number, items_per_page, orderby, desc, operation_status, create_date_from=None, create_date_to=None):
        """Paginated dataset-level logs (rows tagged GRAPH_RAPTOR_FAKE_DOC_ID).

        Returns (rows_as_dicts, total_count_before_pagination).
        """
        fields = cls.get_dataset_logs_fields()
        logs = cls.model.select(*fields).where((cls.model.kb_id == kb_id), (cls.model.document_id == GRAPH_RAPTOR_FAKE_DOC_ID))
        if operation_status:
            logs = logs.where(cls.model.operation_status.in_(operation_status))
        if create_date_from:
            logs = logs.where(cls.model.create_date >= create_date_from)
        if create_date_to:
            logs = logs.where(cls.model.create_date <= create_date_to)
        count = logs.count()
        if desc:
            logs = logs.order_by(cls.model.getter_by(orderby).desc())
        else:
            logs = logs.order_by(cls.model.getter_by(orderby).asc())
        if page_number and items_per_page:
            logs = logs.paginate(page_number, items_per_page)
        return list(logs.dicts()), count
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/db/services/pipeline_operation_log_service.py",
"license": "Apache License 2.0",
"lines": 234,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:rag/flow/extractor/extractor.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import random
from copy import deepcopy
import xxhash
from agent.component.llm import LLMParam, LLM
from rag.flow.base import ProcessBase, ProcessParamBase
from rag.prompts.generator import run_toc_from_text
class ExtractorParam(ProcessParamBase, LLMParam):
    """Parameters for the Extractor pipeline component."""

    def __init__(self):
        super().__init__()
        # Chunk field the LLM result is written to ("Result Destination" in the UI).
        self.field_name = ""

    def check(self):
        """Validate inherited LLM params, then require a destination field."""
        super().check()
        self.check_empty(self.field_name, "Result Destination")
class Extractor(ProcessBase, LLM):
    """Pipeline component that runs an LLM over incoming chunks and stores the
    result under a configurable field (``self._param.field_name``).

    Special case: when the destination field is ``"toc"``, one document-wide
    table-of-contents chunk is generated instead of per-chunk extraction.
    """

    component_name = "Extractor"

    async def _build_TOC(self, docs):
        """Build a table-of-contents chunk from the given chunk dicts.

        Chunks are sorted by (page, vertical position) before being fed to the
        TOC prompt; each TOC entry's ``chunk_id`` index is resolved into the
        concrete chunk ``id``s it covers.

        :return: the TOC chunk dict, or None when no TOC was produced.
        """
        self.callback(0.2, message="Start to generate table of content ...")
        # page_num_int / top_int may be scalars or lists (multi-position
        # chunks); sort on the first element in the list case.
        docs = sorted(docs, key=lambda d: (
            d.get("page_num_int", 0)[0] if isinstance(d.get("page_num_int", 0), list) else d.get("page_num_int", 0),
            d.get("top_int", 0)[0] if isinstance(d.get("top_int", 0), list) else d.get("top_int", 0)
        ))
        toc = await run_toc_from_text([d["text"] for d in docs], self.chat_mdl)
        logging.info("------------ T O C -------------\n" + json.dumps(toc, ensure_ascii=False, indent=' '))
        ii = 0
        while ii < len(toc):
            try:
                idx = int(toc[ii]["chunk_id"])
                del toc[ii]["chunk_id"]
                # Entry ii covers chunks from its own start index up to (and
                # including) the next entry's start index.
                toc[ii]["ids"] = [docs[idx]["id"]]
                if ii == len(toc) - 1:
                    break
                for jj in range(idx + 1, int(toc[ii + 1]["chunk_id"]) + 1):
                    toc[ii]["ids"].append(docs[jj]["id"])
            except Exception as e:
                # One malformed entry must not abort the whole TOC.
                logging.exception(e)
            ii += 1
        if toc:
            # Store the TOC as a hidden (available_int=0) chunk sorted after
            # every real page.
            d = deepcopy(docs[-1])
            d["doc_id"] = self._canvas._doc_id
            d["content_with_weight"] = json.dumps(toc, ensure_ascii=False)
            d["toc_kwd"] = "toc"
            d["available_int"] = 0
            d["page_num_int"] = [100000000]
            d["id"] = xxhash.xxh64((d["content_with_weight"] + str(d["doc_id"])).encode("utf-8", "surrogatepass")).hexdigest()
            return d
        return None

    async def _invoke(self, **kwargs):
        """Run extraction over upstream inputs.

        The (last) list-valued input element is treated as the chunk list; all
        input values are exposed to the prompt as template variables.
        """
        self.set_output("output_format", "chunks")
        self.callback(random.randint(1, 5) / 100.0, "Start to generate.")
        inputs = self.get_input_elements()
        chunks = []
        chunks_key = ""
        args = {}
        for k, v in inputs.items():
            args[k] = v["value"]
            if isinstance(args[k], list):
                chunks = deepcopy(args[k])
                chunks_key = k
        if chunks:
            if self._param.field_name == "toc":
                for ck in chunks:
                    ck["doc_id"] = self._canvas._doc_id
                    ck["id"] = xxhash.xxh64((ck["text"] + str(ck["doc_id"])).encode("utf-8")).hexdigest()
                toc = await self._build_TOC(chunks)
                # BUG FIX: _build_TOC returns None when no TOC could be built;
                # previously that None was appended as a bogus chunk.
                if toc is not None:
                    chunks.append(toc)
                self.set_output("chunks", chunks)
                return
            prog = 0
            for i, ck in enumerate(chunks):
                args[chunks_key] = ck["text"]
                msg, sys_prompt = self._sys_prompt_and_msg([], args)
                msg.insert(0, {"role": "system", "content": sys_prompt})
                ck[self._param.field_name] = await self._generate_async(msg)
                prog += 1. / len(chunks)
                # Throttle progress callbacks to roughly one per percent.
                if i % (len(chunks) // 100 + 1) == 1:
                    self.callback(prog, f"{i + 1} / {len(chunks)}")
            self.set_output("chunks", chunks)
        else:
            # No list-valued input upstream: run the prompt once over scalars.
            msg, sys_prompt = self._sys_prompt_and_msg([], args)
            msg.insert(0, {"role": "system", "content": sys_prompt})
            self.set_output("chunks", [{self._param.field_name: await self._generate_async(msg)}])
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/flow/extractor/extractor.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:rag/flow/extractor/schema.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Literal
from pydantic import BaseModel, ConfigDict, Field
class ExtractorFromUpstream(BaseModel):
    """Validated payload an Extractor component receives from its upstream node.

    Exactly one of the *_result fields (or `chunks`) is expected to carry data,
    as indicated by `output_format`; extra keys are rejected (`extra="forbid"`).
    """

    # Runtime bookkeeping injected by the pipeline (underscore-prefixed aliases).
    created_time: float | None = Field(default=None, alias="_created_time")
    elapsed_time: float | None = Field(default=None, alias="_elapsed_time")
    # Name of the upstream component.
    name: str
    # Original file descriptor, when the upstream worked on a file.
    file: dict | None = Field(default=None)
    # Chunk dicts, when the upstream emitted chunks.
    chunks: list[dict[str, Any]] | None = Field(default=None)
    # Which of the result fields below actually holds the payload.
    output_format: Literal["json", "markdown", "text", "html", "chunks"] | None = Field(default=None)
    # Aliased result payloads, one per possible upstream output format.
    json_result: list[dict[str, Any]] | None = Field(default=None, alias="json")
    markdown_result: str | None = Field(default=None, alias="markdown")
    text_result: str | None = Field(default=None, alias="text")
    html_result: str | None = Field(default=None, alias="html")

    model_config = ConfigDict(populate_by_name=True, extra="forbid")
    # def to_dict(self, *, exclude_none: bool = True) -> dict:
    #     return self.model_dump(by_alias=True, exclude_none=exclude_none)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/flow/extractor/schema.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:rag/flow/hierarchical_merger/hierarchical_merger.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import random
import re
from copy import deepcopy
from functools import partial
from common.misc_utils import get_uuid
from rag.utils.base64_image import id2image, image2id
from deepdoc.parser.pdf_parser import RAGFlowPdfParser
from rag.flow.base import ProcessBase, ProcessParamBase
from rag.flow.hierarchical_merger.schema import HierarchicalMergerFromUpstream
from rag.nlp import concat_img
from common import settings
class HierarchicalMergerParam(ProcessParamBase):
    """Parameters for the HierarchicalMerger pipeline component."""

    def __init__(self):
        super().__init__()
        # One entry per heading level: levels[i] is the list of regex patterns
        # that identify a level-i heading line.
        self.levels = []
        # Depth (level count) at which merged chunks are emitted.
        self.hierarchy = None

    def check(self):
        """Require both the level patterns and the hierarchy depth."""
        self.check_empty(self.levels, "Hierarchical setups.")
        self.check_empty(self.hierarchy, "Hierarchy number.")

    def get_input_form(self) -> dict[str, dict]:
        """This component exposes no interactive input form."""
        return {}
class HierarchicalMerger(ProcessBase):
    """Pipeline component that groups lines into chunks along a heading
    hierarchy: lines matching level-i regexes become tree nodes, plain lines
    attach to the deepest open node, and each subtree down to
    ``self._param.hierarchy`` becomes one chunk.
    """

    component_name = "HierarchicalMerger"

    async def _invoke(self, **kwargs):
        try:
            from_upstream = HierarchicalMergerFromUpstream.model_validate(kwargs)
        except Exception as e:
            self.set_output("_ERROR", f"Input error: {str(e)}")
            return
        self.set_output("output_format", "chunks")
        self.callback(random.randint(1, 5) / 100.0, "Start to merge hierarchically.")
        # --- Collect input lines (and per-line image ids for structured input).
        if from_upstream.output_format in ["markdown", "text", "html"]:
            if from_upstream.output_format == "markdown":
                payload = from_upstream.markdown_result
            elif from_upstream.output_format == "text":
                payload = from_upstream.text_result
            else:  # == "html"
                payload = from_upstream.html_result
            if not payload:
                payload = ""
            lines = [ln for ln in payload.split("\n") if ln]
        else:
            arr = from_upstream.chunks if from_upstream.output_format == "chunks" else from_upstream.json_result
            arr = arr or []
            sections, section_images = [], []
            lines = []
            for o in arr:
                if isinstance(o, dict):
                    raw_text = o.get("text")
                    position_tag = o.get("position_tag", "")
                    img_id = o.get("img_id")
                else:
                    raw_text = o
                    position_tag = ""
                    img_id = None
                txt = raw_text if isinstance(raw_text, str) else ("" if raw_text is None else str(raw_text))
                lines.append(txt)
                sections.append((txt, position_tag))
                section_images.append(img_id)
        # --- Classify each line: heading level index, or len(levels) for body text.
        matches = []
        for txt in lines:
            good = False
            for lvl, regs in enumerate(self._param.levels):
                for reg in regs:
                    if re.search(reg, txt):
                        matches.append(lvl)
                        good = True
                        break
                if good:
                    break
            if not good:
                matches.append(len(self._param.levels))
        assert len(matches) == len(lines), f"{len(matches)} vs. {len(lines)}"
        # --- Build the heading tree; "texts" holds body-line indices.
        root = {
            "level": -1,
            "index": -1,
            "texts": [],
            "children": []
        }
        for i, m in enumerate(matches):
            if m == 0:
                # Top-level heading: new child directly under the root.
                root["children"].append({
                    "level": m,
                    "index": i,
                    "texts": [],
                    "children": []
                })
            elif m == len(self._param.levels):
                # Body text: attach to the deepest currently-open node.
                def dfs(b):
                    if not b["children"]:
                        b["texts"].append(i)
                    else:
                        dfs(b["children"][-1])
                dfs(root)
            else:
                # Intermediate heading: descend until the parent level fits.
                def dfs(b):
                    nonlocal m, i
                    if not b["children"] or m == b["level"] + 1:
                        b["children"].append({
                            "level": m,
                            "index": i,
                            "texts": [],
                            "children": []
                        })
                        return
                    dfs(b["children"][-1])
                dfs(root)
        # --- Flatten subtrees at the configured hierarchy depth into line-index paths.
        all_pathes = []

        def dfs(n, path, depth):
            nonlocal all_pathes
            if not n["children"] and path:
                all_pathes.append(path)
            for nn in n["children"]:
                if depth < self._param.hierarchy:
                    _path = deepcopy(path)
                else:
                    _path = path
                _path.extend([nn["index"], *nn["texts"]])
                dfs(nn, _path, depth + 1)
                if depth == self._param.hierarchy:
                    all_pathes.append(_path)
        dfs(root, [], 0)
        if root["texts"]:
            # Body text that appeared before any heading becomes its own chunk.
            all_pathes.insert(0, root["texts"])
        # --- Emit chunks.
        if from_upstream.output_format in ["markdown", "text", "html"]:
            cks = []
            for path in all_pathes:
                txt = ""
                for i in path:
                    txt += lines[i] + "\n"
                cks.append(txt)
            self.set_output("chunks", [{"text": c} for c in cks if c])
        else:
            cks = []
            images = []
            for path in all_pathes:
                txt = ""
                img = None
                for i in path:
                    txt += lines[i] + "\n"
                    # BUG FIX: the return value of concat_img was discarded, so
                    # `img` stayed None and chunk images were always empty.
                    img = concat_img(img, id2image(section_images[i], partial(settings.STORAGE_IMPL.get, tenant_id=self._canvas._tenant_id)))
                cks.append(txt)
                images.append(img)
            cks = [
                {
                    "text": RAGFlowPdfParser.remove_tag(c),
                    "image": img,
                    "positions": RAGFlowPdfParser.extract_positions(c),
                }
                for c, img in zip(cks, images)
            ]
            # Upload chunk images concurrently; on any failure cancel the rest
            # and re-raise.
            tasks = []
            for d in cks:
                tasks.append(asyncio.create_task(image2id(d, partial(settings.STORAGE_IMPL.put, tenant_id=self._canvas._tenant_id), get_uuid())))
            try:
                await asyncio.gather(*tasks, return_exceptions=False)
            except Exception as e:
                logging.error(f"Error in image2id: {e}")
                for t in tasks:
                    t.cancel()
                await asyncio.gather(*tasks, return_exceptions=True)
                raise
            self.set_output("chunks", cks)
        self.callback(1, "Done.")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/flow/hierarchical_merger/hierarchical_merger.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:rag/flow/hierarchical_merger/schema.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Literal
from pydantic import BaseModel, ConfigDict, Field
class HierarchicalMergerFromUpstream(BaseModel):
created_time: float | None = Field(default=None, alias="_created_time")
elapsed_time: float | None = Field(default=None, alias="_elapsed_time")
name: str
file: dict | None = Field(default=None)
chunks: list[dict[str, Any]] | None = Field(default=None)
output_format: Literal["json", "markdown", "text", "html", "chunks"] | None = Field(default=None)
json_result: list[dict[str, Any]] | None = Field(default=None, alias="json")
markdown_result: str | None = Field(default=None, alias="markdown")
text_result: str | None = Field(default=None, alias="text")
html_result: str | None = Field(default=None, alias="html")
model_config = ConfigDict(populate_by_name=True, extra="forbid")
# def to_dict(self, *, exclude_none: bool = True) -> dict:
# return self.model_dump(by_alias=True, exclude_none=exclude_none)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/flow/hierarchical_merger/schema.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:rag/flow/splitter/splitter.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import random
import re
from copy import deepcopy
from functools import partial
from common.misc_utils import get_uuid
from rag.utils.base64_image import id2image, image2id
from deepdoc.parser.pdf_parser import RAGFlowPdfParser
from rag.flow.base import ProcessBase, ProcessParamBase
from rag.flow.splitter.schema import SplitterFromUpstream
from common.float_utils import normalize_overlapped_percent
from rag.nlp import attach_media_context, naive_merge, naive_merge_with_images
from common import settings
class SplitterParam(ProcessParamBase):
    """Parameters for the Splitter pipeline component."""

    def __init__(self):
        super().__init__()
        # Target chunk size in tokens.
        self.chunk_token_size = 512
        # Delimiters used by naive_merge when assembling chunks.
        self.delimiters = ["\n"]
        # Fraction of a chunk to overlap with the next one, in [0, 1).
        self.overlapped_percent = 0
        # Literal separators used to further split each chunk into child chunks.
        self.children_delimiters = []
        # How many neighboring text blocks to attach around tables/images.
        self.table_context_size = 0
        self.image_context_size = 0

    def check(self):
        """Validate all splitter settings."""
        self.check_empty(self.delimiters, "Delimiters.")
        self.check_positive_integer(self.chunk_token_size, "Chunk token size.")
        self.check_decimal_float(self.overlapped_percent, "Overlapped percentage: [0, 1)")
        self.check_nonnegative_number(self.table_context_size, "Table context size.")
        self.check_nonnegative_number(self.image_context_size, "Image context size.")

    def get_input_form(self) -> dict[str, dict]:
        """This component exposes no interactive input form."""
        return {}
class Splitter(ProcessBase):
    """Pipeline component that merges upstream text/blocks into token-sized
    chunks (via naive_merge), optionally re-splitting each chunk into child
    chunks on user-provided literal delimiters.
    """

    component_name = "Splitter"

    async def _invoke(self, **kwargs):
        try:
            from_upstream = SplitterFromUpstream.model_validate(kwargs)
        except Exception as e:
            self.set_output("_ERROR", f"Input error: {str(e)}")
            return
        # Build the delimiter string for naive_merge; multi-char delimiters are
        # wrapped in backticks (presumably how naive_merge distinguishes them —
        # confirm in rag.nlp).
        deli = ""
        for d in self._param.delimiters:
            if len(d) > 1:
                deli += f"`{d}`"
            else:
                deli += d
        # Longest-first alternation of escaped child delimiters, so longer
        # separators win over their prefixes.
        custom_pattern = "|".join(re.escape(t) for t in sorted(set(self._param.children_delimiters), key=len, reverse=True))
        self.set_output("output_format", "chunks")
        self.callback(random.randint(1, 5) / 100.0, "Start to split into chunks.")
        overlapped_percent = normalize_overlapped_percent(self._param.overlapped_percent)
        if from_upstream.output_format in ["markdown", "text", "html"]:
            # --- Plain-text path: merge the raw payload directly.
            if from_upstream.output_format == "markdown":
                payload = from_upstream.markdown_result
            elif from_upstream.output_format == "text":
                payload = from_upstream.text_result
            else:  # == "html"
                payload = from_upstream.html_result
            if not payload:
                payload = ""
            cks = naive_merge(
                payload,
                self._param.chunk_token_size,
                deli,
                overlapped_percent,
            )
            if custom_pattern:
                # Re-split each chunk into children; "mom" keeps the parent text.
                docs = []
                for c in cks:
                    if not c.strip():
                        continue
                    # Capturing group keeps the delimiters at odd indices;
                    # step-2 iteration takes only the content pieces.
                    split_sec = re.split(r"(%s)" % custom_pattern, c, flags=re.DOTALL)
                    if split_sec:
                        for j in range(0, len(split_sec), 2):
                            if not split_sec[j].strip():
                                continue
                            docs.append({
                                "text": split_sec[j],
                                "mom": c
                            })
                    else:
                        docs.append({"text": c})
                self.set_output("chunks", docs)
            else:
                self.set_output("chunks", [{"text": c.strip()} for c in cks if c.strip()])
            self.callback(1, "Done.")
            return
        # json
        json_result = from_upstream.json_result or []
        if self._param.table_context_size or self._param.image_context_size:
            # Temporarily tag caption-less image blocks so attach_media_context
            # can find them, then strip the marker again.
            for ck in json_result:
                if "image" not in ck and ck.get("img_id") and not (isinstance(ck.get("text"), str) and ck.get("text").strip()):
                    ck["image"] = True
            attach_media_context(json_result, self._param.table_context_size, self._param.image_context_size)
            for ck in json_result:
                if ck.get("image") is True:
                    del ck["image"]
        sections, section_images = [], []
        for o in json_result:
            sections.append((o.get("text", ""), o.get("position_tag", "")))
            section_images.append(id2image(o.get("img_id"), partial(settings.STORAGE_IMPL.get, tenant_id=self._canvas._tenant_id)))
        chunks, images = naive_merge_with_images(
            sections,
            section_images,
            self._param.chunk_token_size,
            deli,
            overlapped_percent,
        )
        cks = [
            {
                "text": RAGFlowPdfParser.remove_tag(c),
                "image": img,
                "positions": [[pos[0][-1], *pos[1:]] for pos in RAGFlowPdfParser.extract_positions(c)]
            }
            for c, img in zip(chunks, images) if c.strip()
        ]
        # Upload chunk images concurrently; on failure cancel the rest and re-raise.
        tasks = []
        for d in cks:
            tasks.append(asyncio.create_task(image2id(d, partial(settings.STORAGE_IMPL.put, tenant_id=self._canvas._tenant_id), get_uuid())))
        try:
            await asyncio.gather(*tasks, return_exceptions=False)
        except Exception as e:
            logging.error(f"error when splitting: {e}")
            for t in tasks:
                t.cancel()
            await asyncio.gather(*tasks, return_exceptions=True)
            raise
        if custom_pattern:
            docs = []
            for c in cks:
                split_sec = re.split(r"(%s)" % custom_pattern, c["text"], flags=re.DOTALL)
                if split_sec:
                    c["mom"] = c["text"]
                    for j in range(0, len(split_sec), 2):
                        if not split_sec[j].strip():
                            continue
                        # Children share the parent's image/positions metadata.
                        cc = deepcopy(c)
                        cc["text"] = split_sec[j]
                        docs.append(cc)
                else:
                    docs.append(c)
            self.set_output("chunks", docs)
        else:
            self.set_output("chunks", cks)
        self.callback(1, "Done.")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/flow/splitter/splitter.py",
"license": "Apache License 2.0",
"lines": 157,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:api/common/check_team_permission.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from api.db import TenantPermission
from api.db.db_models import File, Knowledgebase
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.user_service import TenantService
def check_kb_team_permission(kb: dict | Knowledgebase, other: str) -> bool:
    """Return True when *other* (a user/tenant id) may access knowledgebase *kb*.

    Access is granted when the user owns the knowledgebase, or when the
    knowledgebase is shared at TEAM level and the user has joined the
    owner's tenant.
    """
    if isinstance(kb, Knowledgebase):
        kb = kb.to_dict()
    owner_id = kb["tenant_id"]
    if owner_id == other:
        return True
    if kb["permission"] != TenantPermission.TEAM:
        return False
    # Team-shared: the user qualifies if any tenant they joined is the owner.
    for tenant in TenantService.get_joined_tenants_by_user_id(other):
        if tenant["tenant_id"] == owner_id:
            return True
    return False
def check_file_team_permission(file: dict | File, other: str) -> bool:
    """Return True when *other* may access *file*, either as its owner or via
    team permission on any knowledgebase the file is linked to."""
    if isinstance(file, File):
        file = file.to_dict()
    if file["tenant_id"] == other:
        return True
    # The file may be attached to several knowledgebases; team access to any
    # one of them grants access to the file itself.
    for kb_info in FileService.get_kb_id_by_file_id(file["id"]):
        ok, kb = KnowledgebaseService.get_by_id(kb_info["kb_id"])
        if ok and check_kb_team_permission(kb, other):
            return True
    return False
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/common/check_team_permission.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:api/common/base64.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
def encode_to_base64(input_string):
    """UTF-8 encode *input_string* and return its Base64 representation as str."""
    base64_encoded = base64.b64encode(input_string.encode('utf-8'))
    return base64_encoded.decode('utf-8') | {
"repo_id": "infiniflow/ragflow",
"file_path": "api/common/base64.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:api/utils/common.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import xxhash
def string_to_bytes(string):
    """Return *string* as bytes, UTF-8 encoding it unless it already is bytes."""
    if isinstance(string, bytes):
        return string
    return string.encode(encoding="utf-8")
def bytes_to_string(byte):
    """Decode UTF-8 *byte* (a bytes object) back into a str."""
    return str(byte, encoding="utf-8")
# 128 bit = 32 character
def hash128(data: str) -> str:
    """Return the 128-bit xxHash digest of *data* as a 32-character hex string."""
    digest = xxhash.xxh128(data)
    return digest.hexdigest()
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/utils/common.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:api/utils/configs.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import base64
import pickle
from api.utils.common import bytes_to_string, string_to_bytes
from common.config_utils import get_base_config
safe_module = {
'numpy',
'rag_flow'
}
class RestrictedUnpickler(pickle.Unpickler):
    """Unpickler that only resolves globals from top-level modules listed in
    ``safe_module``; any other global reference aborts unpickling."""

    def find_class(self, module, name):
        import importlib
        top_level = module.split('.')[0]
        if top_level in safe_module:
            resolved_module = importlib.import_module(module)
            return getattr(resolved_module, name)
        # Forbid everything else.
        raise pickle.UnpicklingError("global '%s.%s' is forbidden" %
                                     (module, name))
def restricted_loads(src):
    """Helper function analogous to pickle.loads(), but routed through
    RestrictedUnpickler so only whitelisted modules may be referenced."""
    buffer = io.BytesIO(src)
    return RestrictedUnpickler(buffer).load()
def serialize_b64(src, to_str=False):
    """Pickle *src* and Base64-encode the result.

    Returns bytes by default; returns a str when *to_str* is True.
    """
    encoded = base64.b64encode(pickle.dumps(src))
    return bytes_to_string(encoded) if to_str else encoded
def deserialize_b64(src):
    """Base64-decode *src* (str or bytes) and unpickle the resulting payload.

    When the 'use_deserialize_safe_module' config flag is set, unpickling goes
    through ``restricted_loads`` (whitelist-only globals); otherwise a plain
    ``pickle.loads`` is used.
    NOTE(review): plain pickle.loads on untrusted data allows arbitrary code
    execution — confirm *src* always comes from a trusted source.
    """
    src = base64.b64decode(
        string_to_bytes(src) if isinstance(
            src, str) else src)
    use_deserialize_safe_module = get_base_config(
        'use_deserialize_safe_module', False)
    if use_deserialize_safe_module:
        return restricted_loads(src)
    return pickle.loads(src)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/utils/configs.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:api/db/joint_services/user_account_service.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import uuid
from api.utils.api_utils import group_by
from api.db import FileType, UserTenantRole
from api.db.services.api_service import APITokenService, API4ConversationService
from api.db.services.canvas_service import UserCanvasService
from api.db.services.conversation_service import ConversationService
from api.db.services.dialog_service import DialogService
from api.db.services.document_service import DocumentService
from api.db.services.doc_metadata_service import DocMetadataService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.langfuse_service import TenantLangfuseService
from api.db.services.llm_service import get_init_tenant_llm
from api.db.services.file_service import FileService
from api.db.services.mcp_server_service import MCPServerService
from api.db.services.search_service import SearchService
from api.db.services.task_service import TaskService
from api.db.services.tenant_llm_service import TenantLLMService
from api.db.services.user_canvas_version import UserCanvasVersionService
from api.db.services.user_service import TenantService, UserService, UserTenantService
from api.db.services.memory_service import MemoryService
from memory.services.messages import MessageService
from rag.nlp import search
from common.constants import ActiveEnum
from common import settings
def create_new_user(user_info: dict) -> dict:
    """
    Add a new user, and create tenant, tenant llm, file folder for new user.
    :param user_info: {
        "email": <example@example.com>,
        "nickname": <str, "name">,
        "password": <decrypted password>,
        "login_channel": <enum, "password">,
        "is_superuser": <bool, role == "admin">,
    }
    :return: {
        "success": <bool>,
        "user_info": <dict>, # if true, return user_info
    }
    :raises Exception: re-raises the original failure after best-effort rollback.
    """
    # generate user_id and access_token for user
    user_id = uuid.uuid1().hex
    user_info['id'] = user_id
    user_info['access_token'] = uuid.uuid1().hex
    # construct tenant info; model ids come from server-level defaults
    tenant = {
        "id": user_id,
        # NOTE(review): "โs Kingdom" looks like mojibake for "'s Kingdom" — confirm intended
        "name": user_info["nickname"] + "โs Kingdom",
        "llm_id": settings.CHAT_MDL,
        "embd_id": settings.EMBEDDING_MDL,
        "asr_id": settings.ASR_MDL,
        "parser_ids": settings.PARSERS,
        "img2txt_id": settings.IMAGE2TEXT_MDL,
        "rerank_id": settings.RERANK_MDL,
    }
    # the new user owns their own single-member tenant
    usr_tenant = {
        "tenant_id": user_id,
        "user_id": user_id,
        "invited_by": user_id,
        "role": UserTenantRole.OWNER,
    }
    # construct file folder info: root "/" folder is its own parent
    file_id = uuid.uuid1().hex
    file = {
        "id": file_id,
        "parent_id": file_id,
        "tenant_id": user_id,
        "created_by": user_id,
        "name": "/",
        "type": FileType.FOLDER.value,
        "size": 0,
        "location": "",
    }
    try:
        tenant_llm = get_init_tenant_llm(user_id)
        if not UserService.save(**user_info):
            return {"success": False}
        TenantService.insert(**tenant)
        UserTenantService.insert(**usr_tenant)
        TenantLLMService.insert_many(tenant_llm)
        FileService.insert(file)
        return {
            "success": True,
            "user_info": user_info,
        }
    except Exception as create_error:
        logging.exception(create_error)
        # rollback — each step is wrapped separately so one failed cleanup
        # does not prevent the remaining cleanups from running
        try:
            metadata_index_name = DocMetadataService._get_doc_meta_index_name(user_id)
            settings.docStoreConn.delete_idx(metadata_index_name, "")
        except Exception as e:
            logging.exception(e)
        try:
            TenantService.delete_by_id(user_id)
        except Exception as e:
            logging.exception(e)
        try:
            u = UserTenantService.query(tenant_id=user_id)
            if u:
                UserTenantService.delete_by_id(u[0].id)
        except Exception as e:
            logging.exception(e)
        try:
            TenantLLMService.delete_by_tenant_id(user_id)
        except Exception as e:
            logging.exception(e)
        try:
            FileService.delete_by_id(file["id"])
        except Exception as e:
            logging.exception(e)
        # delete user row finally
        try:
            UserService.delete_by_id(user_id)
        except Exception as e:
            logging.exception(e)
        # reraise
        raise create_error
def delete_user_data(user_id: str) -> dict:
    """Permanently delete an inactive, non-superuser user and all data they own.

    Deletes, in order: the owned tenant's datasets/files/chunks/agents/dialogs/
    MCP servers/searches/LLM bindings/memories, data the user created inside
    joined tenants, the user-tenant relations, and finally the user row.

    :return: {"success": <bool>, "message": <str>} — on failure also "details"
             with the partial progress log (operations are not transactional).
    """
    # use user_id to delete
    usr = UserService.filter_by_id(user_id)
    if not usr:
        return {"success": False, "message": f"{user_id} can't be found."}
    # check is inactive and not admin
    if usr.is_active == ActiveEnum.ACTIVE.value:
        return {"success": False, "message": f"{user_id} is active and can't be deleted."}
    if usr.is_superuser:
        return {"success": False, "message": "Can't delete the super user."}
    # tenant info
    tenants = UserTenantService.get_user_tenant_relation_by_user_id(usr.id)
    owned_tenant = [t for t in tenants if t["role"] == UserTenantRole.OWNER.value]
    # running progress log, returned to the caller even on partial failure
    done_msg = ''
    try:
        # step1. delete owned tenant info
        if owned_tenant:
            done_msg += "Start to delete owned tenant.\n"
            tenant_id = owned_tenant[0]["tenant_id"]
            kb_ids = KnowledgebaseService.get_kb_ids(usr.id)
            # step1.1 delete dataset related file and info
            if kb_ids:
                # step1.1.1 delete files in storage, remove bucket
                for kb_id in kb_ids:
                    if settings.STORAGE_IMPL.bucket_exists(kb_id):
                        settings.STORAGE_IMPL.remove_bucket(kb_id)
                done_msg += f"- Removed {len(kb_ids)} dataset's buckets.\n"
                # step1.1.2 delete file and document info in db
                doc_ids = DocumentService.get_all_doc_ids_by_kb_ids(kb_ids)
                if doc_ids:
                    for doc in doc_ids:
                        try:
                            DocMetadataService.delete_document_metadata(doc["id"], skip_empty_check=True)
                        except Exception as e:
                            logging.warning(f"Failed to delete metadata for document {doc['id']}: {e}")
                    doc_delete_res = DocumentService.delete_by_ids([i["id"] for i in doc_ids])
                    done_msg += f"- Deleted {doc_delete_res} document records.\n"
                    task_delete_res = TaskService.delete_by_doc_ids([i["id"] for i in doc_ids])
                    done_msg += f"- Deleted {task_delete_res} task records.\n"
                file_ids = FileService.get_all_file_ids_by_tenant_id(usr.id)
                if file_ids:
                    file_delete_res = FileService.delete_by_ids([f["id"] for f in file_ids])
                    done_msg += f"- Deleted {file_delete_res} file records.\n"
                if doc_ids or file_ids:
                    file2doc_delete_res = File2DocumentService.delete_by_document_ids_or_file_ids(
                        [i["id"] for i in doc_ids],
                        [f["id"] for f in file_ids]
                    )
                    done_msg += f"- Deleted {file2doc_delete_res} document-file relation records.\n"
                # step1.1.3 delete chunk in es
                r = settings.docStoreConn.delete({"kb_id": kb_ids},
                                                 search.index_name(tenant_id), kb_ids)
                done_msg += f"- Deleted {r} chunk records.\n"
                kb_delete_res = KnowledgebaseService.delete_by_ids(kb_ids)
                done_msg += f"- Deleted {kb_delete_res} dataset records.\n"
            # step1.1.4 delete agents
            agent_delete_res = delete_user_agents(usr.id)
            done_msg += f"- Deleted {agent_delete_res['agents_deleted_count']} agent, {agent_delete_res['version_deleted_count']} versions records.\n"
            # step1.1.5 delete dialogs
            dialog_delete_res = delete_user_dialogs(usr.id)
            done_msg += f"- Deleted {dialog_delete_res['dialogs_deleted_count']} dialogs, {dialog_delete_res['conversations_deleted_count']} conversations, {dialog_delete_res['api_token_deleted_count']} api tokens, {dialog_delete_res['api4conversation_deleted_count']} api4conversations.\n"
            # step1.1.6 delete mcp server
            mcp_delete_res = MCPServerService.delete_by_tenant_id(usr.id)
            done_msg += f"- Deleted {mcp_delete_res} MCP server.\n"
            # step1.1.7 delete search
            search_delete_res = SearchService.delete_by_tenant_id(usr.id)
            done_msg += f"- Deleted {search_delete_res} search records.\n"
            # step1.2 delete tenant_llm and tenant_langfuse
            llm_delete_res = TenantLLMService.delete_by_tenant_id(tenant_id)
            done_msg += f"- Deleted {llm_delete_res} tenant-LLM records.\n"
            langfuse_delete_res = TenantLangfuseService.delete_ty_tenant_id(tenant_id)
            done_msg += f"- Deleted {langfuse_delete_res} langfuse records.\n"
            # metadata index removal is best-effort: log and continue on failure
            try:
                metadata_index_name = DocMetadataService._get_doc_meta_index_name(tenant_id)
                settings.docStoreConn.delete_idx(metadata_index_name, "")
                done_msg += f"- Deleted metadata table {metadata_index_name}.\n"
            except Exception as e:
                logging.warning(f"Failed to delete metadata table for tenant {tenant_id}: {e}")
                done_msg += "- Warning: Failed to delete metadata table (continuing).\n"
            # step1.3 delete memory and messages
            user_memory = MemoryService.get_by_tenant_id(tenant_id)
            if user_memory:
                for memory in user_memory:
                    if MessageService.has_index(tenant_id, memory.id):
                        MessageService.delete_index(tenant_id, memory.id)
                        done_msg += " Deleted memory index."
                memory_delete_res = MemoryService.delete_by_ids([m.id for m in user_memory])
                done_msg += f"Deleted {memory_delete_res} memory datasets."
            # step1.4 delete own tenant
            tenant_delete_res = TenantService.delete_by_id(tenant_id)
            done_msg += f"- Deleted {tenant_delete_res} tenant.\n"
        # step2 delete user-tenant relation
        if tenants:
            # step2.1 delete docs and files in joined team
            joined_tenants = [t for t in tenants if t["role"] == UserTenantRole.NORMAL.value]
            if joined_tenants:
                done_msg += "Start to delete data in joined tenants.\n"
                created_documents = DocumentService.get_all_docs_by_creator_id(usr.id)
                if created_documents:
                    # step2.1.1 delete files
                    doc_file_info = File2DocumentService.get_by_document_ids([d['id'] for d in created_documents])
                    created_files = FileService.get_by_ids([f['file_id'] for f in doc_file_info])
                    if created_files:
                        # step2.1.1.1 delete file in storage
                        for f in created_files:
                            settings.STORAGE_IMPL.rm(f.parent_id, f.location)
                        done_msg += f"- Deleted {len(created_files)} uploaded file.\n"
                        # step2.1.1.2 delete file record
                        file_delete_res = FileService.delete_by_ids([f.id for f in created_files])
                        done_msg += f"- Deleted {file_delete_res} file records.\n"
                    # step2.1.2 delete document-file relation record
                    file2doc_delete_res = File2DocumentService.delete_by_document_ids_or_file_ids(
                        [d['id'] for d in created_documents],
                        [f.id for f in created_files]
                    )
                    done_msg += f"- Deleted {file2doc_delete_res} document-file relation records.\n"
                    # step2.1.3 delete chunks
                    doc_groups = group_by(created_documents, "tenant_id")
                    kb_grouped_doc = {k: group_by(v, "kb_id") for k, v in doc_groups.items()}
                    # chunks in {'tenant_id': {'kb_id': [{'id': doc_id}]}} structure
                    chunk_delete_res = 0
                    kb_doc_info = {}
                    for _tenant_id, kb_doc in kb_grouped_doc.items():
                        for _kb_id, docs in kb_doc.items():
                            chunk_delete_res += settings.docStoreConn.delete(
                                {"doc_id": [d["id"] for d in docs]},
                                search.index_name(_tenant_id), _kb_id
                            )
                            # record doc info
                            # NOTE(review): doc_num is bumped by 1 per (tenant, kb) group
                            # while token/chunk counts sum over all docs in the group —
                            # confirm doc_num should not be len(docs) instead.
                            if _kb_id in kb_doc_info.keys():
                                kb_doc_info[_kb_id]['doc_num'] += 1
                                kb_doc_info[_kb_id]['token_num'] += sum([d["token_num"] for d in docs])
                                kb_doc_info[_kb_id]['chunk_num'] += sum([d["chunk_num"] for d in docs])
                            else:
                                kb_doc_info[_kb_id] = {
                                    'doc_num': 1,
                                    'token_num': sum([d["token_num"] for d in docs]),
                                    'chunk_num': sum([d["chunk_num"] for d in docs])
                                }
                    done_msg += f"- Deleted {chunk_delete_res} chunks.\n"
                    # step2.1.4 delete tasks
                    task_delete_res = TaskService.delete_by_doc_ids([d['id'] for d in created_documents])
                    done_msg += f"- Deleted {task_delete_res} tasks.\n"
                    # step2.1.5 delete document record
                    doc_delete_res = DocumentService.delete_by_ids([d['id'] for d in created_documents])
                    done_msg += f"- Deleted {doc_delete_res} documents.\n"
                    for doc in created_documents:
                        try:
                            DocMetadataService.delete_document_metadata(doc['id'])
                        except Exception as e:
                            logging.warning(f"Failed to delete metadata for document {doc['id']}: {e}")
                    # step2.1.6 update dataset doc&chunk&token cnt
                    for kb_id, doc_num in kb_doc_info.items():
                        KnowledgebaseService.decrease_document_num_in_delete(kb_id, doc_num)
            # step2.2 delete relation
            user_tenant_delete_res = UserTenantService.delete_by_ids([t["id"] for t in tenants])
            done_msg += f"- Deleted {user_tenant_delete_res} user-tenant records.\n"
        # step3 finally delete user
        user_delete_res = UserService.delete_by_id(usr.id)
        done_msg += f"- Deleted {user_delete_res} user.\nDelete done!"
        return {"success": True, "message": f"Successfully deleted user. Details:\n{done_msg}"}
    except Exception as e:
        logging.exception(e)
        return {"success": False, "message": "An internal error occurred during user deletion. Some operations may have completed.","details": done_msg}
def delete_user_agents(user_id: str) -> dict:
    """Delete every agent (canvas) owned by *user_id* along with its versions.

    :return: {
        "agents_deleted_count": <int>,
        "version_deleted_count": <int>
    }
    """
    deleted_agents = 0
    deleted_versions = 0
    user_agents = UserCanvasService.get_all_agents_by_tenant_ids([user_id], user_id)
    if user_agents:
        agent_ids = [agent['id'] for agent in user_agents]
        # versions first, then the agents that own them
        versions = UserCanvasVersionService.get_all_canvas_version_by_canvas_ids(agent_ids)
        deleted_versions = UserCanvasVersionService.delete_by_ids([version['id'] for version in versions])
        deleted_agents = UserCanvasService.delete_by_ids(agent_ids)
    return {
        "agents_deleted_count": deleted_agents,
        "version_deleted_count": deleted_versions
    }
def delete_user_dialogs(user_id: str) -> dict:
    """Delete all dialogs owned by *user_id* plus their conversations,
    API tokens and API-conversation logs.

    :return: {
        "dialogs_deleted_count": <int>,
        "conversations_deleted_count": <int>,
        "api_token_deleted_count": <int>,
        "api4conversation_deleted_count": <int>
    }
    """
    deleted = {
        "dialogs_deleted_count": 0,
        "conversations_deleted_count": 0,
        "api_token_deleted_count": 0,
        "api4conversation_deleted_count": 0
    }
    user_dialogs = DialogService.get_all_dialogs_by_tenant_id(user_id)
    if user_dialogs:
        dialog_ids = [dialog['id'] for dialog in user_dialogs]
        # dependent records first, the dialogs themselves last
        conversations = ConversationService.get_all_conversation_by_dialog_ids(dialog_ids)
        deleted["conversations_deleted_count"] = ConversationService.delete_by_ids([c['id'] for c in conversations])
        deleted["api_token_deleted_count"] = APITokenService.delete_by_tenant_id(user_id)
        deleted["api4conversation_deleted_count"] = API4ConversationService.delete_by_dialog_ids(dialog_ids)
        deleted["dialogs_deleted_count"] = DialogService.delete_by_ids(dialog_ids)
    return deleted
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/db/joint_services/user_account_service.py",
"license": "Apache License 2.0",
"lines": 344,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:api/utils/crypt.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import os
import sys
from Cryptodome.PublicKey import RSA
from Cryptodome.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
from common.file_utils import get_project_base_directory
def crypt(line):
    """
    decrypt(crypt(input_string)) == base64(input_string), which frontend and ragflow_cli use.

    Encrypts the Base64 form of *line* with the project's RSA public key and
    returns the result as Base64 text.
    """
    file_path = os.path.join(get_project_base_directory(), "conf", "public.pem")
    # Fix: close the key file deterministically instead of leaking the handle.
    with open(file_path) as pem_file:
        rsa_key = RSA.importKey(pem_file.read(), "Welcome")
    cipher = Cipher_pkcs1_v1_5.new(rsa_key)
    password_base64 = base64.b64encode(line.encode('utf-8')).decode("utf-8")
    encrypted_password = cipher.encrypt(password_base64.encode())
    return base64.b64encode(encrypted_password).decode('utf-8')
def decrypt(line):
    """Decrypt a crypt()-produced token with the project's RSA private key,
    returning the Base64 text of the original input string."""
    file_path = os.path.join(get_project_base_directory(), "conf", "private.pem")
    # Fix: close the key file deterministically instead of leaking the handle.
    with open(file_path) as pem_file:
        rsa_key = RSA.importKey(pem_file.read(), "Welcome")
    cipher = Cipher_pkcs1_v1_5.new(rsa_key)
    # The second argument is the sentinel returned on decryption failure.
    return cipher.decrypt(base64.b64decode(line), "Fail to decrypt password!").decode('utf-8')
def decrypt2(crypt_text):
    """Decrypt a frontend RSA token and return the decoded original string.

    Unlike decrypt(), this uses PyCrypto's PKCS1_v1_5 and additionally
    Base64-decodes the decrypted payload before returning it.
    """
    from base64 import b64decode, b16decode
    from Crypto.Cipher import PKCS1_v1_5 as Cipher_PKCS1_v1_5
    from Crypto.PublicKey import RSA
    decode_data = b64decode(crypt_text)
    if len(decode_data) == 127:
        # Some encoders drop the leading zero byte of the 128-byte ciphertext;
        # restore it so the RSA block has the expected length.
        hex_fixed = '00' + decode_data.hex()
        decode_data = b16decode(hex_fixed.upper())
    file_path = os.path.join(get_project_base_directory(), "conf", "private.pem")
    # Fix: close the key file deterministically instead of leaking the handle.
    with open(file_path) as pem_file:
        pem = pem_file.read()
    rsa_key = RSA.importKey(pem, "Welcome")
    cipher = Cipher_PKCS1_v1_5.new(rsa_key)
    decrypt_text = cipher.decrypt(decode_data, None)
    return (b64decode(decrypt_text)).decode()
if __name__ == "__main__":
    # CLI helper: encrypt argv[1] with the public key, print the token,
    # then prove the round-trip by decrypting it again.
    passwd = crypt(sys.argv[1])
    print(passwd)
    print(decrypt(passwd))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/utils/crypt.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:rag/flow/parser/parser.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import io
import json
import os
import random
import re
from functools import partial
from litellm import logging
import numpy as np
from PIL import Image
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.llm_service import LLMBundle
from common import settings
from common.constants import LLMType
from common.misc_utils import get_uuid
from deepdoc.parser import ExcelParser
from deepdoc.parser.pdf_parser import PlainParser, RAGFlowPdfParser, VisionParser
from deepdoc.parser.tcadp_parser import TCADPParser
from rag.app.naive import Docx
from rag.flow.base import ProcessBase, ProcessParamBase
from rag.flow.parser.schema import ParserFromUpstream
from rag.llm.cv_model import Base as VLM
from rag.nlp import BULLET_PATTERN, bullets_category, docx_question_level, not_bullet
from rag.utils.base64_image import image2id
from common.misc_utils import thread_pool_exec
class ParserParam(ProcessParamBase):
    """Parameters for the Parser pipeline component.

    Holds, per file family (pdf, spreadsheet, word, ...), the accepted file
    suffixes, the parse method, and the output format, plus the whitelist of
    output formats each family may use.
    """

    def __init__(self):
        super().__init__()
        # Output formats each file family is allowed to emit; used by check().
        self.allowed_output_format = {
            "pdf": [
                "json",
                "markdown",
            ],
            "spreadsheet": [
                "json",
                "markdown",
                "html",
            ],
            "word": [
                "json",
                "markdown",
            ],
            "slides": [
                "json",
            ],
            "image": [
                "json",
            ],
            "email": [
                "text",
                "json",
            ],
            "text&markdown": [
                "text",
                "json",
            ],
            "audio": [
                "json",
            ],
            "video": [],
        }
        # Per-family default configuration: parse method, language, accepted
        # suffixes and selected output format.
        self.setups = {
            "pdf": {
                "parse_method": "deepdoc",  # deepdoc/plain_text/tcadp_parser/vlm
                "lang": "Chinese",
                "suffix": [
                    "pdf",
                ],
                "output_format": "json",
            },
            "spreadsheet": {
                "parse_method": "deepdoc",  # deepdoc/tcadp_parser
                "output_format": "html",
                "suffix": [
                    "xls",
                    "xlsx",
                    "csv",
                ],
            },
            "word": {
                "suffix": [
                    "doc",
                    "docx",
                ],
                "output_format": "json",
            },
            "text&markdown": {
                "suffix": ["md", "markdown", "mdx", "txt"],
                "output_format": "json",
            },
            "slides": {
                "parse_method": "deepdoc",  # deepdoc/tcadp_parser
                "suffix": [
                    "pptx",
                    "ppt",
                ],
                "output_format": "json",
            },
            "image": {
                "parse_method": "ocr",
                "llm_id": "",
                "lang": "Chinese",
                "system_prompt": "",
                "suffix": ["jpg", "jpeg", "png", "gif"],
                "output_format": "json",
            },
            "email": {
                "suffix": [
                    "eml",
                    "msg",
                ],
                "fields": ["from", "to", "cc", "bcc", "date", "subject", "body", "attachments", "metadata"],
                "output_format": "json",
            },
            "audio": {
                "suffix": [
                    "da",
                    "wave",
                    "wav",
                    "mp3",
                    "aac",
                    "flac",
                    "ogg",
                    "aiff",
                    "au",
                    "midi",
                    "wma",
                    "realaudio",
                    "vqf",
                    "oggvorbis",
                    "ape",
                ],
                "output_format": "text",
            },
            "video": {
                "suffix": [
                    "mp4",
                    "avi",
                    "mkv",
                ],
                "output_format": "text",
                "prompt": "",
            },
        }

    def check(self):
        """Validate each configured family: parse method, required language /
        model fields, and that the output format is in the allowed list."""
        pdf_config = self.setups.get("pdf", {})
        if pdf_config:
            pdf_parse_method = pdf_config.get("parse_method", "")
            self.check_empty(pdf_parse_method, "Parse method abnormal.")
            # Non-listed methods are VLM-style parsers and need a language.
            if pdf_parse_method.lower() not in ["deepdoc", "plain_text", "mineru", "tcadp parser", "paddleocr"]:
                self.check_empty(pdf_config.get("lang", ""), "PDF VLM language")
            pdf_output_format = pdf_config.get("output_format", "")
            self.check_valid_value(pdf_output_format, "PDF output format abnormal.", self.allowed_output_format["pdf"])
        spreadsheet_config = self.setups.get("spreadsheet", "")
        if spreadsheet_config:
            spreadsheet_output_format = spreadsheet_config.get("output_format", "")
            self.check_valid_value(spreadsheet_output_format, "Spreadsheet output format abnormal.", self.allowed_output_format["spreadsheet"])
        doc_config = self.setups.get("word", "")
        if doc_config:
            doc_output_format = doc_config.get("output_format", "")
            self.check_valid_value(doc_output_format, "Word processer document output format abnormal.", self.allowed_output_format["word"])
        slides_config = self.setups.get("slides", "")
        if slides_config:
            slides_output_format = slides_config.get("output_format", "")
            self.check_valid_value(slides_output_format, "Slides output format abnormal.", self.allowed_output_format["slides"])
        image_config = self.setups.get("image", "")
        if image_config:
            image_parse_method = image_config.get("parse_method", "")
            # Non-OCR image parsing goes through a VLM and needs a language.
            if image_parse_method not in ["ocr"]:
                self.check_empty(image_config.get("lang", ""), "Image VLM language")
        text_config = self.setups.get("text&markdown", "")
        if text_config:
            text_output_format = text_config.get("output_format", "")
            self.check_valid_value(text_output_format, "Text output format abnormal.", self.allowed_output_format["text&markdown"])
        audio_config = self.setups.get("audio", "")
        if audio_config:
            self.check_empty(audio_config.get("llm_id"), "Audio VLM")
        video_config = self.setups.get("video", "")
        if video_config:
            self.check_empty(video_config.get("llm_id"), "Video VLM")
        email_config = self.setups.get("email", "")
        if email_config:
            email_output_format = email_config.get("output_format", "")
            self.check_valid_value(email_output_format, "Email output format abnormal.", self.allowed_output_format["email"])

    def get_input_form(self) -> dict[str, dict]:
        """This component takes no user-facing inputs."""
        return {}
class Parser(ProcessBase):
component_name = "Parser"
    @staticmethod
    def _extract_word_title_lines(doc, to_page=100000):
        """Collect (question_level, text) pairs from a python-docx document.

        Scanning stops once more than *to_page* pages have been passed; the
        page count is approximated by sniffing lastRenderedPageBreak and
        explicit page-break runs in the paragraph XML.
        """
        lines = []
        if not doc or not getattr(doc, "paragraphs", None):
            return lines
        pn = 0  # approximate page counter
        bull = bullets_category([p.text for p in doc.paragraphs])
        for p in doc.paragraphs:
            if pn > to_page:
                break
            question_level, p_text = docx_question_level(p, bull)
            lines.append((question_level, p_text))
            for run in p.runs:
                # soft (rendered) page break
                if "lastRenderedPageBreak" in run._element.xml:
                    pn += 1
                    continue
                # explicit page break
                if "w:br" in run._element.xml and 'type="page"' in run._element.xml:
                    pn += 1
        return lines
@staticmethod
def _extract_markdown_title_lines(sections):
lines = []
if not sections:
return lines
section_texts = []
for section in sections:
text = section[0] if isinstance(section, tuple) else section
if not isinstance(text, str):
continue
text = text.strip()
if text:
section_texts.append(text)
if not section_texts:
return lines
bull = bullets_category(section_texts)
if bull < 0:
return lines
bullet_patterns = BULLET_PATTERN[bull]
default_level = len(bullet_patterns) + 1
for text in section_texts:
level = default_level
for idx, pattern in enumerate(bullet_patterns, start=1):
if re.match(pattern, text) and not not_bullet(text):
level = idx
break
lines.append((level, text))
return lines
@staticmethod
def _extract_title_texts(lines):
normalized_lines = []
level_set = set()
for level, txt in lines or []:
if not isinstance(txt, str):
continue
txt = txt.strip()
if not txt:
continue
normalized_lines.append((level, txt))
level_set.add(level)
if not normalized_lines or not level_set:
return set()
sorted_levels = sorted(level_set)
h2_level = sorted_levels[1] if len(sorted_levels) > 1 else 1
h2_level = sorted_levels[-2] if h2_level == sorted_levels[-1] and len(sorted_levels) > 2 else h2_level
return {txt for level, txt in normalized_lines if level <= h2_level}
def _pdf(self, name, blob, **kwargs):
    """Parse a PDF blob with the engine selected in the component setup.

    Dispatches on ``setups["pdf"]["parse_method"]`` to one of: DeepDoc,
    plain text, MinerU, PaddleOCR, the Tencent Cloud ADP parser, or a
    vision-LLM fallback.  Every branch yields a list of ``bboxes`` dicts
    (text plus optional image/position/layout metadata), which is then
    optionally post-processed to tag titles, authors and the abstract,
    and finally published as "json" and/or "markdown" output.

    Args:
        name: original file name (passed to parsers needing a path hint).
        blob: raw PDF bytes.

    Raises:
        RuntimeError: MinerU/PaddleOCR selected but no model configured.
    """
    self.callback(random.randint(1, 5) / 100.0, "Start to work on a PDF.")
    conf = self._param.setups["pdf"]
    self.set_output("output_format", conf["output_format"])
    # Optional post-processing steps requested in the setup.
    abstract_enabled = "abstract" in self._param.setups["pdf"].get("preprocess", [])
    author_enabled = "author" in self._param.setups["pdf"].get("preprocess", [])
    title_enabled = "title" in self._param.setups["pdf"].get("preprocess", [])
    # A parse_method of the form "<model>@MinerU" / "<model>@PaddleOCR"
    # carries the model name inline; split it off and normalize the method.
    raw_parse_method = conf.get("parse_method", "")
    parser_model_name = None
    parse_method = raw_parse_method
    parse_method = parse_method or ""
    if isinstance(raw_parse_method, str):
        lowered = raw_parse_method.lower()
        if lowered.endswith("@mineru"):
            parser_model_name = raw_parse_method.rsplit("@", 1)[0]
            parse_method = "MinerU"
        elif lowered.endswith("@paddleocr"):
            parser_model_name = raw_parse_method.rsplit("@", 1)[0]
            parse_method = "PaddleOCR"
    if parse_method.lower() == "deepdoc":
        # DeepDoc layout parser returns bboxes with layout metadata directly.
        bboxes = RAGFlowPdfParser().parse_into_bboxes(blob, callback=self.callback)
    elif parse_method.lower() == "plain_text":
        # Raw text extraction; no positions or images.
        lines, _ = PlainParser()(blob)
        bboxes = [{"text": t} for t, _ in lines]
    elif parse_method.lower() == "mineru":

        def resolve_mineru_llm_name():
            # Resolution order: explicit "<model>@MinerU" or setup value,
            # then a tenant-registered MinerU model, then MINERU_* env.
            configured = parser_model_name or conf.get("mineru_llm_name")
            if configured:
                return configured
            tenant_id = self._canvas._tenant_id
            if not tenant_id:
                return None
            from api.db.services.tenant_llm_service import TenantLLMService

            env_name = TenantLLMService.ensure_mineru_from_env(tenant_id)
            candidates = TenantLLMService.query(tenant_id=tenant_id, llm_factory="MinerU", model_type=LLMType.OCR.value)
            if candidates:
                return candidates[0].llm_name
            return env_name

        parser_model_name = resolve_mineru_llm_name()
        if not parser_model_name:
            raise RuntimeError("MinerU model not configured. Please add MinerU in Model Providers or set MINERU_* env.")
        tenant_id = self._canvas._tenant_id
        ocr_model = LLMBundle(tenant_id, LLMType.OCR, llm_name=parser_model_name, lang=conf.get("lang", "Chinese"))
        pdf_parser = ocr_model.mdl
        lines, _ = pdf_parser.parse_pdf(
            filepath=name,
            binary=blob,
            callback=self.callback,
            parse_method=conf.get("mineru_parse_method", "raw"),
            lang=conf.get("lang", "Chinese"),
        )
        bboxes = []
        for t, poss in lines:
            box = {
                "image": pdf_parser.crop(poss, 1),
                # pos[0] is a tuple whose last element is kept (page id),
                # followed by the remaining coordinate fields.
                "positions": [[pos[0][-1], *pos[1:]] for pos in pdf_parser.extract_positions(poss)],
                "text": t,
            }
            bboxes.append(box)
    elif parse_method.lower() == "tcadp parser":
        # ADP is a document parsing tool using Tencent Cloud API
        table_result_type = conf.get("table_result_type", "1")
        markdown_image_response_type = conf.get("markdown_image_response_type", "1")
        tcadp_parser = TCADPParser(
            table_result_type=table_result_type,
            markdown_image_response_type=markdown_image_response_type,
        )
        sections, _ = tcadp_parser.parse_pdf(
            filepath=name,
            binary=blob,
            callback=self.callback,
            file_type="PDF",
            file_start_page=1,
            file_end_page=1000,
        )
        bboxes = []
        for section, position_tag in sections:
            if position_tag:
                # Extract position information from TCADP's position tag
                # Format: @@{page_number}\t{x0}\t{x1}\t{top}\t{bottom}##
                match = re.match(r"@@([0-9-]+)\t([0-9.]+)\t([0-9.]+)\t([0-9.]+)\t([0-9.]+)##", position_tag)
                if match:
                    pn, x0, x1, top, bott = match.groups()
                    bboxes.append(
                        {
                            "page_number": int(pn.split("-")[0]),  # Take the first page number
                            "x0": float(x0),
                            "x1": float(x1),
                            "top": float(top),
                            "bottom": float(bott),
                            "text": section,
                        }
                    )
                else:
                    # If no position info, add as text without position
                    bboxes.append({"text": section})
            else:
                bboxes.append({"text": section})
    elif parse_method.lower() == "paddleocr":

        def resolve_paddleocr_llm_name():
            # Same resolution strategy as MinerU above, for PaddleOCR.
            configured = parser_model_name or conf.get("paddleocr_llm_name")
            if configured:
                return configured
            tenant_id = self._canvas._tenant_id
            if not tenant_id:
                return None
            from api.db.services.tenant_llm_service import TenantLLMService

            env_name = TenantLLMService.ensure_paddleocr_from_env(tenant_id)
            candidates = TenantLLMService.query(tenant_id=tenant_id, llm_factory="PaddleOCR", model_type=LLMType.OCR.value)
            if candidates:
                return candidates[0].llm_name
            return env_name

        parser_model_name = resolve_paddleocr_llm_name()
        if not parser_model_name:
            raise RuntimeError("PaddleOCR model not configured. Please add PaddleOCR in Model Providers or set PADDLEOCR_* env.")
        tenant_id = self._canvas._tenant_id
        ocr_model = LLMBundle(tenant_id, LLMType.OCR, llm_name=parser_model_name)
        pdf_parser = ocr_model.mdl
        lines, _ = pdf_parser.parse_pdf(
            filepath=name,
            binary=blob,
            callback=self.callback,
            parse_method=conf.get("paddleocr_parse_method", "raw"),
        )
        bboxes = []
        for t, poss in lines:
            # Get cropped image and positions
            cropped_image, positions = pdf_parser.crop(poss, need_position=True)
            box = {
                "text": t,
                "image": cropped_image,
                "positions": positions,
            }
            bboxes.append(box)
    else:
        # Fallback: treat parse_method as a vision (image2text) model name.
        vision_model = LLMBundle(self._canvas._tenant_id, LLMType.IMAGE2TEXT, llm_name=conf.get("parse_method"), lang=self._param.setups["pdf"].get("lang"))
        lines, _ = VisionParser(vision_model=vision_model)(blob, callback=self.callback)
        bboxes = []
        for t, poss in lines:
            for pn, x0, x1, top, bott in RAGFlowPdfParser.extract_positions(poss):
                bboxes.append(
                    {
                        "page_number": int(pn[0]),
                        "x0": float(x0),
                        "x1": float(x1),
                        "top": float(top),
                        "bottom": float(bott),
                        "text": t,
                    }
                )
    # Tag coarse document types from the layout metadata.
    for b in bboxes:
        text_val = b.get("text", "")
        has_text = isinstance(text_val, str) and text_val.strip()
        layout = b.get("layout_type")
        if layout == "figure" or (b.get("image") and not has_text):
            b["doc_type_kwd"] = "image"
        elif layout == "table":
            b["doc_type_kwd"] = "table"
        if title_enabled and "title" in str(b.get("layout_type", "").lower()):
            b["title"] = True
    # Get authors
    if author_enabled:

        def _begin(txt):
            # True when txt looks like the start of the body (introduction /
            # abstract / keywords / TOC …).
            # NOTE(review): the CJK alternatives in this pattern appear
            # mojibake-garbled in this copy (likely 摘要/引言/关键词/背景/
            # 目录/前言 originally) — restore from upstream before relying on it.
            if not isinstance(txt, str):
                return False
            return re.match(
                r"[0-9. ไธใi]*(introduction|abstract|ๆ่ฆ|ๅผ่จ|keywords|key words|ๅณ้ฎ่ฏ|background|่ๆฏ|็ฎๅฝ|ๅ่จ|contents)",
                txt.lower().strip(),
            )

        # Scan the leading blocks for the first title; the up-to-3 blocks
        # right after it (until the body/an e-mail line begins) are tagged
        # as authors.
        i = 0
        while i < min(32, len(bboxes) - 1):
            b = bboxes[i]
            i += 1
            layout_type = b.get("layout_type", "")
            layoutno = b.get("layoutno", "")
            is_title = "title" in str(layout_type).lower() or "title" in str(layoutno).lower()
            if not is_title:
                continue
            title_txt = b.get("text", "")
            if _begin(title_txt):
                break
            for j in range(3):
                next_idx = i + j
                if next_idx >= len(bboxes):
                    break
                candidate = bboxes[next_idx].get("text", "")
                if _begin(candidate):
                    break
                if isinstance(candidate, str) and "@" in candidate:
                    # A line containing "@" (e-mail) ends the author block.
                    break
                bboxes[next_idx]["author"] = True
            break
    # Get abstract
    if abstract_enabled:
        i = 0
        abstract_idx = None
        # NOTE(review): the "abstract" regex below carries a mojibake-garbled
        # CJK alternative (likely 摘要) — confirm against upstream.
        while i + 1 < min(32, len(bboxes)):
            b = bboxes[i]
            i += 1
            txt = b.get("text", "")
            if not isinstance(txt, str):
                continue
            txt = txt.lower().strip()
            if re.match(r"(abstract|ๆ่ฆ)", txt):
                # Either this block already contains the abstract body …
                if len(txt.split()) > 32 or len(txt) > 64:
                    abstract_idx = i - 1
                    break
                # … or it is only the heading and the body is the next block.
                next_txt = bboxes[i].get("text", "") if i < len(bboxes) else ""
                if isinstance(next_txt, str):
                    next_txt = next_txt.lower().strip()
                if len(next_txt.split()) > 32 or len(next_txt) > 64:
                    abstract_idx = i
                    i += 1
                    break
        if abstract_idx is not None:
            bboxes[abstract_idx]["abstract"] = True
    if conf.get("output_format") == "json":
        self.set_output("json", bboxes)
    if conf.get("output_format") == "markdown":
        mkdn = ""
        for b in bboxes:
            if b.get("layout_type", "") == "title":
                mkdn += "\n## "
            if b.get("layout_type", "") == "figure":
                # NOTE(review): this format string has no "{}" placeholder, so
                # the base64 image is dropped; markdown image markup was likely
                # lost in this copy — verify against upstream before changing.
                mkdn += "\n".format(VLM.image2base64(b["image"]))
                continue
            mkdn += b.get("text", "") + "\n"
        self.set_output("markdown", mkdn)
def _spreadsheet(self, name, blob, **kwargs):
    """Parse a spreadsheet (xlsx/csv) and publish it in the configured format.

    Two engines are supported: the Tencent Cloud ADP service ("TCADP Parser")
    and the default DeepDOC ExcelParser.  Output is published under the key
    matching ``output_format`` ("html", "json" or "markdown").
    """
    self.callback(random.randint(1, 5) / 100.0, "Start to work on a Spreadsheet.")
    conf = self._param.setups["spreadsheet"]
    self.set_output("output_format", conf["output_format"])
    parse_method = conf.get("parse_method", "deepdoc")
    if parse_method.lower() != "tcadp parser":
        # Default DeepDOC route.
        excel_parser = ExcelParser()
        fmt = conf.get("output_format")
        if fmt == "html":
            html_pages = excel_parser.html(blob, 1000000000)
            self.set_output("html", html_pages[0])
        elif fmt == "json":
            self.set_output("json", [{"text": txt} for txt in excel_parser(blob) if txt])
        elif fmt == "markdown":
            self.set_output("markdown", excel_parser.markdown(blob))
        return
    # TCADP (Tencent Cloud document parsing API) route.
    tcadp_parser = TCADPParser(
        table_result_type=conf.get("table_result_type", "1"),
        markdown_image_response_type=conf.get("markdown_image_response_type", "1"),
    )
    if not tcadp_parser.check_installation():
        raise RuntimeError("TCADP parser not available. Please check Tencent Cloud API configuration.")
    # Choose the ADP file type from the file extension.
    file_type = "XLSX" if re.search(r"\.xlsx?$", name, re.IGNORECASE) else "CSV"
    self.callback(0.2, f"Using TCADP parser for {file_type} file.")
    sections, tables = tcadp_parser.parse_pdf(
        filepath=name,
        binary=blob,
        callback=self.callback,
        file_type=file_type,
        file_start_page=1,
        file_end_page=1000,
    )
    output_format = conf.get("output_format", "html")
    if output_format == "html":
        # Sections first, then tables, one chunk per line.
        pieces = [s for s, _ in sections if s] + [t for t in tables if t]
        self.set_output("html", "".join(p + "\n" for p in pieces))
    elif output_format == "json":
        items = [{"text": s} for s, _ in sections if s]
        items.extend({"text": t, "doc_type_kwd": "table"} for t in tables if t)
        self.set_output("json", items)
    elif output_format == "markdown":
        pieces = [s for s, _ in sections if s] + [t for t in tables if t]
        self.set_output("markdown", "".join(p + "\n\n" for p in pieces))
def _word(self, name, blob, **kwargs):
    """Parse a word-processor document into JSON chunks or markdown.

    JSON mode emits one dict per section (text + image), marking entries
    whose text matches an extracted document title when the "title"
    preprocess step is enabled, and appends one table entry per section.
    """
    self.callback(random.randint(1, 5) / 100.0, "Start to work on a Word Processor Document")
    conf = self._param.setups["word"]
    self.set_output("output_format", conf["output_format"])
    parser = Docx()
    if conf.get("output_format") == "markdown":
        self.set_output("markdown", parser.to_markdown(name, binary=blob))
        return
    if conf.get("output_format") != "json":
        return
    body = parser(name, binary=blob)
    title_texts = self._extract_title_texts(self._extract_word_title_lines(getattr(parser, "doc", None)))
    mark_titles = "title" in self._param.setups["word"].get("preprocess", [])
    chunks = []
    table_htmls = []
    for text, image, html in body:
        entry = {"text": text, "image": image}
        key = text.strip() if isinstance(text, str) else ""
        if key and key in title_texts and mark_titles:
            entry["title"] = True
        chunks.append(entry)
        table_htmls.append(html)
    # NOTE(review): one table entry is appended per section even when its
    # html part is falsy — preserved from the original; verify against the
    # Docx() parser's output contract.
    chunks.extend({"text": html, "image": None, "doc_type_kwd": "table"} for html in table_htmls)
    self.set_output("json", chunks)
def _slides(self, name, blob, **kwargs):
    """Parse a PowerPoint document and publish JSON chunks.

    Two engines: the Tencent Cloud ADP service ("TCADP Parser"), or the
    default DeepDOC ppt parser (which only supports JSON output).

    Args:
        name: original file name (used to pick PPT vs PPTX for TCADP).
        blob: raw file bytes.

    Raises:
        RuntimeError: TCADP selected but not usable.
        ValueError: DeepDOC route configured with a non-JSON output format.
    """
    self.callback(random.randint(1, 5) / 100.0, "Start to work on a PowerPoint Document")
    conf = self._param.setups["slides"]
    self.set_output("output_format", conf["output_format"])
    parse_method = conf.get("parse_method", "deepdoc")
    if parse_method.lower() == "tcadp parser":
        table_result_type = conf.get("table_result_type", "1")
        markdown_image_response_type = conf.get("markdown_image_response_type", "1")
        tcadp_parser = TCADPParser(
            table_result_type=table_result_type,
            markdown_image_response_type=markdown_image_response_type,
        )
        if not tcadp_parser.check_installation():
            raise RuntimeError("TCADP parser not available. Please check Tencent Cloud API configuration.")
        # Determine the ADP file type from the extension.
        file_type = "PPTX" if re.search(r"\.pptx?$", name, re.IGNORECASE) else "PPT"
        self.callback(0.2, f"Using TCADP parser for {file_type} file.")
        sections, tables = tcadp_parser.parse_pdf(
            filepath=name,
            binary=blob,
            callback=self.callback,
            file_type=file_type,
            file_start_page=1,
            file_end_page=1000,
        )
        # TCADP output for slides only supports the JSON format.
        if conf.get("output_format", "json") == "json":
            result = [{"text": section} for section, _ in sections if section]
            result.extend({"text": table, "doc_type_kwd": "table"} for table in tables if table)
            self.set_output("json", result)
    else:
        # Default DeepDOC parser (supports .pptx format).
        from deepdoc.parser.ppt_parser import RAGFlowPptParser

        # Fix: keep the class and its instance under different names — the
        # original re-bound the imported class name to the instance.
        slide_parser = RAGFlowPptParser()
        txts = slide_parser(blob, 0, 100000, None)
        sections = [{"text": t} for t in txts if t.strip()]
        # Fix: validate explicitly instead of `assert`, which is stripped
        # under `python -O`.
        if conf.get("output_format") != "json":
            raise ValueError("have to be json for ppt")
        self.set_output("json", sections)
def _markdown(self, name, blob, **kwargs):
    """Parse markdown/plain text into JSON chunks (with images) or text.

    JSON mode emits one dict per section, attaching the section image when
    available and tagging titles when the "title" preprocess is enabled.
    """
    from functools import reduce

    from rag.app.naive import Markdown as naive_markdown_parser
    from rag.nlp import concat_img

    self.callback(random.randint(1, 5) / 100.0, "Start to work on a markdown.")
    conf = self._param.setups["text&markdown"]
    self.set_output("output_format", conf["output_format"])
    parser = naive_markdown_parser()
    sections, tables, section_images = parser(
        name,
        blob,
        separate_tables=False,
        delimiter=conf.get("delimiter"),
        return_section_images=True,
    )
    if conf.get("output_format") != "json":
        self.set_output("text", "\n".join(text for text, _ in sections))
        return
    title_texts = self._extract_title_texts(self._extract_markdown_title_lines(sections))
    mark_titles = "title" in self._param.setups["text&markdown"].get("preprocess", [])
    results = []
    for idx, (text, _) in enumerate(sections):
        item = {"text": text}
        key = text.strip() if isinstance(text, str) else ""
        if key and key in title_texts and mark_titles:
            item["title"] = True
        images = []
        if section_images and len(section_images) > idx and section_images[idx] is not None:
            images.append(section_images[idx])
        if images:
            # If multiple images were collected, merge them into one.
            item["image"] = reduce(concat_img, images) if len(images) > 1 else images[0]
        results.append(item)
    self.set_output("json", results)
def _image(self, name, blob, **kwargs):
    """Extract text from an image, via OCR or a vision LLM description.

    Publishes a single-element "json" output carrying the text, the PIL
    image and ``doc_type_kwd="image"``.
    """
    from deepdoc.vision import OCR

    self.callback(random.randint(1, 5) / 100.0, "Start to work on an image.")
    conf = self._param.setups["image"]
    self.set_output("output_format", conf["output_format"])
    img = Image.open(io.BytesIO(blob)).convert("RGB")
    if conf["parse_method"] == "ocr":
        # Character recognition only.
        recognized = OCR()(np.array(img))
        txt = "\n".join(t[0] for _, t in recognized if t[0])
    else:
        # parse_method names a vision model; ask it to describe the picture.
        lang = conf["lang"]
        cv_model = LLMBundle(self._canvas.get_tenant_id(), LLMType.IMAGE2TEXT, llm_name=conf["parse_method"], lang=lang)
        buffer = io.BytesIO()
        img.save(buffer, format="JPEG")
        buffer.seek(0)
        jpeg_bytes = buffer.read()
        system_prompt = conf.get("system_prompt")
        if system_prompt:
            txt = cv_model.describe_with_prompt(jpeg_bytes, system_prompt)
        else:
            txt = cv_model.describe(jpeg_bytes)
    self.set_output("json", [{"text": txt, "image": img, "doc_type_kwd": "image"}])
def _audio(self, name, blob, **kwargs):
    """Transcribe an audio blob with the configured speech-to-text model.

    The blob is spilled to a temporary file (keeping the original suffix)
    because the transcription backend works on file paths.
    """
    import os
    import tempfile

    self.callback(random.randint(1, 5) / 100.0, "Start to work on an audio.")
    conf = self._param.setups["audio"]
    self.set_output("output_format", conf["output_format"])
    suffix = os.path.splitext(name)[1]
    with tempfile.NamedTemporaryFile(suffix=suffix) as tmp:
        tmp.write(blob)
        tmp.flush()
        audio_path = os.path.abspath(tmp.name)
        stt_model = LLMBundle(self._canvas.get_tenant_id(), LLMType.SPEECH2TEXT, llm_name=conf["llm_id"])
        transcript = stt_model.transcription(audio_path)
        self.set_output("text", transcript)
def _video(self, name, blob, **kwargs):
    """Describe a video blob with the configured vision model.

    Runs the model's async chat synchronously (this handler executes in a
    worker thread, so ``asyncio.run`` is safe here).
    """
    self.callback(random.randint(1, 5) / 100.0, "Start to work on an video.")
    conf = self._param.setups["video"]
    self.set_output("output_format", conf["output_format"])
    vision_model = LLMBundle(self._canvas.get_tenant_id(), LLMType.IMAGE2TEXT, llm_name=conf["llm_id"])
    prompt = str(conf.get("prompt", "") or "")
    answer = asyncio.run(vision_model.async_chat(system="", history=[], gen_conf={}, video_bytes=blob, filename=name, video_prompt=prompt))
    self.set_output("text", answer)
def _email(self, name, blob, **kwargs):
    """Parse an e-mail message (.eml or .msg) into the requested fields.

    Extracts the header fields listed in ``setups["email"]["fields"]``,
    stores remaining headers under "metadata", and optionally decodes the
    body (with charset fallbacks) and attachments.  Publishes either a
    single-element "json" output or a flattened "text" output depending
    on the configured output_format.
    """
    self.callback(random.randint(1, 5) / 100.0, "Start to work on an email.")
    email_content = {}
    conf = self._param.setups["email"]
    self.set_output("output_format", conf["output_format"])
    target_fields = conf["fields"]
    _, ext = os.path.splitext(name)
    if ext == ".eml":
        # handle eml file
        from email import policy
        from email.parser import BytesParser

        msg = BytesParser(policy=policy.default).parse(io.BytesIO(blob))
        email_content["metadata"] = {}
        # handle header info
        for header, value in msg.items():
            # get fields like from, to, cc, bcc, date, subject
            if header.lower() in target_fields:
                email_content[header.lower()] = value
            # get metadata
            elif header.lower() not in ["from", "to", "cc", "bcc", "date", "subject"]:
                email_content["metadata"][header.lower()] = value
        # get body
        if "body" in target_fields:
            body_text, body_html = [], []

            def _add_content(m, content_type):
                # Recursively walk MIME parts, decoding text/plain and
                # text/html payloads into body_text / body_html.
                # NOTE(review): the non-multipart branches read `msg` (the
                # outer message), not the part `m` passed in — looks wrong
                # for nested parts; verify against upstream before changing.
                def _decode_payload(payload, charset, target_list):
                    # Try the declared charset, then common CJK/Latin
                    # encodings, finally lossy UTF-8 as a last resort.
                    try:
                        target_list.append(payload.decode(charset))
                    except (UnicodeDecodeError, LookupError):
                        for enc in ["utf-8", "gb2312", "gbk", "gb18030", "latin1"]:
                            try:
                                target_list.append(payload.decode(enc))
                                break
                            except UnicodeDecodeError:
                                continue
                        else:
                            target_list.append(payload.decode("utf-8", errors="ignore"))

                if content_type == "text/plain":
                    payload = msg.get_payload(decode=True)
                    charset = msg.get_content_charset() or "utf-8"
                    _decode_payload(payload, charset, body_text)
                elif content_type == "text/html":
                    payload = msg.get_payload(decode=True)
                    charset = msg.get_content_charset() or "utf-8"
                    _decode_payload(payload, charset, body_html)
                elif "multipart" in content_type:
                    if m.is_multipart():
                        for part in m.iter_parts():
                            _add_content(part, part.get_content_type())

            _add_content(msg, msg.get_content_type())
            email_content["text"] = "\n".join(body_text)
            email_content["text_html"] = "\n".join(body_html)
        # get attachment
        if "attachments" in target_fields:
            attachments = []
            for part in msg.iter_attachments():
                content_disposition = part.get("Content-Disposition")
                if content_disposition:
                    dispositions = content_disposition.strip().split(";")
                    if dispositions[0].lower() == "attachment":
                        filename = part.get_filename()
                        payload = part.get_payload(decode=True).decode(part.get_content_charset())
                        attachments.append(
                            {
                                "filename": filename,
                                "payload": payload,
                            }
                        )
            email_content["attachments"] = attachments
    else:
        # handle msg file
        import extract_msg

        print("handle a msg file.")
        msg = extract_msg.Message(blob)
        # handle header info
        basic_content = {
            "from": msg.sender,
            "to": msg.to,
            "cc": msg.cc,
            "bcc": msg.bcc,
            "date": msg.date,
            "subject": msg.subject,
        }
        email_content.update({k: v for k, v in basic_content.items() if k in target_fields})
        # get metadata
        email_content["metadata"] = {
            "message_id": msg.messageId,
            "in_reply_to": msg.inReplyTo,
        }
        # get body
        if "body" in target_fields:
            # extract_msg may expose body as a list or a plain string.
            email_content["text"] = msg.body[0] if isinstance(msg.body, list) and msg.body else msg.body
            if not email_content["text"] and msg.htmlBody:
                email_content["text"] = msg.htmlBody[0] if isinstance(msg.htmlBody, list) and msg.htmlBody else msg.htmlBody
        # get attachments
        if "attachments" in target_fields:
            attachments = []
            for t in msg.attachments:
                attachments.append(
                    {
                        "filename": t.name,
                        "payload": t.data.decode("utf-8"),
                    }
                )
            email_content["attachments"] = attachments
    if conf["output_format"] == "json":
        self.set_output("json", [email_content])
    else:
        # Flatten the collected fields into "key:value" lines.
        content_txt = ""
        for k, v in email_content.items():
            if isinstance(v, str):
                # basic info
                content_txt += f"{k}:{v}" + "\n"
            elif isinstance(v, dict):
                # metadata
                content_txt += f"{k}:{json.dumps(v)}" + "\n"
            elif isinstance(v, list):
                # attachments or others
                for fb in v:
                    if isinstance(fb, dict):
                        # attachments
                        content_txt += f"{fb['filename']}:{fb['payload']}" + "\n"
                    else:
                        # str, usually plain text
                        content_txt += fb
        self.set_output("text", content_txt)
async def _invoke(self, **kwargs):
    """Entry point: route the incoming file to the matching format handler.

    Validates the upstream payload, loads the file blob (from document
    storage when bound to a doc, otherwise via FileService), dispatches to
    the handler whose configured suffix list matches the file extension
    (run in a worker thread), then uploads any images in the "json" output
    to object storage, replacing them with storage ids.
    """
    function_map = {
        "pdf": self._pdf,
        "text&markdown": self._markdown,
        "spreadsheet": self._spreadsheet,
        "slides": self._slides,
        "word": self._word,
        "image": self._image,
        "audio": self._audio,
        "video": self._video,
        "email": self._email,
    }
    try:
        from_upstream = ParserFromUpstream.model_validate(kwargs)
    except Exception as e:
        self.set_output("_ERROR", f"Input error: {str(e)}")
        return
    name = from_upstream.name
    if self._canvas._doc_id:
        # Bound to a document: fetch the blob from its storage bucket.
        b, n = File2DocumentService.get_storage_address(doc_id=self._canvas._doc_id)
        blob = settings.STORAGE_IMPL.get(b, n)
    else:
        blob = FileService.get_blob(from_upstream.file["created_by"], from_upstream.file["id"])
    done = False
    for p_type, conf in self._param.setups.items():
        if from_upstream.name.split(".")[-1].lower() not in conf.get("suffix", []):
            continue
        # Strip name/blob from kwargs so they are only passed positionally.
        call_kwargs = dict(kwargs)
        call_kwargs.pop("name", None)
        call_kwargs.pop("blob", None)
        # Handlers are synchronous; run in the thread pool to avoid blocking.
        await thread_pool_exec(function_map[p_type], name, blob, **call_kwargs)
        done = True
        break
    if not done:
        raise Exception("No suitable for file extension: `.%s`" % from_upstream.name.split(".")[-1].lower())
    outs = self.output()
    # Persist in-memory images from the json chunks into object storage.
    tasks = []
    for d in outs.get("json", []):
        tasks.append(asyncio.create_task(image2id(d, partial(settings.STORAGE_IMPL.put, tenant_id=self._canvas._tenant_id), get_uuid())))
    try:
        await asyncio.gather(*tasks, return_exceptions=False)
    except Exception as e:
        logging.error("Error while parsing: %s" % e)
        # Cancel remaining uploads and wait for them to settle before
        # re-raising, so no task is left dangling.
        for t in tasks:
            t.cancel()
        await asyncio.gather(*tasks, return_exceptions=True)
        raise
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/flow/parser/parser.py",
"license": "Apache License 2.0",
"lines": 906,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:rag/flow/parser/schema.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pydantic import BaseModel, ConfigDict, Field
class ParserFromUpstream(BaseModel):
    """Validated payload a Parser component receives from its upstream node."""

    # Timing metadata injected by the framework (underscore-prefixed aliases).
    created_time: float | None = Field(default=None, alias="_created_time")
    elapsed_time: float | None = Field(default=None, alias="_elapsed_time")
    # Original file name; the parser is chosen from its extension.
    name: str
    # File descriptor dict (expects at least "created_by" and "id" when
    # the pipeline is not bound to a document).
    file: dict | None = Field(default=None)
    # Optional preprocess flags forwarded by upstream.
    abstract: bool = False
    author: bool = False

    model_config = ConfigDict(populate_by_name=True, extra="forbid")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/flow/parser/schema.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:rag/flow/tokenizer/schema.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Literal
from pydantic import BaseModel, ConfigDict, Field, model_validator
class TokenizerFromUpstream(BaseModel):
    """Validated payload a Tokenizer component receives from upstream.

    Exactly one payload shape is expected: pre-split ``chunks``, a string
    payload matching ``output_format`` (markdown/text/html), or a JSON
    list.  ``_check_payloads`` enforces that the matching payload is
    actually present.
    """

    # Timing metadata injected by the framework (underscore-prefixed aliases).
    created_time: float | None = Field(default=None, alias="_created_time")
    elapsed_time: float | None = Field(default=None, alias="_elapsed_time")

    name: str = ""
    file: dict | None = Field(default=None)

    output_format: Literal["json", "markdown", "text", "html", "chunks"] | None = Field(default=None)

    # Alternative payloads; aliases let upstream use the bare output keys.
    chunks: list[dict[str, Any]] | None = Field(default=None)
    json_result: list[dict[str, Any]] | None = Field(default=None, alias="json")
    markdown_result: str | None = Field(default=None, alias="markdown")
    text_result: str | None = Field(default=None, alias="text")
    html_result: str | None = Field(default=None, alias="html")

    model_config = ConfigDict(populate_by_name=True, extra="forbid")

    @model_validator(mode="after")
    def _check_payloads(self) -> "TokenizerFromUpstream":
        """Ensure the payload matching ``output_format`` is present."""
        if self.chunks:
            return self
        if self.output_format in {"markdown", "text", "html"}:
            if self.output_format == "markdown" and not self.markdown_result:
                raise ValueError("output_format=markdown requires a markdown payload (field: 'markdown' or 'markdown_result').")
            if self.output_format == "text" and not self.text_result:
                raise ValueError("output_format=text requires a text payload (field: 'text' or 'text_result').")
            if self.output_format == "html" and not self.html_result:
                # Fix: this message previously said "output_format=text"
                # (copy-paste error), misleading users about which field failed.
                raise ValueError("output_format=html requires a html payload (field: 'html' or 'html_result').")
        else:
            if not self.json_result and not self.chunks:
                raise ValueError("When no chunks are provided and output_format is not markdown/text, a JSON list payload is required (field: 'json' or 'json_result').")
        return self
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/flow/tokenizer/schema.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/tools/searxng.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import time
from abc import ABC
import requests
from agent.tools.base import ToolMeta, ToolParamBase, ToolBase
from common.connection_utils import timeout
class SearXNGParam(ToolParamBase):
    """
    Define the SearXNG component parameters.
    """

    def __init__(self):
        # Tool metadata consumed by the agent framework / LLM function calling.
        self.meta: ToolMeta = {
            "name": "searxng_search",
            "description": "SearXNG is a privacy-focused metasearch engine that aggregates results from multiple search engines without tracking users. It provides comprehensive web search capabilities.",
            "parameters": {
                "query": {
                    "type": "string",
                    "description": "The search keywords to execute with SearXNG. The keywords should be the most important words/terms(includes synonyms) from the original request.",
                    "default": "{sys.query}",
                    "required": True
                },
                "searxng_url": {
                    "type": "string",
                    "description": "The base URL of your SearXNG instance (e.g., http://localhost:4000). This is required to connect to your SearXNG server.",
                    "required": False,
                    "default": ""
                }
            }
        }
        super().__init__()
        # Maximum number of search results to keep.
        self.top_n = 10
        # Base URL of the SearXNG instance; empty means "not configured".
        self.searxng_url = ""

    def check(self):
        # Keep validation lenient so opening try-run panel won't fail without URL.
        # Coerce top_n to int if it comes as string from UI.
        try:
            if isinstance(self.top_n, str):
                self.top_n = int(self.top_n.strip())
        except Exception:
            pass
        self.check_positive_integer(self.top_n, "Top N")

    def get_input_form(self) -> dict[str, dict]:
        # Form schema rendered by the UI for manual input.
        return {
            "query": {
                "name": "Query",
                "type": "line"
            },
            "searxng_url": {
                "name": "SearXNG URL",
                "type": "line",
                "placeholder": "http://localhost:4000"
            }
        }
class SearXNG(ToolBase, ABC):
    """Agent tool that queries a SearXNG metasearch instance."""

    component_name = "SearXNG"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
    def _invoke(self, **kwargs):
        """Run the search with retries and return formalized text content.

        Cancellation is checked before/after every blocking step.  With a
        missing query or URL the tool degrades to an empty result instead
        of raising, so a try-run without configuration does not fail.
        """
        if self.check_if_canceled("SearXNG processing"):
            return
        # Gracefully handle try-run without inputs
        query = kwargs.get("query")
        if not query or not isinstance(query, str) or not query.strip():
            self.set_output("formalized_content", "")
            return ""
        # Prefer the configured URL; fall back to a per-call kwarg.
        searxng_url = (getattr(self._param, "searxng_url", "") or kwargs.get("searxng_url") or "").strip()
        # In try-run, if no URL configured, just return empty instead of raising
        if not searxng_url:
            self.set_output("formalized_content", "")
            return ""
        last_e = ""
        for _ in range(self._param.max_retries+1):
            if self.check_if_canceled("SearXNG processing"):
                return
            try:
                search_params = {
                    'q': query,
                    'format': 'json',
                    'categories': 'general',
                    'language': 'auto',
                    'safesearch': 1,
                    'pageno': 1
                }
                response = requests.get(
                    f"{searxng_url}/search",
                    params=search_params,
                    timeout=10
                )
                response.raise_for_status()
                if self.check_if_canceled("SearXNG processing"):
                    return
                data = response.json()
                if not data or not isinstance(data, dict):
                    raise ValueError("Invalid response from SearXNG")
                results = data.get("results", [])
                if not isinstance(results, list):
                    raise ValueError("Invalid results format from SearXNG")
                # Keep only the top N configured results.
                results = results[:self._param.top_n]
                if self.check_if_canceled("SearXNG processing"):
                    return
                self._retrieve_chunks(results,
                                      get_title=lambda r: r.get("title", ""),
                                      get_url=lambda r: r.get("url", ""),
                                      get_content=lambda r: r.get("content", ""))
                self.set_output("json", results)
                return self.output("formalized_content")
            except requests.RequestException as e:
                if self.check_if_canceled("SearXNG processing"):
                    return
                last_e = f"Network error: {e}"
                logging.exception(f"SearXNG network error: {e}")
                time.sleep(self._param.delay_after_error)
            except Exception as e:
                if self.check_if_canceled("SearXNG processing"):
                    return
                last_e = str(e)
                logging.exception(f"SearXNG error: {e}")
                time.sleep(self._param.delay_after_error)
        if last_e:
            self.set_output("_ERROR", last_e)
            return f"SearXNG error: {last_e}"
        # Retries exhausted without success and without a recorded error —
        # should be unreachable.  NOTE(review): `assert` is stripped under
        # `python -O`; consider raising explicitly instead.
        assert False, self.output()

    def thoughts(self) -> str:
        # Human-readable progress line shown in the agent trace.
        return """
Keywords: {}
Searching with SearXNG for relevant results...
""".format(self.get_input().get("query", "-_-!"))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/tools/searxng.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:rag/flow/base.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import logging
import os
import time
from functools import partial
from typing import Any
from agent.component.base import ComponentBase, ComponentParamBase
from common.connection_utils import timeout
class ProcessParamBase(ComponentParamBase):
    """Base parameter set shared by all pipeline process components."""

    def __init__(self):
        super().__init__()
        # Effectively unbounded: upper limit (seconds) for ProcessBase.invoke().
        self.timeout = 100000000
        # Pipeline processes persist their execution logs by default.
        self.persist_logs = True
class ProcessBase(ComponentBase):
    """Base class for pipeline processes.

    Wires per-component progress callbacks and wraps ``_invoke`` with
    timing, a timeout, and error capture.
    """

    def __init__(self, pipeline, id, param: ProcessParamBase):
        super().__init__(pipeline, id, param)
        # Bind the canvas-level progress callback to this component id;
        # fall back to a no-op when the canvas provides none.
        if hasattr(self._canvas, "callback"):
            self.callback = partial(self._canvas.callback, id)
        else:
            self.callback = partial(lambda *args, **kwargs: None, id)

    async def invoke(self, **kwargs) -> dict[str, Any]:
        """Run ``_invoke`` under a timeout, recording timing and errors.

        All kwargs are mirrored into the component outputs first.  On
        failure, either the configured exception default value is applied
        or the error text is stored under "_ERROR", and the callback is
        notified with progress -1.  Always returns the output dict.
        """
        self.set_output("_created_time", time.perf_counter())
        for k, v in kwargs.items():
            self.set_output(k, v)
        try:
            await asyncio.wait_for(
                self._invoke(**kwargs),
                timeout=self._param.timeout
            )
            self.callback(1, "Done")
        except Exception as e:
            if self.get_exception_default_value():
                self.set_exception_default_value()
            else:
                self.set_output("_ERROR", str(e))
            logging.exception(e)
            self.callback(-1, str(e))
        self.set_output("_elapsed_time", time.perf_counter() - self.output("_created_time"))
        return self.output()

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10 * 60)))
    async def _invoke(self, **kwargs):
        # Subclasses implement the actual work here.
        raise NotImplementedError()
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/flow/base.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:rag/flow/pipeline.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import datetime
import json
import logging
import random
from timeit import default_timer as timer
from agent.canvas import Graph
from api.db.services.document_service import DocumentService
from api.db.services.task_service import has_canceled, TaskService, CANVAS_DEBUG_DOC_ID
from rag.utils.redis_conn import REDIS_CONN
class Pipeline(Graph):
    """A document-ingestion flow executed as a linear path of components.

    Extends Graph with:
      * optional binding to a document / knowledge base (``doc_id``),
      * per-run execution traces stored in Redis under
        ``"<flow_id>-<task_id>-logs"``,
      * progress mirroring into the task record via TaskService.
    """

    def __init__(self, dsl: str|dict, tenant_id=None, doc_id=None, task_id=None, flow_id=None):
        # Graph expects the DSL as a JSON string.
        if isinstance(dsl, dict):
            dsl = json.dumps(dsl, ensure_ascii=False)
        super().__init__(dsl, tenant_id, task_id)
        # The canvas-debug sentinel doc id means "not bound to a real document".
        if doc_id == CANVAS_DEBUG_DOC_ID:
            doc_id = None
        self._doc_id = doc_id
        self._flow_id = flow_id
        self._kb_id = None
        if self._doc_id:
            self._kb_id = DocumentService.get_knowledgebase_id(doc_id)
            # A document with no knowledge base behaves like an unbound run.
            if not self._kb_id:
                self._doc_id = None

    def callback(self, component_name: str, progress: float | int | None = None, message: str = "") -> None:
        """Append a progress event for *component_name* to the Redis trace.

        Consecutive events for the same component extend its ``trace`` list;
        a different component starts a new entry. When bound to a document,
        aggregated progress is mirrored into the task record. Raises
        TaskCanceledException when the task has been canceled.
        """
        from common.exceptions import TaskCanceledException

        log_key = f"{self._flow_id}-{self.task_id}-logs"
        timestamp = timer()
        if has_canceled(self.task_id):
            progress = -1
            message += "[CANCEL]"
        try:
            # NOTE(review): `bin` shadows the builtin; on a cold key it is
            # falsy/None and the resulting exception is swallowed below —
            # presumably the intended "no log yet" path, confirm.
            bin = REDIS_CONN.get(log_key)
            obj = json.loads(bin.encode("utf-8"))
            if obj:
                if obj[-1]["component_id"] == component_name:
                    # Same component as the previous event: extend its trace,
                    # measuring elapsed time against the prior event.
                    obj[-1]["trace"].append(
                        {
                            "progress": progress,
                            "message": message,
                            "datetime": datetime.datetime.now().strftime("%H:%M:%S"),
                            "timestamp": timestamp,
                            "elapsed_time": timestamp - obj[-1]["trace"][-1]["timestamp"],
                        }
                    )
                else:
                    # New component: open a fresh trace entry.
                    obj.append(
                        {
                            "component_id": component_name,
                            "trace": [{"progress": progress, "message": message, "datetime": datetime.datetime.now().strftime("%H:%M:%S"), "timestamp": timestamp, "elapsed_time": 0}],
                        }
                    )
            else:
                # First event of the run.
                obj = [
                    {
                        "component_id": component_name,
                        "trace": [{"progress": progress, "message": message, "datetime": datetime.datetime.now().strftime("%H:%M:%S"), "timestamp": timestamp, "elapsed_time": 0}],
                    }
                ]
            if component_name != "END" and self._doc_id and self.task_id:
                # Each component contributes an equal share of total progress;
                # any negative progress marks the whole run as failed.
                percentage = 1.0 / len(self.components.items())
                finished = 0.0
                for o in obj:
                    for t in o["trace"]:
                        if t["progress"] < 0:
                            finished = -1
                            break
                    if finished < 0:
                        break
                    finished += o["trace"][-1]["progress"] * percentage
                msg = ""
                if len(obj[-1]["trace"]) == 1:
                    # NOTE(review): `o` here is the variable leaked from the
                    # loop above — it equals obj[-1] only when no break fired;
                    # confirm obj[-1] wasn't intended.
                    msg += f"\n-------------------------------------\n[{self.get_component_name(o['component_id'])}]:\n"
                t = obj[-1]["trace"][-1]
                msg += "%s: %s\n" % (t["datetime"], t["message"])
                TaskService.update_progress(self.task_id, {"progress": finished, "progress_msg": msg})
            elif component_name == "END" and not self._doc_id:
                # Debug runs attach the final DSL snapshot to the last event.
                obj[-1]["trace"][-1]["dsl"] = json.loads(str(self))
            REDIS_CONN.set_obj(log_key, obj, 60 * 30)
        except Exception as e:
            logging.exception(e)

        if has_canceled(self.task_id):
            raise TaskCanceledException(message)

    def fetch_logs(self):
        """Return the run's trace list from Redis, or [] when absent/broken."""
        log_key = f"{self._flow_id}-{self.task_id}-logs"
        try:
            bin = REDIS_CONN.get(log_key)
            if bin:
                return json.loads(bin.encode("utf-8"))
        except Exception as e:
            logging.exception(e)
        return []

    async def run(self, **kwargs):
        """Execute the pipeline sequentially along its path.

        kwargs are fed into the first component; each later component
        receives the previous component's output. Returns the final
        component's output dict, or {} on error.
        """
        log_key = f"{self._flow_id}-{self.task_id}-logs"
        try:
            # Reset the trace for this run.
            REDIS_CONN.set_obj(log_key, [], 60 * 10)
        except Exception as e:
            logging.exception(e)

        self.error = ""
        if not self.path:
            # Fresh run: start from the ingestion entry component.
            self.path.append("File")
            cpn_obj = self.get_component_obj(self.path[0])
            await cpn_obj.invoke(**kwargs)
            if cpn_obj.error():
                self.error = "[ERROR]" + cpn_obj.error()
                self.callback(cpn_obj.component_name, -1, self.error)

        if self._doc_id:
            TaskService.update_progress(self.task_id, {
                "progress": random.randint(0, 5) / 100.0,
                "progress_msg": "Start the pipeline...",
                "begin_at": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")})

        # Resume from the last component already on the path.
        idx = len(self.path) - 1
        cpn_obj = self.get_component_obj(self.path[idx])
        idx += 1
        self.path.extend(cpn_obj.get_downstream())

        while idx < len(self.path) and not self.error:
            last_cpn = self.get_component_obj(self.path[idx - 1])
            cpn_obj = self.get_component_obj(self.path[idx])

            async def invoke():
                nonlocal last_cpn, cpn_obj
                # Pipe the previous component's outputs in as inputs.
                await cpn_obj.invoke(**last_cpn.output())
                #if inspect.iscoroutinefunction(cpn_obj.invoke):
                #    await cpn_obj.invoke(**last_cpn.output())
                #else:
                #    cpn_obj.invoke(**last_cpn.output())

            tasks = []
            tasks.append(asyncio.create_task(invoke()))
            await asyncio.gather(*tasks)
            if cpn_obj.error():
                self.error = "[ERROR]" + cpn_obj.error()
                self.callback(cpn_obj._id, -1, self.error)
                break

            idx += 1
            self.path.extend(cpn_obj.get_downstream())

        self.callback("END", 1 if not self.error else -1, json.dumps(self.get_component_obj(self.path[-1]).output(), ensure_ascii=False))
        if not self.error:
            return self.get_component_obj(self.path[-1]).output()

        TaskService.update_progress(self.task_id, {
            "progress": -1,
            "progress_msg": f"[ERROR]: {self.error}"})
        return {}
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/flow/pipeline.py",
"license": "Apache License 2.0",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:rag/flow/tests/client.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import asyncio
import json
import os
import time
from concurrent.futures import ThreadPoolExecutor
from common import settings
from rag.flow.pipeline import Pipeline
def print_logs(pipeline: Pipeline):
    """Poll the pipeline's trace every 5 seconds and echo changes to stdout."""
    previous = "[]"
    while True:
        time.sleep(5)
        current = json.dumps(pipeline.fetch_logs(), ensure_ascii=False)
        if current != previous:
            print(current)
            previous = current
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Default DSL: the bundled "general PDF" example next to this script.
    dsl_default_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "dsl_examples",
        "general_pdf_all.json",
    )
    parser.add_argument("-s", "--dsl", default=dsl_default_path, help="input dsl", action="store", required=False)
    # NOTE(review): default=False with required=True means the default can
    # never apply — confirm the defaults were intended.
    parser.add_argument("-d", "--doc_id", default=False, help="Document ID", action="store", required=True)
    parser.add_argument("-t", "--tenant_id", default=False, help="Tenant ID", action="store", required=True)
    args = parser.parse_args()
    settings.init_settings()

    # Fixed dummy task/flow ids are fine here: they only key the Redis log.
    pipeline = Pipeline(open(args.dsl, "r").read(), tenant_id=args.tenant_id, doc_id=args.doc_id, task_id="xxxx", flow_id="xxx")
    pipeline.reset()
    # Tail the trace in a background thread while the pipeline runs.
    exe = ThreadPoolExecutor(max_workers=5)
    thr = exe.submit(print_logs, pipeline)
    # queue_dataflow(dsl=open(args.dsl, "r").read(), tenant_id=args.tenant_id, doc_id=args.doc_id, task_id="xxxx", flow_id="xxx", priority=0)
    asyncio.run(pipeline.run())
    thr.result()
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/flow/tests/client.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:api/db/services/tenant_llm_service.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import logging
from peewee import IntegrityError
from langfuse import Langfuse
from common import settings
from common.constants import MINERU_DEFAULT_CONFIG, MINERU_ENV_KEYS, PADDLEOCR_DEFAULT_CONFIG, PADDLEOCR_ENV_KEYS, LLMType
from api.db.db_models import DB, LLMFactories, TenantLLM
from api.db.services.common_service import CommonService
from api.db.services.langfuse_service import TenantLangfuseService
from api.db.services.user_service import TenantService
from rag.llm import ChatModel, CvModel, EmbeddingModel, OcrModel, RerankModel, Seq2txtModel, TTSModel
class LLMFactoriesService(CommonService):
    """CRUD service bound to the LLMFactories table (LLM provider catalog)."""
    model = LLMFactories
class TenantLLMService(CommonService):
    """Service layer for per-tenant LLM configuration.

    Covers API-key lookup, model-config resolution, model instantiation,
    token-usage accounting and env-provisioned OCR models (MinerU/PaddleOCR).

    Fixes vs. previous revision:
      * `_collect_mineru_env_config` / `_collect_paddleocr_env_config` used to
        alias the module-level default dicts and mutate them in place, so env
        values leaked into the shared defaults across calls; both now work on
        a shallow copy.
      * `assert False` for the unknown-LLM-type branch (stripped under -O)
        replaced with an explicit raise of the same exception type.
    """

    model = TenantLLM

    @classmethod
    @DB.connection_context()
    def get_api_key(cls, tenant_id, model_name):
        """Return the TenantLLM row holding the key for *model_name*, or None.

        Falls back to legacy factory-suffixed names (e.g. "xxx___VLLM") kept
        for backward compatibility with older records.
        """
        mdlnm, fid = TenantLLMService.split_model_name_and_factory(model_name)
        if not fid:
            objs = cls.query(tenant_id=tenant_id, llm_name=mdlnm)
        else:
            objs = cls.query(tenant_id=tenant_id, llm_name=mdlnm, llm_factory=fid)

        if (not objs) and fid:
            # Legacy rows may store the factory as a suffix on the model name.
            if fid == "LocalAI":
                mdlnm += "___LocalAI"
            elif fid == "HuggingFace":
                mdlnm += "___HuggingFace"
            elif fid == "OpenAI-API-Compatible":
                mdlnm += "___OpenAI-API"
            elif fid == "VLLM":
                mdlnm += "___VLLM"

            objs = cls.query(tenant_id=tenant_id, llm_name=mdlnm, llm_factory=fid)
        if not objs:
            return None
        return objs[0]

    @classmethod
    @DB.connection_context()
    def get_my_llms(cls, tenant_id):
        """List the tenant's configured models (joined with factory metadata)."""
        fields = [cls.model.llm_factory, LLMFactories.logo, LLMFactories.tags, cls.model.model_type, cls.model.llm_name, cls.model.used_tokens, cls.model.status]
        objs = cls.model.select(*fields).join(LLMFactories, on=(cls.model.llm_factory == LLMFactories.name)).where(cls.model.tenant_id == tenant_id, ~cls.model.api_key.is_null()).dicts()
        return list(objs)

    @staticmethod
    def split_model_name_and_factory(model_name):
        """Split "model@factory" into (model, factory).

        Returns (model_name, None) when there is no "@" or when the suffix is
        not a known provider name; with multiple "@"s only the last segment is
        treated as the factory.
        """
        arr = model_name.split("@")
        if len(arr) < 2:
            return model_name, None
        if len(arr) > 2:
            return "@".join(arr[0:-1]), arr[-1]

        # model name must be xxx@yyy
        try:
            model_factories = settings.FACTORY_LLM_INFOS
            model_providers = set([f["name"] for f in model_factories])
            if arr[-1] not in model_providers:
                return model_name, None
            return arr[0], arr[-1]
        except Exception as e:
            logging.exception(f"TenantLLMService.split_model_name_and_factory got exception: {e}")
        return model_name, None

    @classmethod
    @DB.connection_context()
    def get_model_config(cls, tenant_id, llm_type, llm_name=None):
        """Resolve the stored config dict for a tenant's model.

        Falls back to tenant defaults when *llm_name* is omitted, and to the
        built-in TEI embedding service when enabled. Raises LookupError when
        the tenant or an authorized model cannot be found.
        """
        from api.db.services.llm_service import LLMService

        e, tenant = TenantService.get_by_id(tenant_id)
        if not e:
            raise LookupError("Tenant not found")

        if llm_type == LLMType.EMBEDDING.value:
            mdlnm = tenant.embd_id if not llm_name else llm_name
        elif llm_type == LLMType.SPEECH2TEXT.value:
            mdlnm = tenant.asr_id if not llm_name else llm_name
        elif llm_type == LLMType.IMAGE2TEXT.value:
            mdlnm = tenant.img2txt_id if not llm_name else llm_name
        elif llm_type == LLMType.CHAT.value:
            mdlnm = tenant.llm_id if not llm_name else llm_name
        elif llm_type == LLMType.RERANK:
            mdlnm = tenant.rerank_id if not llm_name else llm_name
        elif llm_type == LLMType.TTS:
            mdlnm = tenant.tts_id if not llm_name else llm_name
        elif llm_type == LLMType.OCR:
            # OCR has no tenant-level default model.
            if not llm_name:
                raise LookupError("OCR model name is required")
            mdlnm = llm_name
        else:
            # Was `assert False, ...` — keep AssertionError for caller
            # compatibility but don't rely on assertions (stripped under -O).
            raise AssertionError("LLM type error")

        model_config = cls.get_api_key(tenant_id, mdlnm)
        mdlnm, fid = TenantLLMService.split_model_name_and_factory(mdlnm)
        if not model_config:  # for some cases seems fid mismatch
            model_config = cls.get_api_key(tenant_id, mdlnm)
        if model_config:
            model_config = model_config.to_dict()
        elif llm_type == LLMType.EMBEDDING and fid == "Builtin" and "tei-" in os.getenv("COMPOSE_PROFILES", "") and mdlnm == os.getenv("TEI_MODEL", ""):
            # Built-in TEI embedding service enabled via compose profile.
            embedding_cfg = settings.EMBEDDING_CFG
            model_config = {"llm_factory": "Builtin", "api_key": embedding_cfg["api_key"], "llm_name": mdlnm, "api_base": embedding_cfg["base_url"]}
        else:
            raise LookupError(f"Model({mdlnm}@{fid}) not authorized")

        llm = LLMService.query(llm_name=mdlnm) if not fid else LLMService.query(llm_name=mdlnm, fid=fid)
        if not llm and fid:  # for some cases seems fid mismatch
            llm = LLMService.query(llm_name=mdlnm)
        if llm:
            model_config["is_tools"] = llm[0].is_tools
        return model_config

    @classmethod
    @DB.connection_context()
    def model_instance(cls, tenant_id, llm_type, llm_name=None, lang="Chinese", **kwargs):
        """Instantiate the provider-specific model wrapper for a tenant.

        Returns None when the configured factory has no implementation for
        the requested model type.
        """
        model_config = TenantLLMService.get_model_config(tenant_id, llm_type, llm_name)
        kwargs.update({"provider": model_config["llm_factory"]})

        if llm_type == LLMType.EMBEDDING.value:
            if model_config["llm_factory"] not in EmbeddingModel:
                return None
            return EmbeddingModel[model_config["llm_factory"]](model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])

        elif llm_type == LLMType.RERANK:
            if model_config["llm_factory"] not in RerankModel:
                return None
            return RerankModel[model_config["llm_factory"]](model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])

        elif llm_type == LLMType.IMAGE2TEXT.value:
            if model_config["llm_factory"] not in CvModel:
                return None
            return CvModel[model_config["llm_factory"]](model_config["api_key"], model_config["llm_name"], lang, base_url=model_config["api_base"], **kwargs)

        elif llm_type == LLMType.CHAT.value:
            if model_config["llm_factory"] not in ChatModel:
                return None
            return ChatModel[model_config["llm_factory"]](model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"], **kwargs)

        elif llm_type == LLMType.SPEECH2TEXT:
            if model_config["llm_factory"] not in Seq2txtModel:
                return None
            return Seq2txtModel[model_config["llm_factory"]](key=model_config["api_key"], model_name=model_config["llm_name"], lang=lang, base_url=model_config["api_base"])

        elif llm_type == LLMType.TTS:
            if model_config["llm_factory"] not in TTSModel:
                return None
            return TTSModel[model_config["llm_factory"]](
                model_config["api_key"],
                model_config["llm_name"],
                base_url=model_config["api_base"],
            )

        elif llm_type == LLMType.OCR:
            if model_config["llm_factory"] not in OcrModel:
                return None
            return OcrModel[model_config["llm_factory"]](
                key=model_config["api_key"],
                model_name=model_config["llm_name"],
                base_url=model_config.get("api_base", ""),
                **kwargs,
            )

        return None

    @classmethod
    @DB.connection_context()
    def increase_usage(cls, tenant_id, llm_type, used_tokens, llm_name=None):
        """Add *used_tokens* to the matching row's counter; return rows updated (0 on failure)."""
        e, tenant = TenantService.get_by_id(tenant_id)
        if not e:
            logging.error(f"Tenant not found: {tenant_id}")
            return 0

        llm_map = {
            LLMType.EMBEDDING.value: tenant.embd_id if not llm_name else llm_name,
            LLMType.SPEECH2TEXT.value: tenant.asr_id,
            LLMType.IMAGE2TEXT.value: tenant.img2txt_id,
            LLMType.CHAT.value: tenant.llm_id if not llm_name else llm_name,
            LLMType.RERANK.value: tenant.rerank_id if not llm_name else llm_name,
            LLMType.TTS.value: tenant.tts_id if not llm_name else llm_name,
            LLMType.OCR.value: llm_name,
        }

        mdlnm = llm_map.get(llm_type)
        if mdlnm is None:
            logging.error(f"LLM type error: {llm_type}")
            return 0

        llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(mdlnm)

        try:
            num = (
                cls.model.update(used_tokens=cls.model.used_tokens + used_tokens)
                .where(cls.model.tenant_id == tenant_id, cls.model.llm_name == llm_name, cls.model.llm_factory == llm_factory if llm_factory else True)
                .execute()
            )
        except Exception:
            logging.exception("TenantLLMService.increase_usage got exception,Failed to update used_tokens for tenant_id=%s, llm_name=%s", tenant_id, llm_name)
            return 0
        return num

    @classmethod
    @DB.connection_context()
    def get_openai_models(cls):
        """List OpenAI rows excluding the embedding-only models."""
        objs = cls.model.select().where((cls.model.llm_factory == "OpenAI"), ~(cls.model.llm_name == "text-embedding-3-small"), ~(cls.model.llm_name == "text-embedding-3-large")).dicts()
        return list(objs)

    @classmethod
    def _collect_mineru_env_config(cls) -> dict | None:
        """Merge MinerU env vars over the defaults; None when none are set."""
        # Copy: never mutate the shared module-level default dict.
        cfg = dict(MINERU_DEFAULT_CONFIG)
        found = False
        for key in MINERU_ENV_KEYS:
            val = os.environ.get(key)
            if val:
                found = True
                cfg[key] = val
        return cfg if found else None

    @classmethod
    @DB.connection_context()
    def ensure_mineru_from_env(cls, tenant_id: str) -> str | None:
        """
        Ensure a MinerU OCR model exists for the tenant if env variables are present.
        Return the existing or newly created llm_name, or None if env not set.
        """
        cfg = cls._collect_mineru_env_config()
        if not cfg:
            return None

        saved_mineru_models = cls.query(tenant_id=tenant_id, llm_factory="MinerU", model_type=LLMType.OCR.value)

        def _parse_api_key(raw: str) -> dict:
            try:
                return json.loads(raw or "{}")
            except Exception:
                return {}

        # Reuse an existing row whose stored config matches the env config.
        for item in saved_mineru_models:
            api_cfg = _parse_api_key(item.api_key)
            normalized = {k: api_cfg.get(k, MINERU_DEFAULT_CONFIG.get(k)) for k in MINERU_ENV_KEYS}
            if normalized == cfg:
                return item.llm_name

        # Otherwise create a new row under the first free generated name.
        used_names = {item.llm_name for item in saved_mineru_models}
        idx = 1
        base_name = "mineru-from-env"
        while True:
            candidate = f"{base_name}-{idx}"
            if candidate in used_names:
                idx += 1
                continue
            try:
                cls.save(
                    tenant_id=tenant_id,
                    llm_factory="MinerU",
                    llm_name=candidate,
                    model_type=LLMType.OCR.value,
                    api_key=json.dumps(cfg),
                    api_base="",
                    max_tokens=0,
                )
                return candidate
            except IntegrityError:
                # Lost a race with a concurrent insert; try the next name.
                logging.warning("MinerU env model %s already exists for tenant %s, retry with next name", candidate, tenant_id)
                used_names.add(candidate)
                idx += 1
                continue

    @classmethod
    def _collect_paddleocr_env_config(cls) -> dict | None:
        """Merge PaddleOCR env vars over the defaults; None when none are set."""
        # Copy: never mutate the shared module-level default dict.
        cfg = dict(PADDLEOCR_DEFAULT_CONFIG)
        found = False
        for key in PADDLEOCR_ENV_KEYS:
            val = os.environ.get(key)
            if val:
                found = True
                cfg[key] = val
        return cfg if found else None

    @classmethod
    @DB.connection_context()
    def ensure_paddleocr_from_env(cls, tenant_id: str) -> str | None:
        """
        Ensure a PaddleOCR model exists for the tenant if env variables are present.
        Return the existing or newly created llm_name, or None if env not set.
        """
        cfg = cls._collect_paddleocr_env_config()
        if not cfg:
            return None

        saved_paddleocr_models = cls.query(tenant_id=tenant_id, llm_factory="PaddleOCR", model_type=LLMType.OCR.value)

        def _parse_api_key(raw: str) -> dict:
            try:
                return json.loads(raw or "{}")
            except Exception:
                return {}

        # Reuse an existing row whose stored config matches the env config.
        for item in saved_paddleocr_models:
            api_cfg = _parse_api_key(item.api_key)
            normalized = {k: api_cfg.get(k, PADDLEOCR_DEFAULT_CONFIG.get(k)) for k in PADDLEOCR_ENV_KEYS}
            if normalized == cfg:
                return item.llm_name

        # Otherwise create a new row under the first free generated name.
        used_names = {item.llm_name for item in saved_paddleocr_models}
        idx = 1
        base_name = "paddleocr-from-env"
        while True:
            candidate = f"{base_name}-{idx}"
            if candidate in used_names:
                idx += 1
                continue
            try:
                cls.save(
                    tenant_id=tenant_id,
                    llm_factory="PaddleOCR",
                    llm_name=candidate,
                    model_type=LLMType.OCR.value,
                    api_key=json.dumps(cfg),
                    api_base="",
                    max_tokens=0,
                )
                return candidate
            except IntegrityError:
                # Lost a race with a concurrent insert; try the next name.
                logging.warning("PaddleOCR env model %s already exists for tenant %s, retry with next name", candidate, tenant_id)
                used_names.add(candidate)
                idx += 1
                continue

    @classmethod
    @DB.connection_context()
    def delete_by_tenant_id(cls, tenant_id):
        """Delete every model row belonging to *tenant_id*; return rows deleted."""
        return cls.model.delete().where(cls.model.tenant_id == tenant_id).execute()

    @staticmethod
    def llm_id2llm_type(llm_id: str) -> str | None:
        """Best-effort model-type lookup for *llm_id*.

        Checks, in order: the factory catalog, the LLM table, then the
        tenant-configured rows. Returns None when nothing matches.
        """
        from api.db.services.llm_service import LLMService

        llm_id, *_ = TenantLLMService.split_model_name_and_factory(llm_id)
        llm_factories = settings.FACTORY_LLM_INFOS
        for llm_factory in llm_factories:
            for llm in llm_factory["llm"]:
                if llm_id == llm["llm_name"]:
                    # Catalog stores a comma-separated list; the last entry wins.
                    return llm["model_type"].split(",")[-1]
        for llm in LLMService.query(llm_name=llm_id):
            return llm.model_type
        llm = TenantLLMService.get_or_none(llm_name=llm_id)
        if llm:
            return llm.model_type
        for llm in TenantLLMService.query(llm_name=llm_id):
            return llm.model_type
        return None
class LLM4Tenant:
    """A tenant-scoped model handle plus optional Langfuse tracing.

    Wraps the instantiated model (``self.mdl``) together with config-derived
    limits (``max_length``, ``is_tools``).
    """

    def __init__(self, tenant_id, llm_type, llm_name=None, lang="Chinese", **kwargs):
        self.tenant_id = tenant_id
        self.llm_type = llm_type
        self.llm_name = llm_name
        self.mdl = TenantLLMService.model_instance(tenant_id, llm_type, llm_name, lang=lang, **kwargs)
        assert self.mdl, "Can't find model for {}/{}/{}".format(tenant_id, llm_type, llm_name)
        model_config = TenantLLMService.get_model_config(tenant_id, llm_type, llm_name)
        # Context-window fallback of 8192 tokens when the config has no limit.
        self.max_length = model_config.get("max_tokens", 8192)
        self.is_tools = model_config.get("is_tools", False)
        self.verbose_tool_use = kwargs.get("verbose_tool_use")

        langfuse_keys = TenantLangfuseService.filter_by_tenant(tenant_id=tenant_id)
        self.langfuse = None
        if langfuse_keys:
            langfuse = Langfuse(public_key=langfuse_keys.public_key, secret_key=langfuse_keys.secret_key, host=langfuse_keys.host)
            try:
                if langfuse.auth_check():
                    self.langfuse = langfuse
                    trace_id = self.langfuse.create_trace_id()
                    # NOTE(review): trace_context is only defined when Langfuse
                    # authenticates — readers must guard on self.langfuse.
                    self.trace_context = {"trace_id": trace_id}
            except Exception:
                # Skip langfuse tracing if connection fails
                pass
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/db/services/tenant_llm_service.py",
"license": "Apache License 2.0",
"lines": 350,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/component/llm.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import json
import logging
import os
import re
from copy import deepcopy
from typing import Any, AsyncGenerator
import json_repair
from functools import partial
from common.constants import LLMType
from api.db.services.llm_service import LLMBundle
from api.db.services.tenant_llm_service import TenantLLMService
from agent.component.base import ComponentBase, ComponentParamBase
from common.connection_utils import timeout
from rag.prompts.generator import tool_call_summary, message_fit_in, citation_prompt, structured_output_prompt
class LLMParam(ComponentParamBase):
    """
    Define the LLM component parameters.
    """

    def __init__(self):
        super().__init__()
        self.llm_id = ""
        self.sys_prompt = ""
        # Default user prompt forwards the incoming query verbatim.
        self.prompts = [{"role": "user", "content": "{sys.query}"}]
        self.max_tokens = 0
        self.temperature = 0
        self.top_p = 0
        self.presence_penalty = 0
        self.frequency_penalty = 0
        self.output_structure = None
        self.cite = True
        # Variable reference holding data-URI images to attach, if any.
        self.visual_files_var = None

    def check(self):
        """Validate parameter ranges and required fields; raises on failure."""
        self.check_decimal_float(float(self.temperature), "[Agent] Temperature")
        self.check_decimal_float(float(self.presence_penalty), "[Agent] Presence penalty")
        self.check_decimal_float(float(self.frequency_penalty), "[Agent] Frequency penalty")
        self.check_nonnegative_number(int(self.max_tokens), "[Agent] Max tokens")
        self.check_decimal_float(float(self.top_p), "[Agent] Top P")
        self.check_empty(self.llm_id, "[Agent] LLM")
        self.check_empty(self.prompts, "[Agent] User prompt")

    def gen_conf(self):
        """Build the generation-config dict from the enabled knobs only."""
        conf = {}

        def get_attr(nm):
            # Returns None (falsy) when the attribute doesn't exist.
            try:
                return getattr(self, nm)
            except Exception:
                pass

        # Each knob is forwarded only when positive AND its UI toggle is set.
        if int(self.max_tokens) > 0 and get_attr("maxTokensEnabled"):
            conf["max_tokens"] = int(self.max_tokens)
        if float(self.temperature) > 0 and get_attr("temperatureEnabled"):
            conf["temperature"] = float(self.temperature)
        if float(self.top_p) > 0 and get_attr("topPEnabled"):
            conf["top_p"] = float(self.top_p)
        if float(self.presence_penalty) > 0 and get_attr("presencePenaltyEnabled"):
            conf["presence_penalty"] = float(self.presence_penalty)
        if float(self.frequency_penalty) > 0 and get_attr("frequencyPenaltyEnabled"):
            conf["frequency_penalty"] = float(self.frequency_penalty)
        return conf
class LLM(ComponentBase):
component_name = "LLM"
    def __init__(self, canvas, component_id, param: ComponentParamBase):
        super().__init__(canvas, component_id, param)
        # Default chat bundle; may be swapped for an image2text bundle later
        # when visual inputs are detected (see _prepare_prompt_variables).
        self.chat_mdl = LLMBundle(self._canvas.get_tenant_id(), TenantLLMService.llm_id2llm_type(self._param.llm_id),
                                  self._param.llm_id, max_retries=self._param.max_retries,
                                  retry_interval=self._param.delay_after_error
                                  )
        # Data-URI images collected from inputs for the current invocation.
        self.imgs = []
def get_input_form(self) -> dict[str, dict]:
res = {}
for k, v in self.get_input_elements().items():
res[k] = {
"type": "line",
"name": v["name"]
}
return res
def get_input_elements(self) -> dict[str, Any]:
res = self.get_input_elements_from_text(self._param.sys_prompt)
if isinstance(self._param.prompts, str):
self._param.prompts = [{"role": "user", "content": self._param.prompts}]
for prompt in self._param.prompts:
d = self.get_input_elements_from_text(prompt["content"])
res.update(d)
return res
    def set_debug_inputs(self, inputs: dict[str, dict]):
        """Override resolved input variables for single-step debugging runs."""
        self._param.debug_inputs = inputs
    def add2system_prompt(self, txt):
        """Append extra text to the configured system prompt."""
        self._param.sys_prompt += txt
    def _sys_prompt_and_msg(self, msg, args):
        """Render configured prompts and the system prompt with *args* substituted.

        Appends each configured prompt to *msg*, skipping one whose role would
        duplicate the current last message's role (no two consecutive messages
        of the same role). Returns (msg, rendered_sys_prompt).
        """
        if isinstance(self._param.prompts, str):
            self._param.prompts = [{"role": "user", "content": self._param.prompts}]
        for p in self._param.prompts:
            if msg and msg[-1]["role"] == p["role"]:
                continue
            p = deepcopy(p)  # don't mutate the configured template
            p["content"] = self.string_format(p["content"], args)
            msg.append(p)
        return msg, self.string_format(self._param.sys_prompt, args)
@staticmethod
def _extract_data_images(value) -> list[str]:
imgs = []
def walk(v):
if v is None:
return
if isinstance(v, str):
v = v.strip()
if v.startswith("data:image/"):
imgs.append(v)
return
if isinstance(v, (list, tuple, set)):
for item in v:
walk(item)
return
if isinstance(v, dict):
if "content" in v:
walk(v.get("content"))
else:
for item in v.values():
walk(item)
walk(value)
return imgs
@staticmethod
def _uniq_images(images: list[str]) -> list[str]:
seen = set()
uniq = []
for img in images:
if not isinstance(img, str):
continue
if not img.startswith("data:image/"):
continue
if img in seen:
continue
seen.add(img)
uniq.append(img)
return uniq
@classmethod
def _remove_data_images(cls, value):
if value is None:
return None
if isinstance(value, str):
return None if value.strip().startswith("data:image/") else value
if isinstance(value, list):
cleaned = []
for item in value:
v = cls._remove_data_images(item)
if v is None:
continue
if isinstance(v, (list, tuple, set, dict)) and not v:
continue
cleaned.append(v)
return cleaned
if isinstance(value, tuple):
cleaned = []
for item in value:
v = cls._remove_data_images(item)
if v is None:
continue
if isinstance(v, (list, tuple, set, dict)) and not v:
continue
cleaned.append(v)
return tuple(cleaned)
if isinstance(value, set):
cleaned = []
for item in value:
v = cls._remove_data_images(item)
if v is None:
continue
if isinstance(v, (list, tuple, set, dict)) and not v:
continue
cleaned.append(v)
return cleaned
if isinstance(value, dict):
if value.get("type") in {"image_url", "input_image", "image"} and cls._extract_data_images(value):
return None
cleaned = {}
for k, item in value.items():
v = cls._remove_data_images(item)
if v is None:
continue
if isinstance(v, (list, tuple, set, dict)) and not v:
continue
cleaned[k] = v
return cleaned
return value
    def _prepare_prompt_variables(self):
        """Resolve input variables, split out images, and render the prompts.

        Returns (sys_prompt, messages, user_defined_prompt_sections). Side
        effects: fills self.imgs and may swap self.chat_mdl for an
        image2text bundle when images are present on a chat-type model.
        """
        self.imgs = []
        if self._param.visual_files_var:
            self.imgs.extend(self._extract_data_images(self._canvas.get_variable_value(self._param.visual_files_var)))

        args = {}
        vars = self.get_input_elements() if not self._param.debug_inputs else self._param.debug_inputs
        extracted_imgs = []
        for k, o in vars.items():
            raw_value = o["value"]
            # Images are pulled out of the value; the remainder becomes text.
            extracted_imgs.extend(self._extract_data_images(raw_value))
            args[k] = self._remove_data_images(raw_value)
            if args[k] is None:
                args[k] = ""
            if not isinstance(args[k], str):
                try:
                    args[k] = json.dumps(args[k], ensure_ascii=False)
                except Exception:
                    args[k] = str(args[k])
            self.set_input_value(k, args[k])

        self.imgs = self._uniq_images(self.imgs + extracted_imgs)
        if self.imgs and TenantLLMService.llm_id2llm_type(self._param.llm_id) == LLMType.CHAT.value:
            # A plain chat model can't take images: rebind as image2text.
            self.chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.IMAGE2TEXT.value,
                                      self._param.llm_id, max_retries=self._param.max_retries,
                                      retry_interval=self._param.delay_after_error
                                      )

        # History minus the current turn feeds the rendered prompt templates.
        msg, sys_prompt = self._sys_prompt_and_msg(self._canvas.get_history(self._param.message_history_window_size)[:-1], args)
        user_defined_prompt, sys_prompt = self._extract_prompts(sys_prompt)
        if self._param.cite and self._canvas.get_reference()["chunks"]:
            sys_prompt += citation_prompt(user_defined_prompt)
        return sys_prompt, msg, user_defined_prompt
def _extract_prompts(self, sys_prompt):
pts = {}
for tag in ["TASK_ANALYSIS", "PLAN_GENERATION", "REFLECTION", "CONTEXT_SUMMARY", "CONTEXT_RANKING", "CITATION_GUIDELINES"]:
r = re.search(rf"<{tag}>(.*?)</{tag}>", sys_prompt, flags=re.DOTALL|re.IGNORECASE)
if not r:
continue
pts[tag.lower()] = r.group(1)
sys_prompt = re.sub(rf"<{tag}>(.*?)</{tag}>", "", sys_prompt, flags=re.DOTALL|re.IGNORECASE)
return pts, sys_prompt
async def _generate_async(self, msg: list[dict], **kwargs) -> str:
if not self.imgs:
return await self.chat_mdl.async_chat(msg[0]["content"], msg[1:], self._param.gen_conf(), **kwargs)
return await self.chat_mdl.async_chat(msg[0]["content"], msg[1:], self._param.gen_conf(), images=self.imgs, **kwargs)
    async def _generate_streamly(self, msg: list[dict], **kwargs) -> AsyncGenerator[str, None]:
        """Streaming chat completion yielding per-chunk deltas.

        The upstream iterator yields cumulative text; `delta` converts it to
        increments while passing "<think>"/"</think>" markers through as
        standalone tokens and stripping them from regular content.
        """
        async def delta_wrapper(txt_iter):
            ans = ""
            last_idx = 0
            endswith_think = False

            def delta(txt):
                # Stateful diff of the cumulative stream against what was
                # already emitted; think-tags are normalized on the way out.
                nonlocal ans, last_idx, endswith_think
                delta_ans = txt[last_idx:]
                ans = txt
                if delta_ans.find("<think>") == 0:
                    last_idx += len("<think>")
                    return "<think>"
                elif delta_ans.find("<think>") > 0:
                    # Emit only the text that precedes the tag this round.
                    delta_ans = txt[last_idx:last_idx + delta_ans.find("<think>")]
                    last_idx += delta_ans.find("<think>")
                    return delta_ans
                elif delta_ans.endswith("</think>"):
                    # Hold the closing tag until the next chunk confirms it.
                    endswith_think = True
                elif endswith_think:
                    endswith_think = False
                    return "</think>"

                last_idx = len(ans)
                if ans.endswith("</think>"):
                    last_idx -= len("</think>")
                return re.sub(r"(<think>|</think>)", "", delta_ans)

            async for t in txt_iter:
                yield delta(t)

        if not self.imgs:
            async for t in delta_wrapper(self.chat_mdl.async_chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf(), **kwargs)):
                yield t
            return
        async for t in delta_wrapper(self.chat_mdl.async_chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf(), images=self.imgs, **kwargs)):
            yield t
    async def _stream_output_async(self, prompt, msg):
        """Stream the model's answer for downstream Message components.

        Trims the conversation to fit the model context, yields think-tag
        normalized deltas, and stores the full answer under "content".
        Honors cancellation and the configured exception default.
        """
        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
        answer = ""
        last_idx = 0
        endswith_think = False

        def delta(txt):
            # Same cumulative-to-incremental state machine as _generate_streamly.
            nonlocal answer, last_idx, endswith_think
            delta_ans = txt[last_idx:]
            answer = txt
            if delta_ans.find("<think>") == 0:
                last_idx += len("<think>")
                return "<think>"
            elif delta_ans.find("<think>") > 0:
                delta_ans = txt[last_idx:last_idx + delta_ans.find("<think>")]
                last_idx += delta_ans.find("<think>")
                return delta_ans
            elif delta_ans.endswith("</think>"):
                endswith_think = True
            elif endswith_think:
                endswith_think = False
                return "</think>"

            last_idx = len(answer)
            if answer.endswith("</think>"):
                last_idx -= len("</think>")
            return re.sub(r"(<think>|</think>)", "", delta_ans)

        stream_kwargs = {"images": self.imgs} if self.imgs else {}
        async for ans in self.chat_mdl.async_chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf(), **stream_kwargs):
            if self.check_if_canceled("LLM streaming"):
                return
            # Some providers interleave token counts (ints) into the stream.
            if isinstance(ans, int):
                continue
            if ans.find("**ERROR**") >= 0:
                if self.get_exception_default_value():
                    self.set_output("content", self.get_exception_default_value())
                    yield self.get_exception_default_value()
                else:
                    self.set_output("_ERROR", ans)
                return
            yield delta(ans)

        self.set_output("content", answer)
@timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
async def _invoke_async(self, **kwargs):
    """Run the LLM component.

    Three execution paths, in order of precedence:
      1. Structured output: if a non-empty "structured" output schema is
         configured, ask the model for JSON matching it (with retries) and
         store the parsed object in the "structured" output.
      2. Streaming: if a downstream Message component exists (and no
         exception-goto is configured), store a streaming generator in the
         "content" output instead of generating eagerly.
      3. Plain generation with retries; the answer goes to "content".

    Errors are recorded in "_ERROR" (or replaced by the configured
    exception default value where applicable).
    """
    if self.check_if_canceled("LLM processing"):
        return

    def clean_formatted_answer(ans: str) -> str:
        # Drop a leading <think> block and Markdown ```json fences so the
        # remainder can be parsed as JSON.
        ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
        ans = re.sub(r"^.*```json", "", ans, flags=re.DOTALL)
        return re.sub(r"```\n*$", "", ans, flags=re.DOTALL)

    prompt, msg, _ = self._prepare_prompt_variables()
    error: str = ""

    output_structure = None
    try:
        output_structure = self._param.outputs["structured"]
    except Exception:
        pass

    if output_structure and isinstance(output_structure, dict) and output_structure.get("properties") and len(output_structure["properties"]) > 0:
        schema = json.dumps(output_structure, ensure_ascii=False, indent=2)
        prompt_with_schema = prompt + structured_output_prompt(schema)
        for _ in range(self._param.max_retries + 1):
            if self.check_if_canceled("LLM processing"):
                return
            _, msg_fit = message_fit_in(
                [{"role": "system", "content": prompt_with_schema}, *deepcopy(msg)],
                int(self.chat_mdl.max_length * 0.97),
            )
            error = ""
            ans = await self._generate_async(msg_fit)
            msg_fit.pop(0)
            if ans.find("**ERROR**") >= 0:
                logging.error(f"LLM response error: {ans}")
                error = ans
                continue
            try:
                self.set_output("structured", json_repair.loads(clean_formatted_answer(ans)))
                return
            except Exception:
                # Wording fixed (was "can't not be parsed").
                # NOTE(review): msg_fit is rebuilt from msg on every retry, so
                # this appended hint never reaches the next attempt — verify.
                msg_fit.append({"role": "user", "content": "The answer cannot be parsed as JSON"})
                error = "The answer cannot be parsed as JSON"
        if error:
            self.set_output("_ERROR", error)
        return

    downstreams = self._canvas.get_component(self._id)["downstream"] if self._canvas.get_component(self._id) else []
    ex = self.exception_handler()
    if any([self._canvas.get_component_obj(cid).component_name.lower() == "message" for cid in downstreams]) and not (
        ex and ex["goto"]
    ):
        # A Message component will consume the stream; defer generation.
        self.set_output("content", partial(self._stream_output_async, prompt, deepcopy(msg)))
        return

    error = ""
    for _ in range(self._param.max_retries + 1):
        if self.check_if_canceled("LLM processing"):
            return
        _, msg_fit = message_fit_in(
            [{"role": "system", "content": prompt}, *deepcopy(msg)], int(self.chat_mdl.max_length * 0.97)
        )
        error = ""
        ans = await self._generate_async(msg_fit)
        msg_fit.pop(0)
        if ans.find("**ERROR**") >= 0:
            logging.error(f"LLM response error: {ans}")
            error = ans
            continue
        self.set_output("content", ans)
        break
    if error:
        if self.get_exception_default_value():
            self.set_output("content", self.get_exception_default_value())
        else:
            self.set_output("_ERROR", error)
@timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
def _invoke(self, **kwargs):
    """Synchronous entry point: drive the async implementation to completion."""
    coroutine = self._invoke_async(**kwargs)
    return asyncio.run(coroutine)
async def add_memory(self, user: str, assist: str, func_name: str, params: dict, results: str, user_defined_prompt: dict = None):
    """Summarize one tool call and persist it into the canvas memory.

    The LLM produces a short summary of (func_name, params, results), which
    is logged and stored alongside the user/assistant turn.
    """
    if user_defined_prompt is None:  # avoid the shared-mutable-default pitfall
        user_defined_prompt = {}
    summary = await tool_call_summary(self.chat_mdl, func_name, params, results, user_defined_prompt)
    logging.info(f"[MEMORY]: {summary}")
    self._canvas.add_memory(user, assist, summary)
def thoughts(self) -> str:
    """Status line displayed while this component is working."""
    _, msg,_ = self._prepare_prompt_variables()
    # Strip the "User's query:" label and backslash runs before echoing the
    # last message back as a progress note.
    # NOTE(review): the "โ" characters in the literals below look mis-encoded
    # (possibly an em dash/quote originally) — confirm the intended text.
    return "โGive me a momentโstarting from: \n\n" + re.sub(r"(User's query:|[\\]+)", '', msg[-1]['content'], flags=re.DOTALL) + "\n\nIโll figure out our best next move."
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/component/llm.py",
"license": "Apache License 2.0",
"lines": 393,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/component/string_transform.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
from abc import ABC
from typing import Any
from jinja2.sandbox import SandboxedEnvironment
_jinja2_sandbox = SandboxedEnvironment()
from agent.component.base import ComponentParamBase
from common.connection_utils import timeout
from .message import Message
class StringTransformParam(ComponentParamBase):
    """Configuration for the StringTransform component.

    Two methods are supported: "split" (cut a string on delimiters) and
    "merge" (render a script/template from variables).
    """

    def __init__(self):
        super().__init__()
        defaults = {
            "method": "split",
            "script": "",
            "split_ref": "",
            "delimiters": [","],
            "outputs": {"result": {"value": "", "type": "string"}},
        }
        for attr, value in defaults.items():
            setattr(self, attr, value)

    def check(self):
        """Validate the configured method and delimiter list."""
        self.check_valid_value(self.method, "Support method", ["split", "merge"])
        self.check_empty(self.delimiters, "delimiters")
class StringTransform(Message, ABC):
    """Split a string on configured delimiters, or merge variable values into
    a script/template; the result is exposed via the "result" output."""

    component_name = "StringTransform"

    def get_input_elements(self) -> dict[str, Any]:
        """Variables referenced by the merge script."""
        return self.get_input_elements_from_text(self._param.script)

    def get_input_form(self) -> dict[str, dict]:
        """Debug-form description: one line input for split, one input per
        script variable for merge."""
        if self._param.method == "split":
            return {
                "line": {
                    "name": "String",
                    "type": "line"
                }
            }
        return {k: {
            "name": o["name"],
            "type": "line"
        } for k, o in self.get_input_elements_from_text(self._param.script).items()}

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
    def _invoke(self, **kwargs):
        """Dispatch to _split or _merge according to the configured method."""
        if self.check_if_canceled("StringTransform processing"):
            return
        if self._param.method == "split":
            self._split(kwargs.get("line"))
        else:
            self._merge(kwargs)

    def _split(self, line: str | None = None):
        """Split `line` (or the canvas variable referenced by split_ref) on the
        configured delimiters; the segment list goes to "result"."""
        if self.check_if_canceled("StringTransform split processing"):
            return
        var = self._canvas.get_variable_value(self._param.split_ref) if not line else line
        if not var:
            var = ""
        assert isinstance(var, str), "The input variable is not a string: {}".format(type(var))
        self.set_input_value(self._param.split_ref, var)
        parts = re.split(r"(%s)" % ("|".join([re.escape(d) for d in self._param.delimiters])), var, flags=re.DOTALL)
        # The capturing pattern puts delimiters at odd indices; keep even ones.
        self.set_output("result", parts[::2])

    def _merge(self, kwargs: dict[str, str] | None = None):
        """Render the configured script with the given variable values and
        store the merged string in "result"."""
        if self.check_if_canceled("StringTransform merge processing"):
            return
        if kwargs is None:  # was a shared mutable default ({}) — fixed
            kwargs = {}
        script = self._param.script
        script, kwargs = self.get_kwargs(script, kwargs, self._param.delimiters[0])
        if self._is_jinjia2(script):
            template = _jinja2_sandbox.from_string(script)
            try:
                script = template.render(kwargs)
            except Exception:
                pass
        for k, v in kwargs.items():
            if not v:
                v = ""
            # NOTE(review): `k` is used as a regex pattern; if placeholder keys
            # can contain regex metacharacters this should use re.escape(k).
            script = re.sub(k, lambda match: v, script)
        self.set_output("result", script)

    def thoughts(self) -> str:
        return f"It's {self._param.method}ing."
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/component/string_transform.py",
"license": "Apache License 2.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/tools/arxiv.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import time
from abc import ABC
import arxiv
from agent.tools.base import ToolParamBase, ToolMeta, ToolBase
from common.connection_utils import timeout
class ArXivParam(ToolParamBase):
    """
    Define the ArXiv component parameters.
    """

    def __init__(self):
        # Tool metadata exposed to the LLM for function calling.
        self.meta:ToolMeta = {
            "name": "arxiv_search",
            "description": """arXiv is a free distribution service and an open-access archive for nearly 2.4 million scholarly articles in the fields of physics, mathematics, computer science, quantitative biology, quantitative finance, statistics, electrical engineering and systems science, and economics. Materials on this site are not peer-reviewed by arXiv.""",
            "parameters": {
                "query": {
                    "type": "string",
                    "description": "The search keywords to execute with arXiv. The keywords should be the most important words/terms(includes synonyms) from the original request.",
                    "default": "{sys.query}",
                    "required": True
                }
            }
        }
        super().__init__()
        # Maximum number of results to fetch per search.
        self.top_n = 12
        # Result ordering; validated in check() below.
        self.sort_by = 'submittedDate'

    def check(self):
        """Validate top_n and the sort criterion."""
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.sort_by, "ArXiv Search Sort_by",
                               ['submittedDate', 'lastUpdatedDate', 'relevance'])

    def get_input_form(self) -> dict[str, dict]:
        # Debug form: a single free-text query field.
        return {
            "query": {
                "name": "Query",
                "type": "line"
            }
        }
class ArXiv(ToolBase, ABC):
    """Search tool backed by the public arXiv API."""
    component_name = "ArXiv"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
    def _invoke(self, **kwargs):
        """Search arXiv for kwargs["query"] with retries.

        On success the formatted chunks land in "formalized_content"; on
        persistent failure the last error is stored in "_ERROR".
        """
        if self.check_if_canceled("ArXiv processing"):
            return
        if not kwargs.get("query"):
            # Nothing to search for; emit an empty result.
            self.set_output("formalized_content", "")
            return ""
        last_e = ""
        for _ in range(self._param.max_retries+1):
            if self.check_if_canceled("ArXiv processing"):
                return
            try:
                sort_choices = {"relevance": arxiv.SortCriterion.Relevance,
                                "lastUpdatedDate": arxiv.SortCriterion.LastUpdatedDate,
                                'submittedDate': arxiv.SortCriterion.SubmittedDate}
                arxiv_client = arxiv.Client()
                search = arxiv.Search(
                    query=kwargs["query"],
                    max_results=self._param.top_n,
                    sort_by=sort_choices[self._param.sort_by]
                )
                results = list(arxiv_client.results(search))
                if self.check_if_canceled("ArXiv processing"):
                    return
                self._retrieve_chunks(results,
                                      get_title=lambda r: r.title,
                                      get_url=lambda r: r.pdf_url,
                                      get_content=lambda r: r.summary)
                return self.output("formalized_content")
            except Exception as e:
                if self.check_if_canceled("ArXiv processing"):
                    return
                last_e = e
                logging.exception(f"ArXiv error: {e}")
                # Back off before the next retry.
                time.sleep(self._param.delay_after_error)
        if last_e:
            self.set_output("_ERROR", str(last_e))
            return f"ArXiv error: {last_e}"
        # Sentinel: reachable only if all retries failed without setting last_e.
        assert False, self.output()

    def thoughts(self) -> str:
        """Progress message shown while the search runs."""
        return """
Keywords: {}
Looking for the most relevant articles.
""".format(self.get_input().get("query", "-_-!"))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/tools/arxiv.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/tools/base.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import re
import time
from copy import deepcopy
import asyncio
from functools import partial
from typing import TypedDict, List, Any
from agent.component.base import ComponentParamBase, ComponentBase
from common.misc_utils import hash_str2int
from rag.prompts.generator import kb_prompt
from common.mcp_tool_call_conn import MCPToolCallSession, ToolCallSession
from timeit import default_timer as timer
from common.misc_utils import thread_pool_exec
class ToolParameter(TypedDict):
    """Declared schema of a single tool parameter, as exposed to the LLM/UI."""
    # JSON-schema style type name; `enum` restricts allowed values.
    type: str
    description: str
    displayDescription: str
    enum: List[str]
    required: bool
class ToolMeta(TypedDict):
    """Top-level tool description: name, description and parameter schemas."""
    name: str
    displayName: str
    description: str
    displayDescription: str
    parameters: dict[str, ToolParameter]
class LLMToolPluginCallSession(ToolCallSession):
    """Dispatches LLM-requested tool calls to registered tool objects or MCP sessions."""

    def __init__(self, tools_map: dict[str, object], callback: partial):
        # Maps tool name -> tool object (ToolBase-like) or MCPToolCallSession.
        self.tools_map = tools_map
        # Invoked after each call: callback(name, arguments, resp, elapsed_time=...).
        self.callback = callback

    def tool_call(self, name: str, arguments: dict[str, Any]) -> Any:
        # NOTE(review): asyncio.run raises if invoked from a running event
        # loop — confirm this sync entry point is only used from sync contexts.
        return asyncio.run(self.tool_call_async(name, arguments))

    async def tool_call_async(self, name: str, arguments: dict[str, Any]) -> Any:
        """Execute tool `name` with `arguments`, time it, and fire the callback."""
        assert name in self.tools_map, f"LLM tool {name} does not exist"
        st = timer()
        tool_obj = self.tools_map[name]
        if isinstance(tool_obj, MCPToolCallSession):
            # MCP sessions expose a sync API; run in a worker thread (60s cap).
            resp = await thread_pool_exec(tool_obj.tool_call, name, arguments, 60)
        else:
            # Prefer a native async implementation when the tool provides one.
            if hasattr(tool_obj, "invoke_async") and asyncio.iscoroutinefunction(tool_obj.invoke_async):
                resp = await tool_obj.invoke_async(**arguments)
            else:
                resp = await thread_pool_exec(tool_obj.invoke, **arguments)
        self.callback(name, arguments, resp, elapsed_time=timer()-st)
        return resp

    def get_tool_obj(self, name):
        return self.tools_map[name]
class ToolParamBase(ComponentParamBase):
    """Base parameter class for tools: seeds inputs and attributes from
    the subclass-provided ``self.meta`` and builds the function-calling spec."""

    def __init__(self):
        # Subclasses assign self.meta before calling super().__init__().
        super().__init__()
        self._init_inputs()
        self._init_attr_by_meta()

    def _init_inputs(self):
        """Deep-copy every declared parameter spec into self.inputs."""
        self.inputs = {name: deepcopy(spec) for name, spec in self.meta["parameters"].items()}

    def _init_attr_by_meta(self):
        """Expose each declared parameter as an attribute with its default."""
        for name, spec in self.meta["parameters"].items():
            if not hasattr(self, name):
                setattr(self, name, spec.get("default"))

    def get_meta(self):
        """Build the OpenAI-style function-calling description of this tool."""
        properties = {}
        for name, spec in self.meta["parameters"].items():
            entry = {
                "type": spec["type"],
                "description": spec["description"]
            }
            if "enum" in spec:
                entry["enum"] = spec["enum"]
            properties[name] = entry

        # Instance attributes (when present) override the static meta values.
        description = self.description if hasattr(self, "description") else self.meta["description"]
        function_name = self.function_name if hasattr(self, "function_name") else self.meta["name"]
        required = [name for name, spec in self.meta["parameters"].items() if spec["required"]]

        return {
            "type": "function",
            "function": {
                "name": function_name,
                "description": description,
                "parameters": {
                    "type": "object",
                    "properties": properties,
                    "required": required
                }
            }
        }
class ToolBase(ComponentBase):
    """Base class for agent tools: timing, error capture, result formatting."""

    def __init__(self, canvas, id, param: ComponentParamBase):
        from agent.canvas import Canvas  # Local import to avoid cyclic dependency
        assert isinstance(canvas, Canvas), "canvas must be an instance of Canvas"
        self._canvas = canvas
        self._id = id
        self._param = param
        # Fail fast on invalid configuration.
        self._param.check()

    def get_meta(self) -> dict[str, Any]:
        # Function-calling description is delegated to the parameter object.
        return self._param.get_meta()

    def invoke(self, **kwargs):
        """Synchronously run _invoke, recording timing and capturing errors."""
        if self.check_if_canceled("Tool processing"):
            return
        self.set_output("_created_time", time.perf_counter())
        try:
            res = self._invoke(**kwargs)
        except Exception as e:
            # Surface the failure via outputs instead of propagating.
            self._param.outputs["_ERROR"] = {"value": str(e)}
            logging.exception(e)
            res = str(e)
        self._param.debug_inputs = []
        self.set_output("_elapsed_time", time.perf_counter() - self.output("_created_time"))
        return res

    async def invoke_async(self, **kwargs):
        """
        Async wrapper for tool invocation.
        If `_invoke` is a coroutine, await it directly; otherwise run in a thread to avoid blocking.
        Mirrors the exception handling of `invoke`.
        """
        if self.check_if_canceled("Tool processing"):
            return
        self.set_output("_created_time", time.perf_counter())
        try:
            # Prefer a dedicated async implementation when provided.
            fn_async = getattr(self, "_invoke_async", None)
            if fn_async and asyncio.iscoroutinefunction(fn_async):
                res = await fn_async(**kwargs)
            elif asyncio.iscoroutinefunction(self._invoke):
                res = await self._invoke(**kwargs)
            else:
                res = await thread_pool_exec(self._invoke, **kwargs)
        except Exception as e:
            self._param.outputs["_ERROR"] = {"value": str(e)}
            logging.exception(e)
            res = str(e)
        self._param.debug_inputs = []
        self.set_output("_elapsed_time", time.perf_counter() - self.output("_created_time"))
        return res

    def _retrieve_chunks(self, res_list: list, get_title, get_url, get_content, get_score=None):
        """Convert raw search results into reference chunks.

        The accessor callables extract title/url/content (and optionally a
        relevance score) from each result.  Chunks are registered on the
        canvas and a prompt-ready digest goes to "formalized_content".
        """
        chunks = []
        aggs = []
        for r in res_list:
            content = get_content(r)
            if not content:
                continue
            # Strip inline base64 images, then cap content length.
            content = re.sub(r"!?\[[a-z]+\]\(data:image/png;base64,[ 0-9A-Za-z/_=+-]+\)", "", content)
            content = content[:10000]
            if not content:
                continue
            # Content hash doubles as chunk/doc id (NB: shadows builtin `id`).
            id = str(hash_str2int(content))
            title = get_title(r)
            url = get_url(r)
            score = get_score(r) if get_score else 1
            chunks.append({
                "chunk_id": id,
                "content": content,
                "doc_id": id,
                "docnm_kwd": title,
                "similarity": score,
                "url": url
            })
            aggs.append({
                "doc_name": title,
                "doc_id": id,
                "count": 1,
                "url": url
            })
        self._canvas.add_reference(chunks, aggs)
        self.set_output("formalized_content", "\n".join(kb_prompt({"chunks": chunks, "doc_aggs": aggs}, 200000, True)))

    def thoughts(self) -> str:
        return self._canvas.get_component_name(self._id) + " is running..."
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/tools/base.py",
"license": "Apache License 2.0",
"lines": 182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/tools/code_exec.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ast
import base64
import json
import logging
import os
from abc import ABC
from typing import Optional
from pydantic import BaseModel, Field, field_validator
from strenum import StrEnum
from agent.tools.base import ToolBase, ToolMeta, ToolParamBase
from common import settings
from common.connection_utils import timeout
class Language(StrEnum):
    """Canonical language identifiers accepted by the code sandbox."""
    PYTHON = "python"
    NODEJS = "nodejs"
class CodeExecutionRequest(BaseModel):
    """Payload for the sandbox /run endpoint."""

    code_b64: str = Field(..., description="Base64 encoded code string")
    language: str = Field(default=Language.PYTHON.value, description="Programming language")
    arguments: Optional[dict] = Field(default={}, description="Arguments")

    @field_validator("code_b64")
    @classmethod
    def validate_base64(cls, v: str) -> str:
        """Reject payloads that are not valid base64."""
        try:
            base64.b64decode(v, validate=True)
        except Exception as e:
            raise ValueError(f"Invalid base64 encoding: {str(e)}")
        return v

    @field_validator("language", mode="before")
    @classmethod
    def normalize_language(cls, v) -> str:
        """Map language aliases onto the two canonical sandbox names."""
        if isinstance(v, str):
            aliases = {
                "python": "python",
                "python3": "python",
                "javascript": "nodejs",
                "nodejs": "nodejs",
            }
            canonical = aliases.get(v.lower())
            if canonical is not None:
                return canonical
        raise ValueError(f"Unsupported language: {v}")
class CodeExecParam(ToolParamBase):
    """
    Define the code sandbox component parameters.
    """

    def __init__(self):
        # Tool metadata shown to the LLM; the description embeds a usage
        # example for each supported runtime.
        self.meta: ToolMeta = {
            "name": "execute_code",
            "description": """
This tool has a sandbox that can execute code written in 'Python'/'Javascript'. It receives a piece of code and return a Json string.
Here's a code example for Python(`main` function MUST be included):
def main() -> dict:
    \"\"\"
    Generate Fibonacci numbers within 100.
    \"\"\"
    def fibonacci_recursive(n):
        if n <= 1:
            return n
        else:
            return fibonacci_recursive(n-1) + fibonacci_recursive(n-2)
    return {
        "result": fibonacci_recursive(100),
    }

Here's a code example for Javascript(`main` function MUST be included and exported):
const axios = require('axios');
async function main(args) {
    try {
        const response = await axios.get('https://github.com/infiniflow/ragflow');
        console.log('Body:', response.data);
    } catch (error) {
        console.error('Error:', error.message);
    }
}
module.exports = { main };
""",
            "parameters": {
                "lang": {
                    "type": "string",
                    "description": "The programming language of this piece of code.",
                    "enum": ["python", "javascript"],
                    "required": True,
                },
                "script": {"type": "string", "description": "A piece of code in right format. There MUST be main function.", "required": True},
            },
        }
        super().__init__()
        # Defaults used when the caller omits lang/script.
        self.lang = Language.PYTHON.value
        self.script = 'def main(arg1: str, arg2: str) -> dict: return {"result": arg1 + arg2}'
        self.arguments = {}
        self.outputs = {"result": {"value": "", "type": "object"}}

    def check(self):
        """Validate the language (aliases allowed) and a non-empty script."""
        self.check_valid_value(self.lang, "Support languages", ["python", "python3", "nodejs", "javascript"])
        self.check_empty(self.script, "Script")

    def get_input_form(self) -> dict[str, dict]:
        # One free-text input per configured argument.
        res = {}
        for k, v in self.arguments.items():
            res[k] = {"type": "line", "name": k}
        return res
class CodeExec(ToolBase, ABC):
    """Executes user-supplied Python/Javascript in a sandbox and maps the
    result onto the component's declared outputs."""
    component_name = "CodeExec"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10 * 60)))
    def _invoke(self, **kwargs):
        """Collect language/script/arguments and run the code."""
        if self.check_if_canceled("CodeExec processing"):
            return
        lang = kwargs.get("lang", self._param.lang)
        script = kwargs.get("script", self._param.script)
        arguments = {}
        for k, v in self._param.arguments.items():
            # Explicit kwarg wins; otherwise resolve the canvas variable ref.
            if kwargs.get(k):
                arguments[k] = kwargs[k]
                continue
            arguments[k] = self._canvas.get_variable_value(v) if v else None
        return self._execute_code(language=lang, code=script, arguments=arguments)

    def _execute_code(self, language: str, code: str, arguments: dict):
        """Run code via the sandbox provider system, falling back to a direct
        HTTP call to the sandbox service when no provider is configured."""
        import requests

        if self.check_if_canceled("CodeExec execution"):
            return self.output()
        try:
            # Try using the new sandbox provider system first
            try:
                from agent.sandbox.client import execute_code as sandbox_execute_code

                if self.check_if_canceled("CodeExec execution"):
                    return

                # Execute code using the provider system
                result = sandbox_execute_code(
                    code=code,
                    language=language,
                    timeout=int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10 * 60)),
                    arguments=arguments
                )

                if self.check_if_canceled("CodeExec execution"):
                    return

                # Process the result
                if result.stderr:
                    self.set_output("_ERROR", result.stderr)
                    return

                parsed_stdout = self._deserialize_stdout(result.stdout)
                logging.info(f"[CodeExec]: Provider system -> {parsed_stdout}")
                self._populate_outputs(parsed_stdout, result.stdout)
                return
            except (ImportError, RuntimeError) as provider_error:
                # Provider system not available or not configured, fall back to HTTP
                logging.info(f"[CodeExec]: Provider system not available, using HTTP fallback: {provider_error}")

            # Fallback to direct HTTP request
            code_b64 = self._encode_code(code)
            code_req = CodeExecutionRequest(code_b64=code_b64, language=language, arguments=arguments).model_dump()
        except Exception as e:
            if self.check_if_canceled("CodeExec execution"):
                return self.output()
            self.set_output("_ERROR", "construct code request error: " + str(e))
            return self.output()

        try:
            if self.check_if_canceled("CodeExec execution"):
                self.set_output("_ERROR", "Task has been canceled")
                return self.output()
            resp = requests.post(url=f"http://{settings.SANDBOX_HOST}:9385/run", json=code_req, timeout=int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10 * 60)))
            logging.info(f"http://{settings.SANDBOX_HOST}:9385/run, code_req: {code_req}, resp.status_code {resp.status_code}:")
            if self.check_if_canceled("CodeExec execution"):
                return "Task has been canceled"
            if resp.status_code != 200:
                resp.raise_for_status()
            body = resp.json()
            if body:
                stderr = body.get("stderr")
                if stderr:
                    self.set_output("_ERROR", stderr)
                    return self.output()
                raw_stdout = body.get("stdout", "")
                parsed_stdout = self._deserialize_stdout(raw_stdout)
                logging.info(f"[CodeExec]: http://{settings.SANDBOX_HOST}:9385/run -> {parsed_stdout}")
                self._populate_outputs(parsed_stdout, raw_stdout)
            else:
                self.set_output("_ERROR", "There is no response from sandbox")
                return self.output()
        except Exception as e:
            if self.check_if_canceled("CodeExec execution"):
                return self.output()
            self.set_output("_ERROR", "Exception executing code: " + str(e))
        return self.output()

    def _encode_code(self, code: str) -> str:
        # Sandbox API expects base64-encoded source.
        return base64.b64encode(code.encode("utf-8")).decode("utf-8")

    def thoughts(self) -> str:
        return "Running a short script to process data."

    def _deserialize_stdout(self, stdout: str):
        """Best-effort parse of stdout: JSON first, then a Python literal,
        else the raw text."""
        text = str(stdout).strip()
        if not text:
            return ""
        for loader in (json.loads, ast.literal_eval):
            try:
                return loader(text)
            except Exception:
                continue
        return text

    def _coerce_output_value(self, value, expected_type: Optional[str]):
        """Coerce `value` to a declared output type ("string", "number",
        "boolean", "object", "array", "array<...>").

        On any failure the original value is returned unchanged.
        """
        if expected_type is None:
            return value
        etype = expected_type.strip().lower()
        inner_type = None
        if etype.startswith("array<") and etype.endswith(">"):
            # e.g. "array<number>" -> etype "array", inner_type "number"
            inner_type = etype[6:-1].strip()
            etype = "array"
        try:
            if etype == "string":
                return "" if value is None else str(value)
            if etype == "number":
                if value is None or value == "":
                    return None
                if isinstance(value, (int, float)):
                    return value
                if isinstance(value, str):
                    try:
                        return float(value)
                    except Exception:
                        return value
                return float(value)
            if etype == "boolean":
                if isinstance(value, bool):
                    return value
                if isinstance(value, str):
                    lv = value.lower()
                    if lv in ("true", "1", "yes", "y", "on"):
                        return True
                    if lv in ("false", "0", "no", "n", "off"):
                        return False
                return bool(value)
            if etype == "array":
                candidate = value
                if isinstance(candidate, str):
                    # Strings may hold a serialized list; try to parse.
                    parsed = self._deserialize_stdout(candidate)
                    candidate = parsed
                if isinstance(candidate, tuple):
                    candidate = list(candidate)
                if not isinstance(candidate, list):
                    candidate = [] if candidate is None else [candidate]
                if inner_type == "string":
                    return ["" if v is None else str(v) for v in candidate]
                if inner_type == "number":
                    coerced = []
                    for v in candidate:
                        try:
                            if v is None or v == "":
                                coerced.append(None)
                            elif isinstance(v, (int, float)):
                                coerced.append(v)
                            else:
                                coerced.append(float(v))
                        except Exception:
                            coerced.append(v)
                    return coerced
                return candidate
            if etype == "object":
                if isinstance(value, dict):
                    return value
                if isinstance(value, str):
                    parsed = self._deserialize_stdout(value)
                    if isinstance(parsed, dict):
                        return parsed
                return value
        except Exception:
            return value
        return value

    def _populate_outputs(self, parsed_stdout, raw_stdout: str):
        """Distribute parsed sandbox stdout onto the declared outputs.

        dict results are matched by (dotted) key, list results by position,
        scalar results go to the first non-internal output.
        """
        outputs_items = list(self._param.outputs.items())
        logging.info(f"[CodeExec]: outputs schema keys: {[k for k, _ in outputs_items]}")
        if not outputs_items:
            return
        if isinstance(parsed_stdout, dict):
            for key, meta in outputs_items:
                if key.startswith("_"):
                    continue
                val = self._get_by_path(parsed_stdout, key)
                # With a single declared output, fall back to the whole dict.
                if val is None and len(outputs_items) == 1:
                    val = parsed_stdout
                coerced = self._coerce_output_value(val, meta.get("type"))
                logging.info(f"[CodeExec]: populate dict key='{key}' raw='{val}' coerced='{coerced}'")
                self.set_output(key, coerced)
            return
        if isinstance(parsed_stdout, (list, tuple)):
            for idx, (key, meta) in enumerate(outputs_items):
                if key.startswith("_"):
                    continue
                val = parsed_stdout[idx] if idx < len(parsed_stdout) else None
                coerced = self._coerce_output_value(val, meta.get("type"))
                logging.info(f"[CodeExec]: populate list key='{key}' raw='{val}' coerced='{coerced}'")
                self.set_output(key, coerced)
            return
        default_val = parsed_stdout if parsed_stdout is not None else raw_stdout
        for idx, (key, meta) in enumerate(outputs_items):
            if key.startswith("_"):
                continue
            val = default_val if idx == 0 else None
            coerced = self._coerce_output_value(val, meta.get("type"))
            logging.info(f"[CodeExec]: populate scalar key='{key}' raw='{val}' coerced='{coerced}'")
            self.set_output(key, coerced)

    def _get_by_path(self, data, path: str):
        """Resolve a dotted path (dict keys / list indices) within `data`.

        Returns None when any segment is missing or not traversable.
        """
        if not path:
            return None
        cur = data
        for part in path.split("."):
            part = part.strip()
            if not part:
                return None
            if isinstance(cur, dict):
                cur = cur.get(part)
            elif isinstance(cur, list):
                try:
                    idx = int(part)
                    cur = cur[idx]
                except Exception:
                    return None
            else:
                return None
            if cur is None:
                return None
        logging.info(f"[CodeExec]: resolve path '{path}' -> {cur}")
        return cur
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/tools/code_exec.py",
"license": "Apache License 2.0",
"lines": 327,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/tools/duckduckgo.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import time
from abc import ABC
from duckduckgo_search import DDGS
from agent.tools.base import ToolMeta, ToolParamBase, ToolBase
from common.connection_utils import timeout
class DuckDuckGoParam(ToolParamBase):
    """
    Define the DuckDuckGo component parameters.
    """

    def __init__(self):
        # Tool metadata exposed to the LLM for function calling.
        self.meta:ToolMeta = {
            "name": "duckduckgo_search",
            "description": "DuckDuckGo is a search engine focused on privacy. It offers search capabilities for web pages, images, and provides translation services. DuckDuckGo also features a private AI chat interface, providing users with an AI assistant that prioritizes data protection.",
            "parameters": {
                "query": {
                    "type": "string",
                    "description": "The search keywords to execute with DuckDuckGo. The keywords should be the most important words/terms(includes synonyms) from the original request.",
                    "default": "{sys.query}",
                    "required": True
                },
                "channel": {
                    "type": "string",
                    "description": "default:general. The category of the search. `news` is useful for retrieving real-time updates, particularly about politics, sports, and major current events covered by mainstream media sources. `general` is for broader, more general-purpose searches that may include a wide range of sources.",
                    "enum": ["general", "news"],
                    "default": "general",
                    "required": False,
                },
            }
        }
        super().__init__()
        # Maximum number of results per search.
        self.top_n = 10
        # NOTE(review): default is "text" while the declared enum and the input
        # form use "general"/"news" — confirm which vocabulary is canonical.
        self.channel = "text"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.channel, "Web Search or News", ["text", "news"])

    def get_input_form(self) -> dict[str, dict]:
        # Debug form: free-text query plus a channel selector.
        return {
            "query": {
                "name": "Query",
                "type": "line"
            },
            "channel": {
                "name": "Channel",
                "type": "options",
                "value": "general",
                "options": ["general", "news"]
            }
        }
class DuckDuckGo(ToolBase, ABC):
    """Web/news search tool backed by duckduckgo_search."""
    component_name = "DuckDuckGo"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
    def _invoke(self, **kwargs):
        """Search DuckDuckGo for kwargs["query"] with retries.

        The "channel" argument selects general web search or news search.
        On success the formatted chunks land in "formalized_content" (raw
        results in "json"); on persistent failure "_ERROR" is set.
        """
        if self.check_if_canceled("DuckDuckGo processing"):
            return
        if not kwargs.get("query"):
            self.set_output("formalized_content", "")
            return ""
        # BUG FIX: the declared parameter (see DuckDuckGoParam.meta and the
        # input form) is "channel", but this previously read "topic", so news
        # requests silently fell back to general search.  "topic" is still
        # honored for backward compatibility; the param default "text" is
        # treated as "general".
        channel = kwargs.get("channel", kwargs.get("topic", "general"))
        last_e = ""
        for _ in range(self._param.max_retries+1):
            if self.check_if_canceled("DuckDuckGo processing"):
                return
            try:
                with DDGS() as ddgs:
                    if self.check_if_canceled("DuckDuckGo processing"):
                        return
                    if channel in ("general", "text"):
                        # {'title': '', 'href': '', 'body': ''}
                        duck_res = ddgs.text(kwargs["query"], max_results=self._param.top_n)
                    else:
                        # {'date': '', 'title': '', 'body': '', 'url': '', 'image': '', 'source': ''}
                        duck_res = ddgs.news(kwargs["query"], max_results=self._param.top_n)
                if self.check_if_canceled("DuckDuckGo processing"):
                    return
                self._retrieve_chunks(duck_res,
                                      get_title=lambda r: r["title"],
                                      get_url=lambda r: r.get("href", r.get("url")),
                                      get_content=lambda r: r["body"])
                self.set_output("json", duck_res)
                return self.output("formalized_content")
            except Exception as e:
                if self.check_if_canceled("DuckDuckGo processing"):
                    return
                last_e = e
                logging.exception(f"DuckDuckGo error: {e}")
                # Back off before retrying.
                time.sleep(self._param.delay_after_error)
        if last_e:
            self.set_output("_ERROR", str(last_e))
            return f"DuckDuckGo error: {last_e}"
        # Sentinel: reachable only if all retries failed without an error.
        assert False, self.output()

    def thoughts(self) -> str:
        """Progress message shown while the search runs."""
        return """
Keywords: {}
Looking for the most relevant articles.
""".format(self.get_input().get("query", "-_-!"))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/tools/duckduckgo.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/tools/email.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
from abc import ABC
import json
import smtplib
import logging
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from email.utils import formataddr
from agent.tools.base import ToolParamBase, ToolBase, ToolMeta
from common.connection_utils import timeout
class EmailParam(ToolParamBase):
    """
    Define the Email component parameters.

    Holds both the LLM-facing tool schema (``self.meta``) and the fixed SMTP
    connection settings configured per component instance.
    """

    def __init__(self):
        # Tool schema exposed to the LLM for function calling. The description
        # strings here are read by the model, so they must be clear English.
        self.meta: ToolMeta = {
            "name": "email",
            "description": "The email is a method of electronic communication for sending and receiving information through the Internet. This tool helps users to send emails to one person or to multiple recipients with support for CC, BCC, file attachments, and markdown-to-HTML conversion.",
            "parameters": {
                "to_email": {
                    "type": "string",
                    "description": "The target email address.",
                    "default": "{sys.query}",
                    "required": True
                },
                "cc_email": {
                    "type": "string",
                    # Fixed wording: was "The other email addresses needs to be
                    # send to. Comma splited." — broken grammar/typo in the
                    # schema text the LLM reads.
                    "description": "Other email addresses to send a copy to. Comma separated.",
                    "default": "",
                    "required": False
                },
                "content": {
                    "type": "string",
                    "description": "The content of the email.",
                    "default": "",
                    "required": False
                },
                "subject": {
                    "type": "string",
                    "description": "The subject/title of the email.",
                    "default": "",
                    "required": False
                }
            }
        }
        super().__init__()
        # Fixed configuration parameters
        self.smtp_server = ""    # SMTP server address
        self.smtp_port = 465     # SMTP port (NOTE: 465 is implicit-TLS; the sender uses STARTTLS)
        self.email = ""          # Sender email
        self.smtp_username = ""  # Optional SMTP login username, fallback to sender email
        self.password = ""       # Email authorization code
        self.sender_name = ""    # Sender display name

    def check(self):
        """Validate that all mandatory SMTP settings were configured."""
        self.check_empty(self.smtp_server, "SMTP Server")
        self.check_empty(self.email, "Email")
        self.check_empty(self.password, "Password")
        self.check_empty(self.sender_name, "Sender Name")

    def get_input_form(self) -> dict[str, dict]:
        """UI form definition for manual (non-LLM) invocation."""
        return {
            "to_email": {
                "name": "To ",
                "type": "line"
            },
            "subject": {
                "name": "Subject",
                "type": "line",
                "optional": True
            },
            "cc_email": {
                "name": "CC To",
                "type": "line",
                "optional": True
            },
        }
class Email(ToolBase, ABC):
    """Agent tool that sends an email over SMTP with STARTTLS, with retries."""

    component_name = "Email"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60)))
    def _invoke(self, **kwargs):
        """Send an email built from `kwargs` (to_email/cc_email/subject/content).

        Retries up to `max_retries` times on connection-level SMTP failures;
        authentication errors and unexpected errors abort immediately.
        Returns True on success, False (or "") on failure, None if canceled.
        """
        if self.check_if_canceled("Email processing"):
            return

        # No recipient at all: report failure without raising.
        if not kwargs.get("to_email"):
            self.set_output("success", False)
            return ""

        last_e = ""
        for _ in range(self._param.max_retries+1):
            if self.check_if_canceled("Email processing"):
                return

            try:
                # Parse JSON string passed from upstream
                # NOTE(review): kwargs is already a dict — nothing here calls
                # json.loads, so the JSONDecodeError handler below looks
                # unreachable; confirm before removing.
                email_data = kwargs
                # Validate required fields
                if "to_email" not in email_data:
                    self.set_output("_ERROR", "Missing required field: to_email")
                    self.set_output("success", False)
                    return False

                # Create email object
                msg = MIMEMultipart('alternative')

                # Properly handle sender name encoding
                msg['From'] = formataddr((str(Header(self._param.sender_name,'utf-8')), self._param.email))
                msg['To'] = email_data["to_email"]
                if email_data.get("cc_email"):
                    msg['Cc'] = email_data["cc_email"]
                msg['Subject'] = Header(email_data.get("subject", "No Subject"), 'utf-8').encode()

                # Use content from email_data or default content
                email_content = email_data.get("content", "No content provided")
                # Body is sent as HTML (upstream may emit markdown-converted HTML).
                # msg.attach(MIMEText(email_content, 'plain', 'utf-8'))
                msg.attach(MIMEText(email_content, 'html', 'utf-8'))

                # Connect to SMTP server and send
                # NOTE(review): default port is 465 (implicit TLS) but this code
                # uses plain SMTP + STARTTLS, which normally runs on 587 —
                # confirm against the deployed server configuration.
                logging.info(f"Connecting to SMTP server {self._param.smtp_server}:{self._param.smtp_port}")

                if self.check_if_canceled("Email processing"):
                    return

                context = smtplib.ssl.create_default_context()
                with smtplib.SMTP(self._param.smtp_server, self._param.smtp_port) as server:
                    server.ehlo()
                    server.starttls(context=context)
                    server.ehlo()
                    # Login
                    smtp_username = self._param.smtp_username or self._param.email
                    logging.info(f"Attempting to login with username: {smtp_username}")
                    server.login(smtp_username, self._param.password)

                    # Get all recipient list
                    recipients = [email_data["to_email"]]
                    if email_data.get("cc_email"):
                        recipients.extend(email_data["cc_email"].split(','))

                    # Send email
                    logging.info(f"Sending email to recipients: {recipients}")

                    if self.check_if_canceled("Email processing"):
                        return

                    try:
                        server.send_message(msg, self._param.email, recipients)
                        success = True
                    except Exception as e:
                        logging.error(f"Error during send_message: {str(e)}")
                        # Try alternative method: fall back to the lower-level
                        # sendmail API with the serialized message.
                        server.sendmail(self._param.email, recipients, msg.as_string())
                        success = True

                    try:
                        server.quit()
                    except Exception as e:
                        # Ignore errors when closing connection
                        logging.warning(f"Non-fatal error during connection close: {str(e)}")

                self.set_output("success", success)
                return success

            except json.JSONDecodeError:
                error_msg = "Invalid JSON format in input"
                logging.error(error_msg)
                self.set_output("_ERROR", error_msg)
                self.set_output("success", False)
                return False

            except smtplib.SMTPAuthenticationError:
                # Bad credentials won't fix themselves — fail immediately.
                error_msg = "SMTP Authentication failed. Please check your SMTP username(email) and authorization code."
                logging.error(error_msg)
                self.set_output("_ERROR", error_msg)
                self.set_output("success", False)
                return False

            except smtplib.SMTPConnectError:
                # Transient network failure — record and retry after a delay.
                error_msg = f"Failed to connect to SMTP server {self._param.smtp_server}:{self._param.smtp_port}"
                logging.error(error_msg)
                last_e = error_msg
                time.sleep(self._param.delay_after_error)

            except smtplib.SMTPException as e:
                # Other SMTP protocol error — also retried.
                error_msg = f"SMTP error occurred: {str(e)}"
                logging.error(error_msg)
                last_e = error_msg
                time.sleep(self._param.delay_after_error)

            except Exception as e:
                error_msg = f"Unexpected error: {str(e)}"
                logging.error(error_msg)
                self.set_output("_ERROR", error_msg)
                self.set_output("success", False)
                return False

        # All retries exhausted on a retryable error.
        if last_e:
            self.set_output("_ERROR", str(last_e))
            return False

        assert False, self.output()

    def thoughts(self) -> str:
        """Progress message shown to the user while the email is being sent."""
        inputs = self.get_input()
        return """
To: {}
Subject: {}
Your email is on its way—sit tight!
""".format(inputs.get("to_email", "-_-!"), inputs.get("subject", "-_-!"))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/tools/email.py",
"license": "Apache License 2.0",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/tools/exesql.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import re
from abc import ABC
import pandas as pd
import pymysql
import psycopg2
import pyodbc
from agent.tools.base import ToolParamBase, ToolBase, ToolMeta
from common.connection_utils import timeout
class ExeSQLParam(ToolParamBase):
    """
    Define the ExeSQL component parameters.

    Carries the connection settings for the target database plus the LLM-facing
    tool schema. `check()` also blocks access to ragflow's own backing store.
    """

    def __init__(self):
        # Tool schema exposed to the LLM for function calling.
        self.meta: ToolMeta = {
            "name": "execute_sql",
            "description": "This is a tool that can execute SQL.",
            "parameters": {
                "sql": {
                    "type": "string",
                    "description": "The SQL needs to be executed.",
                    "default": "{sys.query}",
                    "required": True
                }
            }
        }
        super().__init__()
        self.db_type = "mysql"   # one of the types validated in check()
        self.database = ""
        self.username = ""
        self.host = ""
        self.port = 3306
        self.password = ""
        self.max_records = 1024  # cap on rows fetched per statement

    def check(self):
        """Validate connection settings and refuse ragflow's own database."""
        self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgres', 'mariadb', 'mssql', 'IBM DB2', 'trino', 'oceanbase'])
        self.check_empty(self.database, "Database name")
        self.check_empty(self.username, "database username")
        self.check_empty(self.host, "IP Address")
        self.check_positive_integer(self.port, "IP Port")
        # Trino commonly authenticates without a password over plain HTTP.
        if self.db_type != "trino":
            self.check_empty(self.password, "Database password")
        self.check_positive_integer(self.max_records, "Maximum number of records")
        # Guard against pointing this tool at ragflow's internal MySQL store
        # (default host/credentials), which would expose application data.
        if self.database == "rag_flow":
            if self.host == "ragflow-mysql":
                # Fixed message: was "it dose not support ..." (typo).
                raise ValueError("For the security reason, it does not support the database named rag_flow.")
            if self.password == "infini_rag_flow":
                # Fixed message: this branch rejects the default internal
                # password, not the database name (previous message was a
                # copy-paste of the one above).
                raise ValueError("For the security reason, it does not support the default password of the rag_flow database.")

    def get_input_form(self) -> dict[str, dict]:
        """UI form definition for manual (non-LLM) invocation."""
        return {
            "sql": {
                "name": "SQL",
                "type": "line"
            }
        }
class ExeSQL(ToolBase, ABC):
    """Agent tool that runs ';'-separated SQL statements against one of several
    supported backends and returns results as JSON plus markdown tables."""

    component_name = "ExeSQL"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60)))
    def _invoke(self, **kwargs):
        """Execute the SQL in kwargs["sql"] after variable substitution.

        Sets two outputs: "json" (list of per-statement record lists) and
        "formalized_content" (markdown tables joined by blank lines), and
        returns the latter. Raises on missing SQL or connection failure.
        """
        if self.check_if_canceled("ExeSQL processing"):
            return

        def convert_decimals(obj):
            # Recursively convert DB scalar types into JSON-safe values.
            from decimal import Decimal
            import math
            if isinstance(obj, float):
                # Handle NaN and Infinity which are not valid JSON values
                if math.isnan(obj) or math.isinf(obj):
                    return None
                return obj
            if isinstance(obj, Decimal):
                return float(obj)  # or str(obj)
            elif isinstance(obj, dict):
                return {k: convert_decimals(v) for k, v in obj.items()}
            elif isinstance(obj, list):
                return [convert_decimals(item) for item in obj]
            return obj

        sql = kwargs.get("sql")
        if not sql:
            raise Exception("SQL for `ExeSQL` MUST not be empty.")
        if self.check_if_canceled("ExeSQL processing"):
            return

        # Substitute canvas variable references ({...}) embedded in the SQL.
        # Non-string values are JSON-encoded so they splice in as literals.
        vars = self.get_input_elements_from_text(sql)
        args = {}
        for k, o in vars.items():
            args[k] = o["value"]
            if not isinstance(args[k], str):
                try:
                    args[k] = json.dumps(args[k], ensure_ascii=False)
                except Exception:
                    args[k] = str(args[k])
            self.set_input_value(k, args[k])
        sql = self.string_format(sql, args)
        if self.check_if_canceled("ExeSQL processing"):
            return

        # Multiple statements may be separated by ';'.
        sqls = sql.split(";")

        # --- Open a connection for the configured backend ---------------
        if self._param.db_type in ["mysql", "mariadb"]:
            db = pymysql.connect(db=self._param.database, user=self._param.username, host=self._param.host,
                                 port=self._param.port, password=self._param.password)
        elif self._param.db_type == 'oceanbase':
            db = pymysql.connect(db=self._param.database, user=self._param.username, host=self._param.host,
                                 port=self._param.port, password=self._param.password, charset='utf8mb4')
        elif self._param.db_type == 'postgres':
            db = psycopg2.connect(dbname=self._param.database, user=self._param.username, host=self._param.host,
                                  port=self._param.port, password=self._param.password)
        elif self._param.db_type == 'mssql':
            conn_str = (
                r'DRIVER={ODBC Driver 17 for SQL Server};'
                r'SERVER=' + self._param.host + ',' + str(self._param.port) + ';'
                r'DATABASE=' + self._param.database + ';'
                r'UID=' + self._param.username + ';'
                r'PWD=' + self._param.password
            )
            db = pyodbc.connect(conn_str)
        elif self._param.db_type == 'trino':
            # trino is an optional dependency — import lazily.
            try:
                import trino
                from trino.auth import BasicAuthentication
            except Exception:
                raise Exception("Missing dependency 'trino'. Please install: pip install trino")

            def _parse_catalog_schema(db: str):
                # Accept "catalog.schema", "catalog/schema" or bare "catalog".
                if not db:
                    return None, None
                if "." in db:
                    c, s = db.split(".", 1)
                elif "/" in db:
                    c, s = db.split("/", 1)
                else:
                    c, s = db, "default"
                return c, s

            catalog, schema = _parse_catalog_schema(self._param.database)
            if not catalog:
                raise Exception("For Trino, `database` must be 'catalog.schema' or at least 'catalog'.")
            http_scheme = "https" if os.environ.get("TRINO_USE_TLS", "0") == "1" else "http"
            auth = None
            # Basic auth only makes sense over TLS.
            if http_scheme == "https" and self._param.password:
                auth = BasicAuthentication(self._param.username, self._param.password)
            try:
                db = trino.dbapi.connect(
                    host=self._param.host,
                    port=int(self._param.port or 8080),
                    user=self._param.username or "ragflow",
                    catalog=catalog,
                    schema=schema or "default",
                    http_scheme=http_scheme,
                    auth=auth
                )
            except Exception as e:
                raise Exception("Database Connection Failed! \n" + str(e))
        elif self._param.db_type == 'IBM DB2':
            # DB2 uses its own non-DBAPI client, so the whole execute/fetch
            # cycle happens here and returns without reaching the cursor path.
            import ibm_db
            conn_str = (
                f"DATABASE={self._param.database};"
                f"HOSTNAME={self._param.host};"
                f"PORT={self._param.port};"
                f"PROTOCOL=TCPIP;"
                f"UID={self._param.username};"
                f"PWD={self._param.password};"
            )
            try:
                conn = ibm_db.connect(conn_str, "", "")
            except Exception as e:
                raise Exception("Database Connection Failed! \n" + str(e))
            sql_res = []
            formalized_content = []
            for single_sql in sqls:
                if self.check_if_canceled("ExeSQL processing"):
                    ibm_db.close(conn)
                    return
                # Strip markdown code fences and LLM citation markers.
                single_sql = single_sql.replace("```", "").strip()
                if not single_sql:
                    continue
                single_sql = re.sub(r"\[ID:[0-9]+\]", "", single_sql)
                stmt = ibm_db.exec_immediate(conn, single_sql)
                rows = []
                row = ibm_db.fetch_assoc(stmt)
                while row and len(rows) < self._param.max_records:
                    if self.check_if_canceled("ExeSQL processing"):
                        ibm_db.close(conn)
                        return
                    rows.append(row)
                    row = ibm_db.fetch_assoc(stmt)
                if not rows:
                    sql_res.append({"content": "No record in the database!"})
                    continue
                df = pd.DataFrame(rows)
                # Normalize datetimes to date strings for display/JSON.
                for col in df.columns:
                    if pd.api.types.is_datetime64_any_dtype(df[col]):
                        df[col] = df[col].dt.strftime("%Y-%m-%d")
                df = df.where(pd.notnull(df), None)
                sql_res.append(convert_decimals(df.to_dict(orient="records")))
                formalized_content.append(df.to_markdown(index=False, floatfmt=".6f"))
            ibm_db.close(conn)
            self.set_output("json", sql_res)
            self.set_output("formalized_content", "\n\n".join(formalized_content))
            return self.output("formalized_content")

        # --- Generic DBAPI path (everything except IBM DB2) --------------
        try:
            cursor = db.cursor()
        except Exception as e:
            raise Exception("Database Connection Failed! \n" + str(e))
        sql_res = []
        formalized_content = []
        for single_sql in sqls:
            if self.check_if_canceled("ExeSQL processing"):
                cursor.close()
                db.close()
                return
            single_sql = single_sql.replace('```','')
            if not single_sql:
                continue
            # Remove LLM citation markers like [ID:12] before execution.
            single_sql = re.sub(r"\[ID:[0-9]+\]", "", single_sql)
            cursor.execute(single_sql)
            if cursor.rowcount == 0:
                sql_res.append({"content": "No record in the database!"})
                break
            if self._param.db_type == 'mssql':
                single_res = pd.DataFrame.from_records(cursor.fetchmany(self._param.max_records),
                                                       columns=[desc[0] for desc in cursor.description])
            else:
                single_res = pd.DataFrame([i for i in cursor.fetchmany(self._param.max_records)])
                single_res.columns = [i[0] for i in cursor.description]
            # Normalize datetimes to date strings for display/JSON.
            for col in single_res.columns:
                if pd.api.types.is_datetime64_any_dtype(single_res[col]):
                    single_res[col] = single_res[col].dt.strftime('%Y-%m-%d')
            single_res = single_res.where(pd.notnull(single_res), None)
            sql_res.append(convert_decimals(single_res.to_dict(orient='records')))
            formalized_content.append(single_res.to_markdown(index=False, floatfmt=".6f"))
        cursor.close()
        db.close()
        self.set_output("json", sql_res)
        self.set_output("formalized_content", "\n\n".join(formalized_content))
        return self.output("formalized_content")

    def thoughts(self) -> str:
        """Progress message shown to the user while the query runs."""
        return "Query sent—waiting for the data."
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/tools/exesql.py",
"license": "Apache License 2.0",
"lines": 249,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/tools/github.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import time
from abc import ABC
import requests
from agent.tools.base import ToolParamBase, ToolMeta, ToolBase
from common.connection_utils import timeout
class GitHubParam(ToolParamBase):
    """
    Define the GitHub component parameters.
    """

    def __init__(self):
        # Tool schema exposed to the LLM for function calling.
        self.meta:ToolMeta = {
            "name": "github_search",
            "description": """GitHub repository search is a feature that enables users to find specific repositories on the GitHub platform. This search functionality allows users to locate projects, codebases, and other content hosted on GitHub based on various criteria.""",
            "parameters": {
                "query": {
                    "type": "string",
                    "description": "The search keywords to execute with GitHub. The keywords should be the most important words/terms(includes synonyms) from the original request.",
                    "default": "{sys.query}",
                    "required": True
                }
            }
        }
        super().__init__()
        # Maximum number of repositories returned per search.
        self.top_n = 10

    def check(self):
        """Validate user-configurable parameters."""
        self.check_positive_integer(self.top_n, "Top N")

    def get_input_form(self) -> dict[str, dict]:
        """UI form definition for manual (non-LLM) invocation."""
        return {
            "query": {
                "name": "Query",
                "type": "line"
            }
        }
class GitHub(ToolBase, ABC):
    """Agent tool that searches GitHub repositories via the public REST API."""

    component_name = "GitHub"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
    def _invoke(self, **kwargs):
        """Search repositories matching kwargs["query"], sorted by stars.

        Stores the raw item list on the "json" output, pushes chunks through
        `_retrieve_chunks`, and returns the "formalized_content" output.
        Retries up to `max_retries` times on any error.
        """
        if self.check_if_canceled("GitHub processing"):
            return

        if not kwargs.get("query"):
            self.set_output("formalized_content", "")
            return ""

        last_e = ""
        for _ in range(self._param.max_retries + 1):
            if self.check_if_canceled("GitHub processing"):
                return
            try:
                # FIX: build the query string via `params` so requests
                # URL-encodes it. The previous raw concatenation broke on
                # queries containing spaces, '&', '#', etc.
                params = {
                    "q": kwargs["query"],
                    "sort": "stars",
                    "order": "desc",
                    "per_page": self._param.top_n,
                }
                headers = {"Content-Type": "application/vnd.github+json", "X-GitHub-Api-Version": '2022-11-28'}
                # FIX: bounded timeout so a hung connection cannot eat the
                # whole component budget silently.
                response = requests.get(url="https://api.github.com/search/repositories",
                                        params=params, headers=headers, timeout=10).json()
                if self.check_if_canceled("GitHub processing"):
                    return

                self._retrieve_chunks(response['items'],
                                      get_title=lambda r: r["name"],
                                      get_url=lambda r: r["html_url"],
                                      get_content=lambda r: str(r["description"]) + '\n stars:' + str(r['watchers']))
                self.set_output("json", response['items'])
                return self.output("formalized_content")
            except Exception as e:
                if self.check_if_canceled("GitHub processing"):
                    return
                last_e = e
                logging.exception(f"GitHub error: {e}")
                time.sleep(self._param.delay_after_error)

        if last_e:
            self.set_output("_ERROR", str(last_e))
            return f"GitHub error: {last_e}"

        assert False, self.output()

    def thoughts(self) -> str:
        """Progress message shown to the user while the search runs."""
        return "Scanning GitHub repos related to `{}`.".format(self.get_input().get("query", "-_-!"))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/tools/github.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/tools/googlescholar.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import time
from abc import ABC
from scholarly import scholarly
from agent.tools.base import ToolMeta, ToolParamBase, ToolBase
from common.connection_utils import timeout
class GoogleScholarParam(ToolParamBase):
    """
    Define the GoogleScholar component parameters.
    """

    def __init__(self):
        # Tool schema exposed to the LLM for function calling.
        self.meta:ToolMeta = {
            "name": "google_scholar_search",
            "description": """Google Scholar provides a simple way to broadly search for scholarly literature. From one place, you can search across many disciplines and sources: articles, theses, books, abstracts and court opinions, from academic publishers, professional societies, online repositories, universities and other web sites. Google Scholar helps you find relevant work across the world of scholarly research.""",
            "parameters": {
                "query": {
                    "type": "string",
                    "description": "The search keyword to execute with Google Scholar. The keywords should be the most important words/terms(includes synonyms) from the original request.",
                    "default": "{sys.query}",
                    "required": True
                }
            }
        }
        super().__init__()
        # Maximum number of publications to collect.
        self.top_n = 12
        # Passed straight through to scholarly.search_pubs:
        self.sort_by = 'relevance'   # 'date' or 'relevance'
        self.year_low = None         # optional lower publication-year bound
        self.year_high = None        # optional upper publication-year bound
        self.patents = True          # include patents in results

    def check(self):
        """Validate user-configurable parameters."""
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.sort_by, "GoogleScholar Sort_by", ['date', 'relevance'])
        self.check_boolean(self.patents, "Whether or not to include patents, defaults to True")

    def get_input_form(self) -> dict[str, dict]:
        """UI form definition for manual (non-LLM) invocation."""
        return {
            "query": {
                "name": "Query",
                "type": "line"
            }
        }
class GoogleScholar(ToolBase, ABC):
    """Agent tool that searches Google Scholar publications via `scholarly`."""

    component_name = "GoogleScholar"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
    def _invoke(self, **kwargs):
        """Search publications for kwargs["query"] and collect the top-N hits.

        Stores the result list on the "json" output, pushes chunks through
        `_retrieve_chunks`, and returns the "formalized_content" output.
        Retries up to `max_retries` times on any error.
        """
        from itertools import islice

        if self.check_if_canceled("GoogleScholar processing"):
            return

        if not kwargs.get("query"):
            self.set_output("formalized_content", "")
            return ""

        last_e = ""
        for _ in range(self._param.max_retries + 1):
            if self.check_if_canceled("GoogleScholar processing"):
                return
            try:
                scholar_client = scholarly.search_pubs(kwargs["query"], patents=self._param.patents, year_low=self._param.year_low,
                                                       year_high=self._param.year_high, sort_by=self._param.sort_by)
                if self.check_if_canceled("GoogleScholar processing"):
                    return

                # FIX: search_pubs returns a lazy generator. The old code fed
                # the generator to _retrieve_chunks and then called list() on
                # the SAME (partially consumed) generator for the "json"
                # output, so that output missed every already-consumed item.
                # Materialize the top-N results once and reuse the list.
                results = list(islice(scholar_client, self._param.top_n))
                self._retrieve_chunks(results,
                                      get_title=lambda r: r['bib']['title'],
                                      get_url=lambda r: r["pub_url"],
                                      get_content=lambda r: "\n author: " + ",".join(r['bib']['author']) + '\n Abstract: ' + r['bib'].get('abstract', 'no abstract')
                                      )
                self.set_output("json", results)
                return self.output("formalized_content")
            except Exception as e:
                if self.check_if_canceled("GoogleScholar processing"):
                    return
                last_e = e
                logging.exception(f"GoogleScholar error: {e}")
                time.sleep(self._param.delay_after_error)

        if last_e:
            self.set_output("_ERROR", str(last_e))
            return f"GoogleScholar error: {last_e}"

        assert False, self.output()

    def thoughts(self) -> str:
        """Progress message shown to the user while the search runs."""
        # FIX: repaired mojibake character in the user-facing string.
        return "Looking for scholarly papers on `{}`, prioritising reputable sources.".format(self.get_input().get("query", "-_-!"))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/tools/googlescholar.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/tools/pubmed.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import time
from abc import ABC
from Bio import Entrez
import re
import xml.etree.ElementTree as ET
from agent.tools.base import ToolParamBase, ToolMeta, ToolBase
from common.connection_utils import timeout
class PubMedParam(ToolParamBase):
    """
    Define the PubMed component parameters.
    """

    def __init__(self):
        # Tool schema exposed to the LLM for function calling.
        self.meta:ToolMeta = {
            "name": "pubmed_search",
            "description": """
PubMed is an openly accessible, free database which includes primarily the MEDLINE database of references and abstracts on life sciences and biomedical topics.
In addition to MEDLINE, PubMed provides access to:
- older references from the print version of Index Medicus, back to 1951 and earlier
- references to some journals before they were indexed in Index Medicus and MEDLINE, for instance Science, BMJ, and Annals of Surgery
- very recent entries to records for an article before it is indexed with Medical Subject Headings (MeSH) and added to MEDLINE
- a collection of books available full-text and other subsets of NLM records[4]
- PMC citations
- NCBI Bookshelf
""",
            "parameters": {
                "query": {
                    "type": "string",
                    "description": "The search keywords to execute with PubMed. The keywords should be the most important words/terms(includes synonyms) from the original request.",
                    "default": "{sys.query}",
                    "required": True
                }
            }
        }
        super().__init__()
        # Maximum number of articles to fetch.
        self.top_n = 12
        # Contact address NCBI Entrez requires with every request.
        self.email = "A.N.Other@example.com"

    def check(self):
        """Validate user-configurable parameters."""
        self.check_positive_integer(self.top_n, "Top N")

    def get_input_form(self) -> dict[str, dict]:
        """UI form definition for manual (non-LLM) invocation."""
        return {
            "query": {
                "name": "Query",
                "type": "line"
            }
        }
class PubMed(ToolBase, ABC):
    """Agent tool that searches PubMed via NCBI Entrez and formats citations."""

    component_name = "PubMed"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
    def _invoke(self, **kwargs):
        """Search PubMed for kwargs["query"] and chunk the top-N articles.

        Retries up to `max_retries` times on any error; returns the
        "formalized_content" output on success.
        """
        if self.check_if_canceled("PubMed processing"):
            return

        if not kwargs.get("query"):
            self.set_output("formalized_content", "")
            return ""

        last_e = ""
        for _ in range(self._param.max_retries + 1):
            if self.check_if_canceled("PubMed processing"):
                return
            try:
                Entrez.email = self._param.email
                pubmedids = Entrez.read(Entrez.esearch(db='pubmed', retmax=self._param.top_n, term=kwargs["query"]))['IdList']
                if self.check_if_canceled("PubMed processing"):
                    return
                # Strip <b>/<i> markup Entrez sometimes embeds before XML parsing.
                pubmedcnt = ET.fromstring(re.sub(r'<(/?)b>|<(/?)i>', '', Entrez.efetch(db='pubmed', id=",".join(pubmedids),
                                                                                      retmode="xml").read().decode("utf-8")))
                if self.check_if_canceled("PubMed processing"):
                    return
                self._retrieve_chunks(pubmedcnt.findall("PubmedArticle"),
                                      get_title=lambda child: child.find("MedlineCitation").find("Article").find("ArticleTitle").text,
                                      get_url=lambda child: "https://pubmed.ncbi.nlm.nih.gov/" + child.find("MedlineCitation").find("PMID").text,
                                      get_content=lambda child: self._format_pubmed_content(child),)
                return self.output("formalized_content")
            except Exception as e:
                if self.check_if_canceled("PubMed processing"):
                    return
                last_e = e
                logging.exception(f"PubMed error: {e}")
                time.sleep(self._param.delay_after_error)

        if last_e:
            self.set_output("_ERROR", str(last_e))
            return f"PubMed error: {last_e}"

        assert False, self.output()

    def _format_pubmed_content(self, child):
        """Extract structured reference info from a <PubmedArticle> element."""

        def safe_find(root, path):
            # Walk `path` one tag at a time from `root`; None-safe at each step.
            node = root
            for p in path.split("/"):
                if node is None:
                    return None
                node = node.find(p)
            return node.text if node is not None and node.text else None

        title = safe_find(child, "MedlineCitation/Article/ArticleTitle") or "No title"
        abstract = safe_find(child, "MedlineCitation/Article/Abstract/AbstractText") or "No abstract available"
        journal = safe_find(child, "MedlineCitation/Article/Journal/Title") or "Unknown Journal"
        volume = safe_find(child, "MedlineCitation/Article/Journal/JournalIssue/Volume") or "-"
        issue = safe_find(child, "MedlineCitation/Article/Journal/JournalIssue/Issue") or "-"
        pages = safe_find(child, "MedlineCitation/Article/Pagination/MedlinePgn") or "-"

        # Authors.
        # FIX: look up LastName/ForeName relative to EACH <Author> element.
        # The previous closure always searched from the article root, so
        # every name resolved to None and authors were always reported as
        # "Unknown Authors".
        authors = []
        for author in child.findall(".//AuthorList/Author"):
            lastname = safe_find(author, "LastName") or ""
            forename = safe_find(author, "ForeName") or ""
            fullname = f"{forename} {lastname}".strip()
            if fullname:
                authors.append(fullname)
        authors_str = ", ".join(authors) if authors else "Unknown Authors"

        # DOI lives in the PubmedData ArticleId list, tagged IdType="doi".
        doi = None
        for eid in child.findall(".//ArticleId"):
            if eid.attrib.get("IdType") == "doi":
                doi = eid.text
                break

        return (
            f"Title: {title}\n"
            f"Authors: {authors_str}\n"
            f"Journal: {journal}\n"
            f"Volume: {volume}\n"
            f"Issue: {issue}\n"
            f"Pages: {pages}\n"
            f"DOI: {doi or '-'}\n"
            f"Abstract: {abstract.strip()}"
        )

    def thoughts(self) -> str:
        """Progress message shown to the user while the search runs."""
        # FIX: repaired mojibake character in the user-facing string.
        return "Looking for scholarly papers on `{}`, prioritising reputable sources.".format(self.get_input().get("query", "-_-!"))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/tools/pubmed.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/tools/retrieval.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
from functools import partial
import json
import os
import re
from abc import ABC
from agent.tools.base import ToolParamBase, ToolBase, ToolMeta
from common.constants import LLMType
from api.db.services.doc_metadata_service import DocMetadataService
from common.metadata_utils import apply_meta_data_filter
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api.db.services.memory_service import MemoryService
from api.db.joint_services import memory_message_service
from common import settings
from common.connection_utils import timeout
from rag.app.tag import label_question
from rag.prompts.generator import cross_languages, kb_prompt, memory_prompt
class RetrievalParam(ToolParamBase):
    """
    Define the Retrieval component parameters.
    """

    def __init__(self):
        # Tool schema exposed to the LLM for function calling.
        # NOTE(review): "search_my_dateset" is misspelled, but it is the
        # registered tool/function name — renaming it would break existing
        # agent canvases that reference it. Confirm before fixing.
        self.meta:ToolMeta = {
            "name": "search_my_dateset",
            "description": "This tool can be utilized for relevant content searching in the datasets.",
            "parameters": {
                "query": {
                    "type": "string",
                    "description": "The keywords to search the dataset. The keywords should be the most important words/terms(includes synonyms) from the original request.",
                    "default": "",
                    "required": True
                }
            }
        }
        super().__init__()
        self.function_name = "search_my_dateset"
        self.description = "This tool can be utilized for relevant content searching in the datasets."
        # Chunk-matching thresholds and weights.
        self.similarity_threshold = 0.2
        self.keywords_similarity_weight = 0.5
        self.top_n = 8       # chunks returned to the caller
        self.top_k = 1024    # candidates considered before reranking
        # Dataset / memory selection (ids may be literal or canvas variables).
        self.kb_ids = []
        self.memory_ids = []
        self.kb_vars = []
        self.rerank_id = ""        # optional rerank model id
        self.empty_response = ""   # canned reply when nothing is retrieved
        self.use_kg = False        # also query the knowledge-graph retriever
        self.cross_languages = []  # target languages for query translation
        self.toc_enhance = False   # enable table-of-contents based reranking
        self.meta_data_filter={}   # document metadata filter spec

    def check(self):
        """Validate user-configurable parameters."""
        self.check_decimal_float(self.similarity_threshold, "[Retrieval] Similarity threshold")
        self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keyword similarity weight")
        self.check_positive_number(self.top_n, "[Retrieval] Top N")

    def get_input_form(self) -> dict[str, dict]:
        """UI form definition for manual (non-LLM) invocation."""
        return {
            "query": {
                "name": "Query",
                "type": "line"
            }
        }
class Retrieval(ToolBase, ABC):
    """Agent tool that answers a query from knowledge-base datasets or from memory.

    Dispatch happens in `_invoke_async`: an explicit `retrieval_from` param wins,
    otherwise the presence of `kb_ids` / `memory_ids` decides the source.
    """
    component_name = "Retrieval"

    async def _retrieve_kb(self, query_text: str):
        """Run dataset retrieval for `query_text` and set the component outputs.

        Resolves dataset ids (literal ids or canvas variable references),
        applies optional metadata filtering, cross-language rewriting, TOC
        enhancement and knowledge-graph retrieval, then formats the chunks.
        Returns the formalized content string, or None when cancelled / empty.
        """
        # Ids containing "@" are canvas variable references; resolve them to
        # concrete dataset ids (by name first, then by id).
        kb_ids: list[str] = []
        for id in self._param.kb_ids:
            if id.find("@") < 0:
                kb_ids.append(id)
                continue
            kb_nm = self._canvas.get_variable_value(id)
            # if kb_nm is a list
            kb_nm_list = kb_nm if isinstance(kb_nm, list) else [kb_nm]
            for nm_or_id in kb_nm_list:
                e, kb = KnowledgebaseService.get_by_name(nm_or_id,
                                                         self._canvas._tenant_id)
                if not e:
                    e, kb = KnowledgebaseService.get_by_id(nm_or_id)
                if not e:
                    raise Exception(f"Dataset({nm_or_id}) does not exist.")
                kb_ids.append(kb.id)

        # De-duplicate and drop empty ids before loading the datasets.
        filtered_kb_ids: list[str] = list(set([kb_id for kb_id in kb_ids if kb_id]))
        kbs = KnowledgebaseService.get_by_ids(filtered_kb_ids)
        if not kbs:
            raise Exception("No dataset is selected.")
        # All selected datasets must share one embedding model.
        embd_nms = list(set([kb.embd_id for kb in kbs]))
        assert len(embd_nms) == 1, "Knowledge bases use different embedding models."

        embd_mdl = None
        if embd_nms:
            embd_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING, embd_nms[0])
        rerank_mdl = None
        if self._param.rerank_id:
            rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)

        # Substitute canvas variables referenced inside the query template.
        vars = self.get_input_elements_from_text(query_text)
        vars = {k: o["value"] for k, o in vars.items()}
        query = self.string_format(query_text, vars)

        doc_ids = []
        if self._param.meta_data_filter != {}:
            metas = DocMetadataService.get_flatted_meta_by_kbs(kb_ids)

            def _resolve_manual_filter(flt: dict) -> dict:
                """Expand {variable} references inside a manual filter's value string."""
                pat = re.compile(self.variable_ref_patt)
                s = flt.get("value", "")
                out_parts = []
                last = 0
                for m in pat.finditer(s):
                    out_parts.append(s[last:m.start()])
                    key = m.group(1)
                    v = self._canvas.get_variable_value(key)
                    if v is None:
                        rep = ""
                    elif isinstance(v, partial):
                        # Streaming value: drain the generator into one string.
                        buf = []
                        for chunk in v():
                            buf.append(chunk)
                        rep = "".join(buf)
                    elif isinstance(v, str):
                        rep = v
                    else:
                        rep = json.dumps(v, ensure_ascii=False)
                    out_parts.append(rep)
                    last = m.end()
                out_parts.append(s[last:])
                flt["value"] = "".join(out_parts)
                return flt

            # "auto"/"semi_auto" filtering needs a chat model to interpret the query.
            chat_mdl = None
            if self._param.meta_data_filter.get("method") in ["auto", "semi_auto"]:
                chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT)
            doc_ids = await apply_meta_data_filter(
                self._param.meta_data_filter,
                metas,
                query,
                chat_mdl,
                doc_ids,
                _resolve_manual_filter if self._param.meta_data_filter.get("method") == "manual" else None,
            )

        if self._param.cross_languages:
            query = await cross_languages(kbs[0].tenant_id, None, query, self._param.cross_languages)

        # NOTE(review): kbs is always truthy here (empty kbs raised above), so the
        # `else` branch below looks unreachable — confirm before removing.
        if kbs:
            # Strip a leading "user:" prefix that chat front-ends sometimes add.
            query = re.sub(r"^user[:๏ผ\s]*", "", query, flags=re.IGNORECASE)
            kbinfos = await settings.retriever.retrieval(
                query,
                embd_mdl,
                [kb.tenant_id for kb in kbs],
                filtered_kb_ids,
                1,
                self._param.top_n,
                self._param.similarity_threshold,
                1 - self._param.keywords_similarity_weight,
                doc_ids=doc_ids,
                aggs=False,
                rerank_mdl=rerank_mdl,
                rank_feature=label_question(query, kbs),
            )
            if self.check_if_canceled("Retrieval processing"):
                return
            if self._param.toc_enhance:
                # Re-rank/augment chunks using the table-of-contents structure.
                chat_mdl = LLMBundle(self._canvas._tenant_id, LLMType.CHAT)
                cks = await settings.retriever.retrieval_by_toc(query, kbinfos["chunks"], [kb.tenant_id for kb in kbs],
                                                                chat_mdl, self._param.top_n)
                if self.check_if_canceled("Retrieval processing"):
                    return
                if cks:
                    kbinfos["chunks"] = cks
            kbinfos["chunks"] = settings.retriever.retrieval_by_children(kbinfos["chunks"],
                                                                         [kb.tenant_id for kb in kbs])
            if self._param.use_kg:
                # Knowledge-graph retrieval result is prepended when non-empty.
                ck = await settings.kg_retriever.retrieval(query,
                                                           [kb.tenant_id for kb in kbs],
                                                           kb_ids,
                                                           embd_mdl,
                                                           LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT))
                if self.check_if_canceled("Retrieval processing"):
                    return
                if ck["content_with_weight"]:
                    kbinfos["chunks"].insert(0, ck)
        else:
            kbinfos = {"chunks": [], "doc_aggs": []}
            if self._param.use_kg and kbs:
                ck = await settings.kg_retriever.retrieval(query, [kb.tenant_id for kb in kbs], filtered_kb_ids, embd_mdl,
                                                           LLMBundle(kbs[0].tenant_id, LLMType.CHAT))
                if self.check_if_canceled("Retrieval processing"):
                    return
                if ck["content_with_weight"]:
                    ck["content"] = ck["content_with_weight"]
                    del ck["content_with_weight"]
                    kbinfos["chunks"].insert(0, ck)

        # Drop bulky internal fields before exposing chunks downstream.
        for ck in kbinfos["chunks"]:
            if "vector" in ck:
                del ck["vector"]
            if "content_ltks" in ck:
                del ck["content_ltks"]

        if not kbinfos["chunks"]:
            self.set_output("formalized_content", self._param.empty_response)
            return

        # Format the chunks for JSON output (similar to how other tools do it)
        json_output = kbinfos["chunks"].copy()
        self._canvas.add_reference(kbinfos["chunks"], kbinfos["doc_aggs"])
        form_cnt = "\n".join(kb_prompt(kbinfos, 200000, True))

        # Set both formalized content and JSON output
        self.set_output("formalized_content", form_cnt)
        self.set_output("json", json_output)
        return form_cnt

    async def _retrieve_memory(self, query_text: str):
        """Run memory retrieval for `query_text`; returns the formalized string."""
        memory_ids: list[str] = [memory_id for memory_id in self._param.memory_ids]
        memory_list = MemoryService.get_by_ids(memory_ids)
        if not memory_list:
            raise Exception("No memory is selected.")
        # All selected memories must share one embedding model.
        embd_names = list({memory.embd_id for memory in memory_list})
        assert len(embd_names) == 1, "Memory use different embedding models."
        # Substitute canvas variables referenced inside the query template.
        vars = self.get_input_elements_from_text(query_text)
        vars = {k: o["value"] for k, o in vars.items()}
        query = self.string_format(query_text, vars)
        # query message
        message_list = memory_message_service.query_message({"memory_id": memory_ids}, {
            "query": query,
            "similarity_threshold": self._param.similarity_threshold,
            "keywords_similarity_weight": self._param.keywords_similarity_weight,
            "top_n": self._param.top_n
        })
        if not message_list:
            self.set_output("formalized_content", self._param.empty_response)
            return ""
        formated_content = "\n".join(memory_prompt(message_list, 200000))
        # set formalized_content output
        self.set_output("formalized_content", formated_content)
        return formated_content

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
    async def _invoke_async(self, **kwargs):
        """Dispatch to dataset or memory retrieval based on params and kwargs."""
        if self.check_if_canceled("Retrieval processing"):
            return
        if not kwargs.get("query"):
            self.set_output("formalized_content", self._param.empty_response)
            return
        # Explicit `retrieval_from` takes precedence; otherwise infer the
        # source from which id list is populated.
        if hasattr(self._param, "retrieval_from") and self._param.retrieval_from == "dataset":
            return await self._retrieve_kb(kwargs["query"])
        elif hasattr(self._param, "retrieval_from") and self._param.retrieval_from == "memory":
            return await self._retrieve_memory(kwargs["query"])
        elif self._param.kb_ids:
            return await self._retrieve_kb(kwargs["query"])
        elif hasattr(self._param, "memory_ids") and self._param.memory_ids:
            return await self._retrieve_memory(kwargs["query"])
        else:
            self.set_output("formalized_content", self._param.empty_response)
            return

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
    def _invoke(self, **kwargs):
        """Synchronous entry point: runs the async implementation to completion."""
        return asyncio.run(self._invoke_async(**kwargs))

    def thoughts(self) -> str:
        """Human-readable progress line shown while the tool runs."""
        return """
Keywords: {}
Looking for the most relevant articles.
        """.format(self.get_input().get("query", "-_-!"))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/tools/retrieval.py",
"license": "Apache License 2.0",
"lines": 267,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/tools/tavily.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import time
from abc import ABC
from tavily import TavilyClient
from agent.tools.base import ToolParamBase, ToolBase, ToolMeta
from common.connection_utils import timeout
class TavilySearchParam(ToolParamBase):
    """
    Define the TavilySearch component parameters.

    `meta` declares the LLM-facing tool schema; the attributes assigned after
    `super().__init__()` are operator-level settings not exposed to the model.
    """

    def __init__(self):
        # Tool schema surfaced to the LLM (name, usage guidance, parameters).
        self.meta:ToolMeta = {
            "name": "tavily_search",
            "description": """
Tavily is a search engine optimized for LLMs, aimed at efficient, quick and persistent search results.
When searching:
- Start with specific query which should focus on just a single aspect.
- Number of keywords in query should be less than 5.
- Broaden search terms if needed
- Cross-reference information from multiple sources
""",
            "parameters": {
                "query": {
                    "type": "string",
                    "description": "The search keywords to execute with Tavily. The keywords should be the most important words/terms(includes synonyms) from the original request.",
                    "default": "{sys.query}",
                    "required": True
                },
                "topic": {
                    "type": "string",
                    "description": "default:general. The category of the search.news is useful for retrieving real-time updates, particularly about politics, sports, and major current events covered by mainstream media sources. general is for broader, more general-purpose searches that may include a wide range of sources.",
                    "enum": ["general", "news"],
                    "default": "general",
                    "required": False,
                },
                "include_domains": {
                    "type": "array",
                    "description": "default:[]. A list of domains only from which the search results can be included.",
                    "default": [],
                    "items": {
                        "type": "string",
                        "description": "Domain name that must be included, e.g. www.yahoo.com"
                    },
                    "required": False
                },
                "exclude_domains": {
                    "type": "array",
                    "description": "default:[]. A list of domains from which the search results can not be included",
                    "default": [],
                    "items": {
                        "type": "string",
                        "description": "Domain name that must be excluded, e.g. www.yahoo.com"
                    },
                    "required": False
                },
            }
        }
        super().__init__()
        # Operator-level settings (not part of the tool schema above).
        self.api_key = ""
        self.search_depth = "basic"  # basic/advanced
        self.max_results = 6
        self.days = 14
        self.include_answer = False
        self.include_raw_content = False
        self.include_images = False
        self.include_image_descriptions = False

    def check(self):
        """Validate configured values; raises via the base-class checkers."""
        self.check_valid_value(self.topic, "Tavily topic: should be in 'general/news'", ["general", "news"])
        self.check_valid_value(self.search_depth, "Tavily search depth should be in 'basic/advanced'", ["basic", "advanced"])
        # Fixed: the original message contained a garbled full-width comma ("[1๏ผ 20]").
        self.check_positive_integer(self.max_results, "Tavily max result number should be within [1, 20]")
        self.check_positive_integer(self.days, "Tavily days should be greater than 1")

    def get_input_form(self) -> dict[str, dict]:
        """Describe the UI input fields for this component."""
        return {
            "query": {
                "name": "Query",
                "type": "line"
            }
        }
class TavilySearch(ToolBase, ABC):
    """Executes a Tavily web search and formalizes the hits into chunks."""
    component_name = "TavilySearch"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
    def _invoke(self, **kwargs):
        """Search Tavily with `query` plus configured options; retries on failure.

        Sets "formalized_content" / "json" outputs on success and "_ERROR" when
        every attempt fails; returns the formalized content or an error string.
        """
        if self.check_if_canceled("TavilySearch processing"):
            return
        if not kwargs.get("query"):
            self.set_output("formalized_content", "")
            return ""

        self.tavily_client = TavilyClient(api_key=self._param.api_key)

        # Back-fill every search option the caller did not pass explicitly.
        option_names = (
            "search_depth", "topic", "max_results", "days", "include_answer",
            "include_raw_content", "include_images", "include_image_descriptions",
            "include_domains", "exclude_domains",
        )
        for name in option_names:
            if name not in kwargs:
                kwargs[name] = getattr(self._param, name)

        failure = None
        for _attempt in range(self._param.max_retries + 1):
            if self.check_if_canceled("TavilySearch processing"):
                return
            try:
                # Images and raw page bodies are never requested.
                kwargs["include_images"] = False
                kwargs["include_raw_content"] = False
                res = self.tavily_client.search(**kwargs)
                if self.check_if_canceled("TavilySearch processing"):
                    return
                self._retrieve_chunks(
                    res["results"],
                    get_title=lambda r: r["title"],
                    get_url=lambda r: r["url"],
                    get_content=lambda r: r["raw_content"] if r["raw_content"] else r["content"],
                    get_score=lambda r: r["score"],
                )
                self.set_output("json", res["results"])
                return self.output("formalized_content")
            except Exception as e:
                if self.check_if_canceled("TavilySearch processing"):
                    return
                failure = e
                logging.exception(f"Tavily error: {e}")
                time.sleep(self._param.delay_after_error)

        if failure:
            self.set_output("_ERROR", str(failure))
            return f"Tavily error: {failure}"
        assert False, self.output()

    def thoughts(self) -> str:
        """Human-readable progress line shown while the tool runs."""
        return """
Keywords: {}
Looking for the most relevant articles.
        """.format(self.get_input().get("query", "-_-!"))
class TavilyExtractParam(ToolParamBase):
    """
    Define the Retrieval component parameters.
    """
    # NOTE(review): docstring above looks copy-pasted from another component;
    # this class configures Tavily Extract, not Retrieval.

    def __init__(self):
        # Tool schema surfaced to the LLM (name, description, parameters).
        self.meta:ToolMeta = {
            "name": "tavily_extract",
            "description": "Extract web page content from one or more specified URLs using Tavily Extract.",
            "parameters": {
                "urls": {
                    "type": "array",
                    "description": "The URLs to extract content from.",
                    "default": "",
                    "items": {
                        "type": "string",
                        "description": "The URL to extract content from, e.g. www.yahoo.com"
                    },
                    "required": True
                },
                "extract_depth": {
                    "type": "string",
                    "description": "The depth of the extraction process. advanced extraction retrieves more data, including tables and embedded content, with higher success but may increase latency.basic extraction costs 1 credit per 5 successful URL extractions, while advanced extraction costs 2 credits per 5 successful URL extractions.",
                    "enum": ["basic", "advanced"],
                    "default": "basic",
                    "required": False,
                },
                "format": {
                    "type": "string",
                    "description": "The format of the extracted web page content. markdown returns content in markdown format. text returns plain text and may increase latency.",
                    "enum": ["markdown", "text"],
                    "default": "markdown",
                    "required": False,
                }
            }
        }
        super().__init__()
        # Operator-level settings (not part of the tool schema above).
        self.api_key = ""
        self.extract_depth = "basic"  # basic/advanced
        self.urls = []
        self.format = "markdown"
        self.include_images = False

    def check(self):
        """Validate configured values; raises via the base-class checkers."""
        self.check_valid_value(self.extract_depth, "Tavily extract depth should be in 'basic/advanced'", ["basic", "advanced"])
        self.check_valid_value(self.format, "Tavily extract format should be in 'markdown/text'", ["markdown", "text"])

    def get_input_form(self) -> dict[str, dict]:
        """Describe the UI input fields for this component."""
        return {
            "urls": {
                "name": "URLs",
                "type": "line"
            }
        }
class TavilyExtract(ToolBase, ABC):
    """Fetches the main content of one or more URLs via Tavily Extract."""
    component_name = "TavilyExtract"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
    def _invoke(self, **kwargs):
        """Extract page content for the configured/supplied URLs; retries on failure.

        Sets the "json" output with the extraction results, or "_ERROR" when
        every attempt fails.
        """
        if self.check_if_canceled("TavilyExtract processing"):
            return

        self.tavily_client = TavilyClient(api_key=self._param.api_key)

        # Back-fill options the caller did not pass explicitly.
        for name in ("urls", "extract_depth", "format"):
            if name not in kwargs:
                kwargs[name] = getattr(self._param, name)
        # Accept a comma-separated string of URLs as well as a list.
        url_arg = kwargs.get("urls")
        if url_arg and isinstance(url_arg, str):
            kwargs["urls"] = url_arg.split(",")

        failure = None
        for _attempt in range(self._param.max_retries + 1):
            if self.check_if_canceled("TavilyExtract processing"):
                return
            try:
                kwargs["include_images"] = False  # images are never requested
                res = self.tavily_client.extract(**kwargs)
                if self.check_if_canceled("TavilyExtract processing"):
                    return
                self.set_output("json", res["results"])
                return self.output("json")
            except Exception as e:
                if self.check_if_canceled("TavilyExtract processing"):
                    return
                failure = e
                logging.exception(f"Tavily error: {e}")

        if failure:
            self.set_output("_ERROR", str(failure))
            return f"Tavily error: {failure}"
        assert False, self.output()

    def thoughts(self) -> str:
        """Human-readable progress line shown while the tool runs."""
        return "Opened {}โpulling out the main textโฆ".format(self.get_input().get("urls", "-_-!"))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/tools/tavily.py",
"license": "Apache License 2.0",
"lines": 224,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/tools/wencai.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import time
from abc import ABC
import pandas as pd
# import pywencai
from agent.tools.base import ToolParamBase, ToolMeta, ToolBase
from common.connection_utils import timeout
class WenCaiParam(ToolParamBase):
    """
    Define the WenCai component parameters.
    """

    def __init__(self):
        # Tool schema surfaced to the LLM (name, description, parameters).
        self.meta:ToolMeta = {
            "name": "iwencai",
            "description": """
iwencai search: search platform is committed to providing hundreds of millions of investors with the most timely, accurate and comprehensive information, covering news, announcements, research reports, blogs, forums, Weibo, characters, etc.
robo-advisor intelligent stock selection platform: through AI technology, is committed to providing investors with intelligent stock selection, quantitative investment, main force tracking, value investment, technical analysis and other types of stock selection technologies.
fund selection platform: through AI technology, is committed to providing excellent fund, value investment, quantitative analysis and other fund selection technologies for foundation citizens.
""",
            "parameters": {
                "query": {
                    "type": "string",
                    "description": "The question/conditions to select stocks.",
                    "default": "{sys.query}",
                    "required": True
                }
            }
        }
        super().__init__()
        # Operator-level settings: result count and which market to query.
        self.top_n = 10
        self.query_type = "stock"

    def check(self):
        """Validate configured values; raises via the base-class checkers."""
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.query_type, "Query type",
                               ['stock', 'zhishu', 'fund', 'hkstock', 'usstock', 'threeboard', 'conbond', 'insurance',
                                'futures', 'lccp',
                                'foreign_exchange'])

    def get_input_form(self) -> dict[str, dict]:
        """Describe the UI input fields for this component."""
        return {
            "query": {
                "name": "Query",
                "type": "line"
            }
        }
class WenCai(ToolBase, ABC):
    """Queries the iwencai platform and renders the result as a markdown report."""
    component_name = "WenCai"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
    def _invoke(self, **kwargs):
        """Run the stock-selection query and set the "report" output.

        NOTE: the pywencai call is currently commented out, so `res` is always
        an empty list and the report comes out empty.
        """
        if self.check_if_canceled("WenCai processing"):
            return
        if not kwargs.get("query"):
            self.set_output("report", "")
            return ""

        failure = ""
        for _attempt in range(self._param.max_retries + 1):
            if self.check_if_canceled("WenCai processing"):
                return
            try:
                sections = []
                # res = pywencai.get(query=kwargs["query"], query_type=self._param.query_type, perpage=self._param.top_n)
                res = []
                if self.check_if_canceled("WenCai processing"):
                    return
                # Normalize whatever shape the API returned into markdown sections.
                if isinstance(res, pd.DataFrame):
                    sections.append(res.to_markdown())
                elif isinstance(res, dict):
                    for key, val in res.items():
                        if self.check_if_canceled("WenCai processing"):
                            return
                        if isinstance(val, list):
                            sections.append(key + "\n" + pd.DataFrame(val).to_markdown())
                        elif isinstance(val, str):
                            sections.append(key + "\n" + val)
                        elif isinstance(val, dict):
                            if "meta" in val.keys():
                                continue
                            sections.append(pd.DataFrame.from_dict(val, orient='index').to_markdown())
                        elif isinstance(val, pd.DataFrame):
                            if "image_url" in val.columns:
                                continue
                            sections.append(val.to_markdown())
                        else:
                            sections.append(key + "\n" + str(val))
                self.set_output("report", "\n\n".join(sections))
                return self.output("report")
            except Exception as e:
                if self.check_if_canceled("WenCai processing"):
                    return
                failure = e
                logging.exception(f"WenCai error: {e}")
                time.sleep(self._param.delay_after_error)

        if failure:
            self.set_output("_ERROR", str(failure))
            return f"WenCai error: {failure}"
        assert False, self.output()

    def thoughts(self) -> str:
        """Human-readable progress line shown while the tool runs."""
        return "Pulling live financial data for `{}`.".format(self.get_input().get("query", "-_-!"))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/tools/wencai.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/tools/wikipedia.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import time
from abc import ABC
import wikipedia
from agent.tools.base import ToolMeta, ToolParamBase, ToolBase
from common.connection_utils import timeout
class WikipediaParam(ToolParamBase):
    """
    Define the Wikipedia component parameters.
    """

    def __init__(self):
        # Tool schema surfaced to the LLM (name, description, parameters).
        self.meta:ToolMeta = {
            "name": "wikipedia_search",
            "description": """A wide range of how-to and information pages are made available in wikipedia. Since 2001, it has grown rapidly to become the world's largest reference website. From Wikipedia, the free encyclopedia.""",
            "parameters": {
                "query": {
                    "type": "string",
                    "description": "The search keyword to execute with wikipedia. The keyword MUST be a specific subject that can match the title.",
                    "default": "{sys.query}",
                    "required": True
                }
            }
        }
        super().__init__()
        # Operator-level settings: result count and the Wikipedia edition.
        self.top_n = 10
        self.language = "en"

    def check(self):
        """Validate configured values; raises via the base-class checkers."""
        self.check_positive_integer(self.top_n, "Top N")
        # Supported Wikipedia language codes.
        self.check_valid_value(self.language, "Wikipedia languages",
                               ['af', 'pl', 'ar', 'ast', 'az', 'bg', 'nan', 'bn', 'be', 'ca', 'cs', 'cy', 'da', 'de',
                                'et', 'el', 'en', 'es', 'eo', 'eu', 'fa', 'fr', 'gl', 'ko', 'hy', 'hi', 'hr', 'id',
                                'it', 'he', 'ka', 'lld', 'la', 'lv', 'lt', 'hu', 'mk', 'arz', 'ms', 'min', 'my', 'nl',
                                'ja', 'nb', 'nn', 'ce', 'uz', 'pt', 'kk', 'ro', 'ru', 'ceb', 'sk', 'sl', 'sr', 'sh',
                                'fi', 'sv', 'ta', 'tt', 'th', 'tg', 'azb', 'tr', 'uk', 'ur', 'vi', 'war', 'zh', 'yue'])

    def get_input_form(self) -> dict[str, dict]:
        """Describe the UI input fields for this component."""
        return {
            "query": {
                "name": "Query",
                "type": "line"
            }
        }
class Wikipedia(ToolBase, ABC):
    """Searches Wikipedia and formalizes the page summaries into chunks."""
    component_name = "Wikipedia"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60)))
    def _invoke(self, **kwargs):
        """Search Wikipedia for `query` and set the formalized output.

        Pages that fail to load (e.g. disambiguation errors) are skipped
        silently; the whole search is retried on failure.
        """
        if self.check_if_canceled("Wikipedia processing"):
            return
        if not kwargs.get("query"):
            self.set_output("formalized_content", "")
            return ""

        failure = ""
        for _attempt in range(self._param.max_retries + 1):
            if self.check_if_canceled("Wikipedia processing"):
                return
            try:
                wikipedia.set_lang(self._param.language)
                wiki_engine = wikipedia
                pages = []
                for title in wiki_engine.search(kwargs["query"], results=self._param.top_n):
                    if self.check_if_canceled("Wikipedia processing"):
                        return
                    try:
                        pages.append(wikipedia.page(title))
                    except Exception:
                        # Some titles cannot be resolved to a page; ignore them.
                        pass
                self._retrieve_chunks(
                    pages,
                    get_title=lambda r: r.title,
                    get_url=lambda r: r.url,
                    get_content=lambda r: r.summary,
                )
                return self.output("formalized_content")
            except Exception as e:
                if self.check_if_canceled("Wikipedia processing"):
                    return
                failure = e
                logging.exception(f"Wikipedia error: {e}")
                time.sleep(self._param.delay_after_error)

        if failure:
            self.set_output("_ERROR", str(failure))
            return f"Wikipedia error: {failure}"
        assert False, self.output()

    def thoughts(self) -> str:
        """Human-readable progress line shown while the tool runs."""
        return """
Keywords: {}
Looking for the most relevant articles.
        """.format(self.get_input().get("query", "-_-!"))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/tools/wikipedia.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/tools/yahoofinance.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import time
from abc import ABC
import pandas as pd
import yfinance as yf
from agent.tools.base import ToolMeta, ToolParamBase, ToolBase
from common.connection_utils import timeout
class YahooFinanceParam(ToolParamBase):
    """
    Define the YahooFinance component parameters.
    """

    def __init__(self):
        # Tool schema surfaced to the LLM (name, description, parameters).
        self.meta:ToolMeta = {
            "name": "yahoo_finance",
            "description": "The Yahoo Finance is a service that provides access to real-time and historical stock market data. It enables users to fetch various types of stock information, such as price quotes, historical prices, company profiles, and financial news. The API offers structured data, allowing developers to integrate market data into their applications and analysis tools.",
            "parameters": {
                "stock_code": {
                    "type": "string",
                    "description": "The stock code or company name.",
                    "default": "{sys.query}",
                    "required": True
                }
            }
        }
        super().__init__()
        # Feature flags selecting which report sections to include.
        self.info = True
        self.history = False
        self.count = False
        self.financials = False
        self.income_stmt = False
        self.balance_sheet = False
        self.cash_flow_statement = False
        self.news = True

    def check(self):
        """Validate that every section flag is a boolean."""
        self.check_boolean(self.info, "get all stock info")
        self.check_boolean(self.history, "get historical market data")
        self.check_boolean(self.count, "show share count")
        self.check_boolean(self.financials, "show financials")
        self.check_boolean(self.income_stmt, "income statement")
        self.check_boolean(self.balance_sheet, "balance sheet")
        self.check_boolean(self.cash_flow_statement, "cash flow statement")
        self.check_boolean(self.news, "show news")

    def get_input_form(self) -> dict[str, dict]:
        """Describe the UI input fields for this component."""
        return {
            "stock_code": {
                "name": "Stock code/Company name",
                "type": "line"
            }
        }
class YahooFinance(ToolBase, ABC):
    """Fetches stock information from Yahoo Finance and renders a markdown report."""
    component_name = "YahooFinance"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60)))
    def _invoke(self, **kwargs):
        """Build the report for `stock_code` according to the enabled param flags.

        Sets the "report" output (markdown sections joined by blank lines) on
        success, "_ERROR" on repeated failure; returns the report text or an
        error string. Returns None when the task is cancelled.

        Fix: the `count` and `income_stmt` flags were validated in
        YahooFinanceParam.check() but never consumed here; they now add their
        sections. Both default to False, so default output is unchanged.
        """
        if self.check_if_canceled("YahooFinance processing"):
            return None
        if not kwargs.get("stock_code"):
            self.set_output("report", "")
            return ""
        last_e = ""
        for _ in range(self._param.max_retries + 1):
            if self.check_if_canceled("YahooFinance processing"):
                return None
            yahoo_res = []
            try:
                msft = yf.Ticker(kwargs["stock_code"])
                if self.check_if_canceled("YahooFinance processing"):
                    return None
                if self._param.info:
                    yahoo_res.append("# Information:\n" + pd.Series(msft.info).to_markdown() + "\n")
                if self._param.history:
                    yahoo_res.append("# History:\n" + msft.history().to_markdown() + "\n")
                if self._param.count:
                    # Full share-count history (previously an unused flag).
                    yahoo_res.append("# Share count:\n" + msft.get_shares_full().to_markdown() + "\n")
                if self._param.financials:
                    # NOTE(review): label/flag mismatch kept for output
                    # compatibility — the "financials" flag renders the
                    # earnings calendar, not the financial statements.
                    yahoo_res.append("# Calendar:\n" + pd.DataFrame(msft.calendar).to_markdown() + "\n")
                if self._param.income_stmt:
                    # Income statements (previously an unused flag).
                    yahoo_res.append("# Income statement:\n" + msft.income_stmt.to_markdown() + "\n")
                    yahoo_res.append("# Quarterly income statement:\n" + msft.quarterly_income_stmt.to_markdown() + "\n")
                if self._param.balance_sheet:
                    yahoo_res.append("# Balance sheet:\n" + msft.balance_sheet.to_markdown() + "\n")
                    yahoo_res.append("# Quarterly balance sheet:\n" + msft.quarterly_balance_sheet.to_markdown() + "\n")
                if self._param.cash_flow_statement:
                    yahoo_res.append("# Cash flow statement:\n" + msft.cashflow.to_markdown() + "\n")
                    yahoo_res.append("# Quarterly cash flow statement:\n" + msft.quarterly_cashflow.to_markdown() + "\n")
                if self._param.news:
                    yahoo_res.append("# News:\n" + pd.DataFrame(msft.news).to_markdown() + "\n")
                self.set_output("report", "\n\n".join(yahoo_res))
                return self.output("report")
            except Exception as e:
                if self.check_if_canceled("YahooFinance processing"):
                    return None
                last_e = e
                logging.exception(f"YahooFinance error: {e}")
                time.sleep(self._param.delay_after_error)
        if last_e:
            self.set_output("_ERROR", str(last_e))
            return f"YahooFinance error: {last_e}"
        assert False, self.output()

    def thoughts(self) -> str:
        """Human-readable progress line shown while the tool runs."""
        return "Pulling live financial data for `{}`.".format(self.get_input().get("stock_code", "-_-!"))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/tools/yahoofinance.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:api/utils/base64_image.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/utils/base64_image.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:test/testcases/test_web_api/test_dialog_app/test_create_dialog.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from configs import CHAT_ASSISTANT_NAME_LIMIT, INVALID_API_TOKEN
from hypothesis import example, given, settings
from libs.auth import RAGFlowWebApiAuth
from utils.hypothesis_utils import valid_names
from common import create_dialog
@pytest.mark.usefixtures("clear_dialogs")
class TestAuthorization:
    """Dialog creation must reject missing or invalid credentials."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
        ids=["empty_auth", "invalid_api_token"],
    )
    def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
        # Both no-auth and bad-token requests must come back with 401 semantics.
        payload = {"name": "auth_test", "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}}
        res = create_dialog(invalid_auth, payload)
        assert res["code"] == expected_code, res
        assert res["message"] == expected_message, res
@pytest.mark.usefixtures("clear_dialogs")
class TestCapability:
    """Volume and concurrency tests for dialog creation."""

    @pytest.mark.p3
    def test_create_dialog_100(self, WebApiAuth):
        # Sequentially create 100 dialogs; each request must succeed.
        for i in range(100):
            payload = {"name": f"dialog_{i}", "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}}
            res = create_dialog(WebApiAuth, payload)
            assert res["code"] == 0, f"Failed to create dialog {i}"

    @pytest.mark.p3
    def test_create_dialog_concurrent(self, WebApiAuth):
        # Fire 100 creations from a 5-worker pool and verify every one succeeds.
        count = 100
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(create_dialog, WebApiAuth, {"name": f"dialog_{i}", "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}}) for i in range(count)]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures)
@pytest.mark.usefixtures("clear_dialogs")
class TestDialogCreate:
@pytest.mark.p1
@given(name=valid_names())
@example("a" * CHAT_ASSISTANT_NAME_LIMIT)
@settings(max_examples=20)
def test_name(self, WebApiAuth, name):
payload = {"name": name, "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}}
res = create_dialog(WebApiAuth, payload)
assert res["code"] == 0, res
@pytest.mark.p2
@pytest.mark.parametrize(
"name, expected_code, expected_message",
[
("", 102, "Dialog name can't be empty."),
(" ", 102, "Dialog name can't be empty."),
("a" * (CHAT_ASSISTANT_NAME_LIMIT + 1), 102, "Dialog name length is 256 which is larger than 255"),
(0, 102, "Dialog name must be string."),
(None, 102, "Dialog name must be string."),
],
ids=["empty_name", "space_name", "too_long_name", "invalid_name", "None_name"],
)
def test_name_invalid(self, WebApiAuth, name, expected_code, expected_message):
payload = {"name": name, "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}}
res = create_dialog(WebApiAuth, payload)
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
@pytest.mark.p1
def test_prompt_config_required(self, WebApiAuth):
payload = {"name": "test_dialog"}
res = create_dialog(WebApiAuth, payload)
assert res["code"] == 101, res
assert res["message"] == "required argument are missing: prompt_config; ", res
@pytest.mark.p1
def test_prompt_config_with_knowledge_no_kb(self, WebApiAuth):
payload = {"name": "test_dialog", "prompt_config": {"system": "You are a helpful assistant. Use this knowledge: {knowledge}", "parameters": [{"key": "knowledge", "optional": True}]}}
res = create_dialog(WebApiAuth, payload)
assert res["code"] == 0, res
@pytest.mark.p1
def test_prompt_config_parameter_not_used(self, WebApiAuth):
payload = {"name": "test_dialog", "prompt_config": {"system": "You are a helpful assistant.", "parameters": [{"key": "unused_param", "optional": False}]}}
res = create_dialog(WebApiAuth, payload)
assert res["code"] == 102, res
assert "Parameter 'unused_param' is not used" in res["message"], res
@pytest.mark.p1
def test_create_with_kb_ids(self, WebApiAuth, add_dataset_func):
dataset_id = add_dataset_func
payload = {
"name": "test_dialog_with_kb",
"kb_ids": [dataset_id],
"prompt_config": {"system": "You are a helpful assistant. Use this knowledge: {knowledge}", "parameters": [{"key": "knowledge", "optional": True}]},
}
res = create_dialog(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["kb_ids"] == [dataset_id], res
@pytest.mark.p2
def test_create_with_all_parameters(self, WebApiAuth, add_dataset_func):
    """Create a dialog supplying every supported field and verify each is echoed back."""
    dataset_id = add_dataset_func
    payload = {
        "name": "comprehensive_dialog",
        "description": "A comprehensive test dialog",
        "icon": "๐ค",
        "kb_ids": [dataset_id],
        "top_n": 10,
        "top_k": 2048,
        "rerank_id": "",
        "similarity_threshold": 0.2,
        "vector_similarity_weight": 0.5,
        "llm_setting": {"model": "gpt-4", "temperature": 0.8, "max_tokens": 1000},
        "prompt_config": {"system": "You are a helpful assistant. Use this knowledge: {knowledge}", "parameters": [{"key": "knowledge", "optional": True}]},
    }
    res = create_dialog(WebApiAuth, payload)
    assert res["code"] == 0, res
    data = res["data"]
    # Every field sent in the payload must round-trip unchanged.
    # NOTE(review): llm_setting and rerank_id are not asserted here — confirm
    # whether the endpoint echoes them back as well.
    assert data["name"] == "comprehensive_dialog", res
    assert data["description"] == "A comprehensive test dialog", res
    assert data["icon"] == "๐ค", res
    assert data["kb_ids"] == [dataset_id], res
    assert data["top_n"] == 10, res
    assert data["top_k"] == 2048, res
    assert data["similarity_threshold"] == 0.2, res
    assert data["vector_similarity_weight"] == 0.5, res
@pytest.mark.p3
def test_name_duplicated(self, WebApiAuth):
    """Duplicate dialog names are permitted: the same payload can be created twice."""
    name = "duplicated_dialog"
    payload = {"name": name, "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}}
    res = create_dialog(WebApiAuth, payload)
    assert res["code"] == 0, res
    # NOTE(review): the API does not enforce name uniqueness — the identical
    # payload is accepted a second time with code 0 as well.
    res = create_dialog(WebApiAuth, payload)
    assert res["code"] == 0, res
@pytest.mark.p2
def test_optional_parameters(self, WebApiAuth):
    """A declared optional placeholder referenced by the system prompt is accepted."""
    prompt_config = {
        "system": "You are a helpful assistant. Optional param: {optional_param}",
        "parameters": [{"key": "optional_param", "optional": True}],
    }
    response = create_dialog(WebApiAuth, {"name": "test_optional_params", "prompt_config": prompt_config})
    assert response["code"] == 0, response
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_dialog_app/test_create_dialog.py",
"license": "Apache License 2.0",
"lines": 152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_dialog_app/test_delete_dialogs.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_create_dialogs, create_dialog, delete_dialog, list_dialogs
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
@pytest.mark.usefixtures("clear_dialogs")
class TestAuthorization:
    """Dialog deletion must be rejected with HTTP 401 when credentials are absent or bogus."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
        ids=["empty_auth", "invalid_api_token"],
    )
    def test_auth_invalid(self, invalid_auth, expected_code, expected_message, add_dialog_func):
        _, dialog_id = add_dialog_func
        response = delete_dialog(invalid_auth, {"dialog_ids": [dialog_id]})
        assert response["code"] == expected_code, response
        assert response["message"] == expected_message, response
class TestDialogDelete:
    """Coverage of the bulk dialog-delete endpoint (payload key: ``dialog_ids``)."""

    @pytest.mark.p1
    def test_delete_single_dialog(self, WebApiAuth, add_dialog_func):
        """Deleting the only existing dialog leaves the list empty."""
        _, dialog_id = add_dialog_func
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == 1, res
        payload = {"dialog_ids": [dialog_id]}
        res = delete_dialog(WebApiAuth, payload)
        assert res["code"] == 0, res
        assert res["data"] is True, res
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == 0, res

    @pytest.mark.p1
    def test_delete_multiple_dialogs(self, WebApiAuth, add_dialogs_func):
        """All five fixture dialogs can be removed in one batch request."""
        _, dialog_ids = add_dialogs_func
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == 5, res
        payload = {"dialog_ids": dialog_ids}
        res = delete_dialog(WebApiAuth, payload)
        assert res["code"] == 0, res
        assert res["data"] is True, res
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == 0, res

    @pytest.mark.p1
    def test_delete_partial_dialogs(self, WebApiAuth, add_dialogs_func):
        """Deleting three of five dialogs leaves exactly the other two listed."""
        _, dialog_ids = add_dialogs_func
        dialogs_to_delete = dialog_ids[:3]
        payload = {"dialog_ids": dialogs_to_delete}
        res = delete_dialog(WebApiAuth, payload)
        assert res["code"] == 0, res
        assert res["data"] is True, res
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == 2, res
        # The survivors must be precisely the ids that were NOT deleted.
        remaining_ids = [dialog["id"] for dialog in res["data"]]
        for dialog_id in dialog_ids[3:]:
            assert dialog_id in remaining_ids, res

    @pytest.mark.p2
    def test_delete_nonexistent_dialog(self, WebApiAuth):
        """An unknown id surfaces as an ownership error (code 103), not a 404."""
        fake_dialog_id = "nonexistent_dialog_id"
        payload = {"dialog_ids": [fake_dialog_id]}
        res = delete_dialog(WebApiAuth, payload)
        assert res["code"] == 103, res
        assert "Only owner of dialog authorized for this operation." in res["message"], res

    @pytest.mark.p2
    def test_delete_empty_dialog_ids(self, WebApiAuth):
        """An empty id list is a successful no-op."""
        payload = {"dialog_ids": []}
        res = delete_dialog(WebApiAuth, payload)
        assert res["code"] == 0, res

    @pytest.mark.p2
    def test_delete_missing_dialog_ids(self, WebApiAuth):
        """Omitting the required dialog_ids argument yields code 101."""
        payload = {}
        res = delete_dialog(WebApiAuth, payload)
        assert res["code"] == 101, res
        assert res["message"] == "required argument are missing: dialog_ids; ", res

    @pytest.mark.p2
    def test_delete_invalid_dialog_ids_format(self, WebApiAuth):
        """A non-list dialog_ids value is rejected with the ownership error (103)."""
        # NOTE(review): a plain string is iterable, so the server presumably
        # treats each character as an unknown id — confirm this is intended.
        payload = {"dialog_ids": "not_a_list"}
        res = delete_dialog(WebApiAuth, payload)
        assert res["code"] == 103, res
        assert res["message"] == "Only owner of dialog authorized for this operation.", res

    @pytest.mark.p2
    def test_delete_mixed_valid_invalid_dialogs(self, WebApiAuth, add_dialog_func):
        """One bad id fails the whole batch: the valid dialog is left untouched."""
        _, valid_dialog_id = add_dialog_func
        invalid_dialog_id = "nonexistent_dialog_id"
        payload = {"dialog_ids": [valid_dialog_id, invalid_dialog_id]}
        res = delete_dialog(WebApiAuth, payload)
        assert res["code"] == 103, res
        assert res["message"] == "Only owner of dialog authorized for this operation.", res
        # Batch failed atomically: the valid dialog is still listed.
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == 1, res

    @pytest.mark.p3
    def test_delete_dialog_concurrent(self, WebApiAuth, add_dialogs_func):
        """Concurrent single-id deletions leave the listing count consistent."""
        _, dialog_ids = add_dialogs_func
        count = len(dialog_ids)
        with ThreadPoolExecutor(max_workers=3) as executor:
            futures = [executor.submit(delete_dialog, WebApiAuth, {"dialog_ids": [dialog_id]}) for dialog_id in dialog_ids]
            responses = [future.result() for future in as_completed(futures)]
        successful_deletions = sum(1 for response in responses if response["code"] == 0)
        assert successful_deletions > 0, "No dialogs were successfully deleted"
        # Whatever succeeded must be reflected exactly in the remaining count.
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == count - successful_deletions, res

    @pytest.mark.p3
    def test_delete_dialog_idempotent(self, WebApiAuth, add_dialog_func):
        """Re-deleting an already-deleted dialog still returns success (code 0)."""
        # NOTE(review): contrast with test_delete_nonexistent_dialog (103) —
        # ids of previously deleted dialogs apparently remain resolvable.
        _, dialog_id = add_dialog_func
        payload = {"dialog_ids": [dialog_id]}
        res = delete_dialog(WebApiAuth, payload)
        assert res["code"] == 0, res
        res = delete_dialog(WebApiAuth, payload)
        assert res["code"] == 0, res

    @pytest.mark.p3
    def test_delete_large_batch_dialogs(self, WebApiAuth, add_document):
        """A 50-dialog batch delete succeeds in a single request."""
        dataset_id, _ = add_document
        dialog_ids = batch_create_dialogs(WebApiAuth, 50, [dataset_id])
        assert len(dialog_ids) == 50, "Failed to create 50 dialogs"
        payload = {"dialog_ids": dialog_ids}
        res = delete_dialog(WebApiAuth, payload)
        assert res["code"] == 0, res
        assert res["data"] is True, res
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == 0, res

    @pytest.mark.p3
    def test_delete_dialog_with_special_characters(self, WebApiAuth):
        """Dialogs whose name/description contain non-ASCII text delete cleanly."""
        payload = {"name": "Dialog with ็นๆฎๅญ็ฌฆ and รฉmojis ๐ค", "description": "Test dialog with special characters", "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}}
        create_res = create_dialog(WebApiAuth, payload)
        assert create_res["code"] == 0, create_res
        dialog_id = create_res["data"]["id"]
        delete_payload = {"dialog_ids": [dialog_id]}
        res = delete_dialog(WebApiAuth, delete_payload)
        assert res["code"] == 0, res
        assert res["data"] is True, res
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == 0, res

    @pytest.mark.p3
    def test_delete_dialog_preserves_other_user_dialogs(self, WebApiAuth, add_dialog_func):
        """Owner delete succeeds.

        NOTE(review): despite the name, no second user is exercised here —
        only the owner's own delete is verified.
        """
        _, dialog_id = add_dialog_func
        payload = {"dialog_ids": [dialog_id]}
        res = delete_dialog(WebApiAuth, payload)
        assert res["code"] == 0, res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_dialog_app/test_delete_dialogs.py",
"license": "Apache License 2.0",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_dialog_app/test_dialog_edge_cases.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import create_dialog, delete_dialog, get_dialog, update_dialog
@pytest.mark.usefixtures("clear_dialogs")
class TestDialogEdgeCases:
    """Boundary and unusual-input scenarios for the dialog CRUD endpoints."""

    @pytest.mark.p2
    def test_create_dialog_with_tavily_api_key(self, WebApiAuth):
        """Test creating dialog with Tavily API key instead of dataset"""
        payload = {
            "name": "tavily_dialog",
            "prompt_config": {"system": "You are a helpful assistant. Use this knowledge: {knowledge}", "parameters": [{"key": "knowledge", "optional": True}], "tavily_api_key": "test_tavily_key"},
        }
        res = create_dialog(WebApiAuth, payload)
        assert res["code"] == 0, res

    @pytest.mark.skip
    @pytest.mark.p2
    def test_create_dialog_with_different_embedding_models(self, WebApiAuth):
        """Test creating dialog with knowledge bases that have different embedding models"""
        # This test would require creating datasets with different embedding models
        # For now, we'll test the error case with a mock scenario
        # NOTE(review): skipped — the kb ids below are placeholders, not real datasets.
        payload = {
            "name": "mixed_embedding_dialog",
            "kb_ids": ["kb_with_model_a", "kb_with_model_b"],
            "prompt_config": {"system": "You are a helpful assistant with knowledge: {knowledge}", "parameters": [{"key": "knowledge", "optional": True}]},
        }
        res = create_dialog(WebApiAuth, payload)
        # This should fail due to different embedding models
        assert res["code"] == 102, res
        assert "Datasets use different embedding models" in res["message"], res

    @pytest.mark.p2
    def test_create_dialog_with_extremely_long_system_prompt(self, WebApiAuth):
        """Test creating dialog with very long system prompt"""
        # ~30k characters; no length limit is expected on the system prompt.
        long_prompt = "You are a helpful assistant. " * 1000
        payload = {"name": "long_prompt_dialog", "prompt_config": {"system": long_prompt, "parameters": []}}
        res = create_dialog(WebApiAuth, payload)
        assert res["code"] == 0, res

    @pytest.mark.p2
    def test_create_dialog_with_unicode_characters(self, WebApiAuth):
        """Test creating dialog with Unicode characters in various fields"""
        payload = {
            "name": "Unicodeๆต่ฏๅฏน่ฏ๐ค",
            "description": "ๆต่ฏUnicodeๅญ็ฌฆๆฏๆ with รฉmojis ๐๐",
            "icon": "๐ค",
            "prompt_config": {"system": "ไฝ ๆฏไธไธชๆ็จ็ๅฉๆใYou are helpful. Vous รชtes utile. ๐", "parameters": []},
        }
        res = create_dialog(WebApiAuth, payload)
        assert res["code"] == 0, res
        # Unicode content must round-trip unchanged.
        assert res["data"]["name"] == "Unicodeๆต่ฏๅฏน่ฏ๐ค", res
        assert res["data"]["description"] == "ๆต่ฏUnicodeๅญ็ฌฆๆฏๆ with รฉmojis ๐๐", res

    @pytest.mark.p2
    def test_create_dialog_with_extreme_parameter_values(self, WebApiAuth):
        """Test creating dialog with extreme parameter values"""
        # Boundary values at the edges of each numeric range are accepted verbatim.
        payload = {
            "name": "extreme_params_dialog",
            "top_n": 0,
            "top_k": 1,
            "similarity_threshold": 0.0,
            "vector_similarity_weight": 1.0,
            "prompt_config": {"system": "You are a helpful assistant.", "parameters": []},
        }
        res = create_dialog(WebApiAuth, payload)
        assert res["code"] == 0, res
        assert res["data"]["top_n"] == 0, res
        assert res["data"]["top_k"] == 1, res
        assert res["data"]["similarity_threshold"] == 0.0, res
        assert res["data"]["vector_similarity_weight"] == 1.0, res

    @pytest.mark.p2
    def test_create_dialog_with_negative_parameter_values(self, WebApiAuth):
        """Test creating dialog with negative parameter values"""
        payload = {
            "name": "negative_params_dialog",
            "top_n": -1,
            "top_k": -100,
            "similarity_threshold": -0.5,
            "vector_similarity_weight": -0.3,
            "prompt_config": {"system": "You are a helpful assistant.", "parameters": []},
        }
        res = create_dialog(WebApiAuth, payload)
        # NOTE(review): the test deliberately tolerates either acceptance (0)
        # or validation failure (102) — the server's policy is unpinned here.
        assert res["code"] in [0, 102], res

    @pytest.mark.p2
    def test_update_dialog_with_empty_kb_ids(self, WebApiAuth, add_dialog_func):
        """Test updating dialog to remove all knowledge bases"""
        dataset_id, dialog_id = add_dialog_func
        payload = {"dialog_id": dialog_id, "kb_ids": [], "prompt_config": {"system": "You are a helpful assistant without knowledge.", "parameters": []}}
        res = update_dialog(WebApiAuth, payload)
        assert res["code"] == 0, res
        assert res["data"]["kb_ids"] == [], res

    @pytest.mark.p2
    def test_update_dialog_with_null_values(self, WebApiAuth, add_dialog_func):
        """Test updating dialog with null/None values"""
        dataset_id, dialog_id = add_dialog_func
        payload = {"dialog_id": dialog_id, "description": None, "icon": None, "rerank_id": None, "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}}
        res = update_dialog(WebApiAuth, payload)
        assert res["code"] == 0, res

    @pytest.mark.p3
    def test_dialog_with_complex_prompt_parameters(self, WebApiAuth, add_dataset_func):
        """Test dialog with complex prompt parameter configurations"""
        # Mix of required and optional placeholders, all referenced in the prompt.
        payload = {
            "name": "complex_params_dialog",
            "prompt_config": {
                "system": "You are {role} assistant. Use {knowledge} and consider {context}. Optional: {optional_param}",
                "parameters": [{"key": "role", "optional": False}, {"key": "knowledge", "optional": True}, {"key": "context", "optional": False}, {"key": "optional_param", "optional": True}],
            },
            "kb_ids": [add_dataset_func],
        }
        res = create_dialog(WebApiAuth, payload)
        assert res["code"] == 0, res

    @pytest.mark.p3
    def test_dialog_with_malformed_prompt_parameters(self, WebApiAuth):
        """Test dialog with malformed prompt parameter configurations"""
        # Empty key, missing key, and missing "optional" flag respectively.
        payload = {
            "name": "malformed_params_dialog",
            "prompt_config": {
                "system": "You are a helpful assistant.",
                "parameters": [
                    {
                        "key": "",
                        "optional": False,
                    },
                    {"optional": True},
                    {
                        "key": "valid_param",
                    },
                ],
            },
        }
        res = create_dialog(WebApiAuth, payload)
        # NOTE(review): accepts either success or validation error — unpinned.
        assert res["code"] in [0, 102], res

    @pytest.mark.p3
    def test_dialog_operations_with_special_ids(self, WebApiAuth):
        """Test dialog operations with special ID formats"""
        # Well-formed UUIDs that do not correspond to any stored dialog.
        special_ids = [
            "00000000-0000-0000-0000-000000000000",
            "ffffffff-ffff-ffff-ffff-ffffffffffff",
            "12345678-1234-1234-1234-123456789abc",
        ]
        for special_id in special_ids:
            # Lookup of a missing dialog -> 102; delete -> ownership error 103.
            res = get_dialog(WebApiAuth, {"dialog_id": special_id})
            assert res["code"] == 102, f"Should fail for ID: {special_id}"
            res = delete_dialog(WebApiAuth, {"dialog_ids": [special_id]})
            assert res["code"] == 103, f"Should fail for ID: {special_id}"

    @pytest.mark.p3
    def test_dialog_with_extremely_large_llm_settings(self, WebApiAuth):
        """Test dialog with very large LLM settings"""
        # Includes a ~1kB dynamically-named key with a ~1kB value.
        large_llm_setting = {
            "model": "gpt-4",
            "temperature": 0.7,
            "max_tokens": 999999,
            "custom_param_" + "x" * 1000: "large_value_" + "y" * 1000,
        }
        payload = {"name": "large_llm_settings_dialog", "llm_setting": large_llm_setting, "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}}
        res = create_dialog(WebApiAuth, payload)
        assert res["code"] == 0, res

    @pytest.mark.p3
    def test_concurrent_dialog_operations(self, WebApiAuth, add_dialog_func):
        """Test concurrent operations on the same dialog"""
        from concurrent.futures import ThreadPoolExecutor, as_completed

        _, dialog_id = add_dialog_func

        def update_operation(i):
            # Each worker writes a distinct name/prompt; last writer wins.
            payload = {"dialog_id": dialog_id, "name": f"concurrent_update_{i}", "prompt_config": {"system": f"You are assistant number {i}.", "parameters": []}}
            return update_dialog(WebApiAuth, payload)

        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(update_operation, i) for i in range(10)]
            responses = [future.result() for future in as_completed(futures)]
        successful_updates = sum(1 for response in responses if response["code"] == 0)
        assert successful_updates > 0, "No updates succeeded"
        # The dialog must remain readable after the concurrent writes.
        res = get_dialog(WebApiAuth, {"dialog_id": dialog_id})
        assert res["code"] == 0, res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_dialog_app/test_dialog_edge_cases.py",
"license": "Apache License 2.0",
"lines": 182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_dialog_app/test_get_dialog.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import create_dialog, get_dialog
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
@pytest.mark.usefixtures("clear_dialogs")
class TestAuthorization:
    """Fetching a dialog without valid credentials must be rejected with HTTP 401."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
        ids=["empty_auth", "invalid_api_token"],
    )
    def test_auth_invalid(self, invalid_auth, expected_code, expected_message, add_dialog_func):
        _, dialog_id = add_dialog_func
        response = get_dialog(invalid_auth, {"dialog_id": dialog_id})
        assert response["code"] == expected_code, response
        assert response["message"] == expected_message, response
class TestDialogGet:
    """Coverage of the single-dialog fetch endpoint (query key: ``dialog_id``)."""

    @pytest.mark.p1
    def test_get_existing_dialog(self, WebApiAuth, add_dialog_func):
        """Fetching an existing dialog returns all expected top-level fields."""
        _, dialog_id = add_dialog_func
        res = get_dialog(WebApiAuth, {"dialog_id": dialog_id})
        assert res["code"] == 0, res
        data = res["data"]
        assert data["id"] == dialog_id, res
        assert "name" in data, res
        assert "description" in data, res
        assert "kb_ids" in data, res
        assert "kb_names" in data, res
        assert "prompt_config" in data, res
        assert "llm_setting" in data, res
        assert "top_n" in data, res
        assert "top_k" in data, res
        assert "similarity_threshold" in data, res
        assert "vector_similarity_weight" in data, res

    @pytest.mark.p1
    def test_get_dialog_with_kb_names(self, WebApiAuth, add_dialog_func):
        """kb_ids and kb_names are parallel lists of equal length."""
        _, dialog_id = add_dialog_func
        res = get_dialog(WebApiAuth, {"dialog_id": dialog_id})
        assert res["code"] == 0, res
        data = res["data"]
        assert isinstance(data["kb_ids"], list), res
        assert isinstance(data["kb_names"], list), res
        assert len(data["kb_ids"]) == len(data["kb_names"]), res

    @pytest.mark.p2
    def test_get_nonexistent_dialog(self, WebApiAuth):
        """An unknown dialog id yields code 102 with a not-found message."""
        fake_dialog_id = "nonexistent_dialog_id"
        res = get_dialog(WebApiAuth, {"dialog_id": fake_dialog_id})
        assert res["code"] == 102, res
        assert "Dialog not found" in res["message"], res

    @pytest.mark.p2
    def test_get_dialog_missing_id(self, WebApiAuth):
        """Omitting dialog_id entirely surfaces as a 400 BadRequestKeyError (code 100)."""
        res = get_dialog(WebApiAuth, {})
        assert res["code"] == 100, res
        assert res["message"] == "<BadRequestKeyError '400: Bad Request'>", res

    @pytest.mark.p2
    def test_get_dialog_empty_id(self, WebApiAuth):
        """An empty dialog_id string behaves like an unknown id (code 102)."""
        res = get_dialog(WebApiAuth, {"dialog_id": ""})
        assert res["code"] == 102, res

    @pytest.mark.p2
    def test_get_dialog_invalid_id_format(self, WebApiAuth):
        """A non-UUID id string behaves like an unknown id (code 102)."""
        res = get_dialog(WebApiAuth, {"dialog_id": "invalid_format"})
        assert res["code"] == 102, res

    @pytest.mark.p3
    def test_get_dialog_data_structure(self, WebApiAuth, add_dialog_func):
        """Every required field is present with the expected Python type."""
        _, dialog_id = add_dialog_func
        res = get_dialog(WebApiAuth, {"dialog_id": dialog_id})
        assert res["code"] == 0, res
        data = res["data"]
        required_fields = [
            "id",
            "name",
            "description",
            "kb_ids",
            "kb_names",
            "prompt_config",
            "llm_setting",
            "top_n",
            "top_k",
            "similarity_threshold",
            "vector_similarity_weight",
            "create_time",
            "update_time",
        ]
        for field in required_fields:
            assert field in data, f"Missing field: {field}"
        assert isinstance(data["id"], str), res
        assert isinstance(data["name"], str), res
        assert isinstance(data["kb_ids"], list), res
        assert isinstance(data["kb_names"], list), res
        assert isinstance(data["prompt_config"], dict), res
        assert isinstance(data["top_n"], int), res
        assert isinstance(data["top_k"], int), res
        assert isinstance(data["similarity_threshold"], (int, float)), res
        assert isinstance(data["vector_similarity_weight"], (int, float)), res

    @pytest.mark.p3
    def test_get_dialog_prompt_config_structure(self, WebApiAuth, add_dialog_func):
        """prompt_config always carries a string ``system`` and a list ``parameters``."""
        _, dialog_id = add_dialog_func
        res = get_dialog(WebApiAuth, {"dialog_id": dialog_id})
        assert res["code"] == 0, res
        prompt_config = res["data"]["prompt_config"]
        assert "system" in prompt_config, res
        assert "parameters" in prompt_config, res
        assert isinstance(prompt_config["system"], str), res
        assert isinstance(prompt_config["parameters"], list), res

    @pytest.mark.p3
    def test_get_dialog_with_multiple_kbs(self, WebApiAuth, add_dataset_func):
        """A dialog created with two kb ids reports both ids and names."""
        # NOTE(review): both names bind the SAME fixture value, so
        # dataset_id1 == dataset_id2 — this sends a duplicated id rather than
        # two distinct datasets. Confirm whether that is the intent.
        dataset_id1 = add_dataset_func
        dataset_id2 = add_dataset_func
        payload = {
            "name": "multi_kb_dialog",
            "kb_ids": [dataset_id1, dataset_id2],
            "prompt_config": {"system": "You are a helpful assistant with knowledge: {knowledge}", "parameters": [{"key": "knowledge", "optional": True}]},
        }
        create_res = create_dialog(WebApiAuth, payload)
        assert create_res["code"] == 0, create_res
        dialog_id = create_res["data"]["id"]
        res = get_dialog(WebApiAuth, {"dialog_id": dialog_id})
        assert res["code"] == 0, res
        data = res["data"]
        assert len(data["kb_ids"]) == 2, res
        assert len(data["kb_names"]) == 2, res
        assert dataset_id1 in data["kb_ids"], res
        assert dataset_id2 in data["kb_ids"], res

    @pytest.mark.p3
    def test_get_dialog_with_invalid_kb(self, WebApiAuth):
        """Unresolvable kb ids are silently dropped from the fetched dialog."""
        payload = {
            "name": "invalid_kb_dialog",
            "kb_ids": ["invalid_kb_id"],
            "prompt_config": {"system": "You are a helpful assistant with knowledge: {knowledge}", "parameters": [{"key": "knowledge", "optional": True}]},
        }
        create_res = create_dialog(WebApiAuth, payload)
        assert create_res["code"] == 0, create_res
        dialog_id = create_res["data"]["id"]
        res = get_dialog(WebApiAuth, {"dialog_id": dialog_id})
        assert res["code"] == 0, res
        data = res["data"]
        assert len(data["kb_ids"]) == 0, res
        assert len(data["kb_names"]) == 0, res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_dialog_app/test_get_dialog.py",
"license": "Apache License 2.0",
"lines": 157,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_dialog_app/test_list_dialogs.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import batch_create_dialogs, create_dialog, list_dialogs
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
@pytest.mark.usefixtures("clear_dialogs")
class TestAuthorization:
    """Listing dialogs without valid credentials must be rejected with HTTP 401."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
        ids=["empty_auth", "invalid_api_token"],
    )
    def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
        response = list_dialogs(invalid_auth)
        assert response["code"] == expected_code, response
        assert response["message"] == expected_message, response
class TestDialogList:
    """Coverage of the dialog listing endpoint (no filters — lists the user's dialogs)."""

    @pytest.mark.p1
    @pytest.mark.usefixtures("add_dialogs_func")
    def test_list_empty_dialogs(self, WebApiAuth):
        """All five fixture dialogs are listed.

        NOTE(review): the name says "empty" but the fixture creates five
        dialogs and the assertion expects five — consider renaming.
        """
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == 5, res

    @pytest.mark.p1
    def test_list_multiple_dialogs(self, WebApiAuth, add_dialogs_func):
        """Every created dialog id appears in the listing."""
        _, dialog_ids = add_dialogs_func
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == 5, res
        returned_ids = [dialog["id"] for dialog in res["data"]]
        for dialog_id in dialog_ids:
            assert dialog_id in returned_ids, res

    @pytest.mark.p2
    @pytest.mark.usefixtures("add_dialogs_func")
    def test_list_dialogs_data_structure(self, WebApiAuth):
        """Each listed dialog carries the required fields with expected types."""
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == 5, res
        dialog = res["data"][0]
        required_fields = [
            "id",
            "name",
            "description",
            "kb_ids",
            "kb_names",
            "prompt_config",
            "llm_setting",
            "top_n",
            "top_k",
            "similarity_threshold",
            "vector_similarity_weight",
            "create_time",
            "update_time",
        ]
        for field in required_fields:
            assert field in dialog, f"Missing field: {field}"
        assert isinstance(dialog["id"], str), res
        assert isinstance(dialog["name"], str), res
        assert isinstance(dialog["kb_ids"], list), res
        assert isinstance(dialog["kb_names"], list), res
        assert isinstance(dialog["prompt_config"], dict), res
        assert isinstance(dialog["top_n"], int), res
        assert isinstance(dialog["top_k"], int), res
        assert isinstance(dialog["similarity_threshold"], (int, float)), res
        assert isinstance(dialog["vector_similarity_weight"], (int, float)), res

    @pytest.mark.p2
    @pytest.mark.usefixtures("add_dialogs_func")
    def test_list_dialogs_with_kb_names(self, WebApiAuth):
        """kb_ids and kb_names are parallel lists of equal length."""
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        dialog = res["data"][0]
        assert isinstance(dialog["kb_ids"], list), res
        assert isinstance(dialog["kb_names"], list), res
        assert len(dialog["kb_ids"]) == len(dialog["kb_names"]), res

    @pytest.mark.p2
    @pytest.mark.usefixtures("add_dialogs_func")
    def test_list_dialogs_ordering(self, WebApiAuth):
        """The listing is sorted by create_time in descending (newest-first) order."""
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == 5, res
        dialogs = res["data"]
        for i in range(len(dialogs) - 1):
            current_time = dialogs[i]["create_time"]
            next_time = dialogs[i + 1]["create_time"]
            assert current_time >= next_time, f"Dialogs not properly ordered: {current_time} should be >= {next_time}"

    @pytest.mark.p3
    @pytest.mark.usefixtures("clear_dialogs")
    def test_list_dialogs_with_invalid_kb(self, WebApiAuth):
        """Unresolvable kb ids are dropped when the dialog is listed."""
        payload = {
            "name": "invalid_kb_dialog",
            "kb_ids": ["invalid_kb_id"],
            "prompt_config": {"system": "You are a helpful assistant with knowledge: {knowledge}", "parameters": [{"key": "knowledge", "optional": True}]},
        }
        create_res = create_dialog(WebApiAuth, payload)
        assert create_res["code"] == 0, create_res
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == 1, res
        dialog = res["data"][0]
        assert len(dialog["kb_ids"]) == 0, res
        assert len(dialog["kb_names"]) == 0, res

    @pytest.mark.p3
    @pytest.mark.usefixtures("clear_dialogs")
    def test_list_dialogs_with_multiple_kbs(self, WebApiAuth, add_dataset_func):
        """A dialog bound to two kb ids lists both ids and names."""
        # NOTE(review): both names bind the SAME fixture value, so
        # dataset_id1 == dataset_id2 — a duplicated id, not two datasets.
        dataset_id1 = add_dataset_func
        dataset_id2 = add_dataset_func
        payload = {
            "name": "multi_kb_dialog",
            "kb_ids": [dataset_id1, dataset_id2],
            "prompt_config": {"system": "You are a helpful assistant with knowledge: {knowledge}", "parameters": [{"key": "knowledge", "optional": True}]},
        }
        create_res = create_dialog(WebApiAuth, payload)
        assert create_res["code"] == 0, create_res
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == 1, res
        dialog = res["data"][0]
        assert len(dialog["kb_ids"]) == 2, res
        assert len(dialog["kb_names"]) == 2, res
        assert dataset_id1 in dialog["kb_ids"], res
        assert dataset_id2 in dialog["kb_ids"], res

    @pytest.mark.p3
    @pytest.mark.usefixtures("add_dialogs_func")
    def test_list_dialogs_prompt_config_structure(self, WebApiAuth):
        """Listed prompt_config carries a string ``system`` and a list ``parameters``."""
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        dialog = res["data"][0]
        prompt_config = dialog["prompt_config"]
        assert "system" in prompt_config, res
        assert "parameters" in prompt_config, res
        assert isinstance(prompt_config["system"], str), res
        assert isinstance(prompt_config["parameters"], list), res

    @pytest.mark.p3
    @pytest.mark.usefixtures("clear_dialogs")
    def test_list_dialogs_performance(self, WebApiAuth, add_document):
        """A listing of 100 dialogs returns every created id in one response."""
        dataset_id, _ = add_document
        dialog_ids = batch_create_dialogs(WebApiAuth, 100, [dataset_id])
        assert len(dialog_ids) == 100, "Failed to create 100 dialogs"
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == 100, res
        returned_ids = [dialog["id"] for dialog in res["data"]]
        for dialog_id in dialog_ids:
            assert dialog_id in returned_ids, f"Dialog {dialog_id} not found in list"

    @pytest.mark.p3
    @pytest.mark.usefixtures("clear_dialogs")
    def test_list_dialogs_with_mixed_kb_states(self, WebApiAuth, add_dataset_func):
        """Only the resolvable kb id survives when valid and invalid ids are mixed."""
        valid_dataset_id = add_dataset_func
        payload = {
            "name": "mixed_kb_dialog",
            "kb_ids": [valid_dataset_id, "invalid_kb_id"],
            "prompt_config": {"system": "You are a helpful assistant with knowledge: {knowledge}", "parameters": [{"key": "knowledge", "optional": True}]},
        }
        create_res = create_dialog(WebApiAuth, payload)
        assert create_res["code"] == 0, create_res
        res = list_dialogs(WebApiAuth)
        assert res["code"] == 0, res
        assert len(res["data"]) == 1, res
        dialog = res["data"][0]
        assert len(dialog["kb_ids"]) == 1, res
        assert dialog["kb_ids"][0] == valid_dataset_id, res
        assert len(dialog["kb_names"]) == 1, res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_dialog_app/test_list_dialogs.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_dialog_app/test_update_dialog.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import update_dialog
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
@pytest.mark.usefixtures("clear_dialogs")
class TestAuthorization:
    """Updating a dialog without valid credentials must be rejected with HTTP 401."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
        ids=["empty_auth", "invalid_api_token"],
    )
    def test_auth_invalid(self, invalid_auth, expected_code, expected_message, add_dialog_func):
        _, dialog_id = add_dialog_func
        update_payload = {"dialog_id": dialog_id, "name": "updated_name", "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}}
        response = update_dialog(invalid_auth, update_payload)
        assert response["code"] == expected_code, response
        assert response["message"] == expected_message, response
class TestDialogUpdate:
@pytest.mark.p1
def test_update_name(self, WebApiAuth, add_dialog_func):
_, dialog_id = add_dialog_func
new_name = "updated_dialog_name"
payload = {"dialog_id": dialog_id, "name": new_name, "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}}
res = update_dialog(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["name"] == new_name, res
@pytest.mark.p2
def test_update_description(self, WebApiAuth, add_dialog_func):
_, dialog_id = add_dialog_func
new_description = "Updated description"
payload = {"dialog_id": dialog_id, "description": new_description, "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}}
res = update_dialog(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["description"] == new_description, res
@pytest.mark.p1
def test_update_prompt_config(self, WebApiAuth, add_dialog_func):
_, dialog_id = add_dialog_func
new_prompt_config = {"system": "You are an updated helpful assistant with {param1}.", "parameters": [{"key": "param1", "optional": False}]}
payload = {"dialog_id": dialog_id, "prompt_config": new_prompt_config}
res = update_dialog(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["prompt_config"]["system"] == new_prompt_config["system"], res
@pytest.mark.p1
def test_update_kb_ids(self, WebApiAuth, add_dialog_func, add_dataset_func):
_, dialog_id = add_dialog_func
new_dataset_id = add_dataset_func
payload = {
"dialog_id": dialog_id,
"kb_ids": [new_dataset_id],
"prompt_config": {"system": "You are a helpful assistant with knowledge: {knowledge}", "parameters": [{"key": "knowledge", "optional": True}]},
}
res = update_dialog(WebApiAuth, payload)
assert res["code"] == 0, res
assert new_dataset_id in res["data"]["kb_ids"], res
@pytest.mark.p1
def test_update_llm_settings(self, WebApiAuth, add_dialog_func):
_, dialog_id = add_dialog_func
new_llm_setting = {"model": "gpt-4", "temperature": 0.9, "max_tokens": 2000}
payload = {"dialog_id": dialog_id, "llm_setting": new_llm_setting, "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}}
res = update_dialog(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["llm_setting"]["model"] == "gpt-4", res
assert res["data"]["llm_setting"]["temperature"] == 0.9, res
@pytest.mark.p1
def test_update_retrieval_settings(self, WebApiAuth, add_dialog_func):
_, dialog_id = add_dialog_func
payload = {
"dialog_id": dialog_id,
"top_n": 15,
"top_k": 4096,
"similarity_threshold": 0.3,
"vector_similarity_weight": 0.7,
"prompt_config": {"system": "You are a helpful assistant.", "parameters": []},
}
res = update_dialog(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["top_n"] == 15, res
assert res["data"]["top_k"] == 4096, res
assert res["data"]["similarity_threshold"] == 0.3, res
assert res["data"]["vector_similarity_weight"] == 0.7, res
@pytest.mark.p2
def test_update_nonexistent_dialog(self, WebApiAuth):
fake_dialog_id = "nonexistent_dialog_id"
payload = {"dialog_id": fake_dialog_id, "name": "updated_name", "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}}
res = update_dialog(WebApiAuth, payload)
assert res["code"] == 102, res
assert "Dialog not found" in res["message"], res
@pytest.mark.p2
def test_update_with_invalid_prompt_config(self, WebApiAuth, add_dialog_func):
_, dialog_id = add_dialog_func
payload = {"dialog_id": dialog_id, "prompt_config": {"system": "You are a helpful assistant.", "parameters": [{"key": "unused_param", "optional": False}]}}
res = update_dialog(WebApiAuth, payload)
assert res["code"] == 102, res
assert "Parameter 'unused_param' is not used" in res["message"], res
@pytest.mark.p2
def test_update_with_knowledge_but_no_kb(self, WebApiAuth, add_dialog_func):
_, dialog_id = add_dialog_func
payload = {"dialog_id": dialog_id, "kb_ids": [], "prompt_config": {"system": "You are a helpful assistant with knowledge: {knowledge}", "parameters": [{"key": "knowledge", "optional": True}]}}
res = update_dialog(WebApiAuth, payload)
assert res["code"] == 102, res
assert "Please remove `{knowledge}` in system prompt" in res["message"], res
@pytest.mark.p2
def test_update_icon(self, WebApiAuth, add_dialog_func):
_, dialog_id = add_dialog_func
new_icon = "๐"
payload = {"dialog_id": dialog_id, "icon": new_icon, "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}}
res = update_dialog(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["icon"] == new_icon, res
@pytest.mark.p2
def test_update_rerank_id(self, WebApiAuth, add_dialog_func):
_, dialog_id = add_dialog_func
payload = {"dialog_id": dialog_id, "rerank_id": "test_rerank_model", "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}}
res = update_dialog(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["rerank_id"] == "test_rerank_model", res
@pytest.mark.p3
def test_update_multiple_fields(self, WebApiAuth, add_dialog_func):
_, dialog_id = add_dialog_func
payload = {
"dialog_id": dialog_id,
"name": "multi_update_dialog",
"description": "Updated with multiple fields",
"icon": "๐",
"top_n": 20,
"similarity_threshold": 0.4,
"prompt_config": {"system": "You are a multi-updated assistant.", "parameters": []},
}
res = update_dialog(WebApiAuth, payload)
assert res["code"] == 0, res
data = res["data"]
assert data["name"] == "multi_update_dialog", res
assert data["description"] == "Updated with multiple fields", res
assert data["icon"] == "๐", res
assert data["top_n"] == 20, res
assert data["similarity_threshold"] == 0.4, res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_dialog_app/test_update_dialog.py",
"license": "Apache License 2.0",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_chunk_app/test_create_chunk.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import add_chunk, delete_document, get_chunk, list_chunks
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
def validate_chunk_details(auth, kb_id, doc_id, payload, res):
chunk_id = res["data"]["chunk_id"]
res = get_chunk(auth, {"chunk_id": chunk_id})
assert res["code"] == 0, res
chunk = res["data"]
assert chunk["doc_id"] == doc_id
assert chunk["kb_id"] == kb_id
assert chunk["content_with_weight"] == payload["content_with_weight"]
if "important_kwd" in payload:
assert chunk["important_kwd"] == payload["important_kwd"]
if "question_kwd" in payload:
expected = [str(q).strip() for q in payload.get("question_kwd", [])]
assert chunk["question_kwd"] == expected
@pytest.mark.p2
class TestAuthorization:
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
)
def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
res = add_chunk(invalid_auth)
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
class TestAddChunk:
@pytest.mark.p1
@pytest.mark.parametrize(
"payload, expected_code, expected_message",
[
({"content_with_weight": None}, 100, """TypeError("unsupported operand type(s) for +: 'NoneType' and 'str'")"""),
({"content_with_weight": ""}, 100, """Exception('Error: 413 - {"error":"Input validation error: `inputs` cannot be empty","error_type":"Validation"}')"""),
pytest.param(
{"content_with_weight": 1},
100,
"""TypeError("unsupported operand type(s) for +: 'int' and 'str'")""",
marks=pytest.mark.skip,
),
({"content_with_weight": "a"}, 0, ""),
({"content_with_weight": " "}, 0, ""),
({"content_with_weight": "\n!?ใ๏ผ๏ผ๏ผ\"'"}, 0, ""),
],
)
def test_content(self, WebApiAuth, add_document, payload, expected_code, expected_message):
kb_id, doc_id = add_document
res = list_chunks(WebApiAuth, {"doc_id": doc_id})
if res["code"] == 0:
chunks_count = res["data"]["doc"]["chunk_num"]
else:
chunks_count = 0
res = add_chunk(WebApiAuth, {**payload, "doc_id": doc_id})
assert res["code"] == expected_code, res
if expected_code == 0:
validate_chunk_details(WebApiAuth, kb_id, doc_id, payload, res)
res = list_chunks(WebApiAuth, {"doc_id": doc_id})
assert res["code"] == 0, res
assert res["data"]["doc"]["chunk_num"] == chunks_count + 1, res
else:
assert res["message"] == expected_message, res
@pytest.mark.p2
@pytest.mark.parametrize(
"payload, expected_code, expected_message",
[
({"content_with_weight": "chunk test", "important_kwd": ["a", "b", "c"]}, 0, ""),
({"content_with_weight": "chunk test", "important_kwd": [""]}, 0, ""),
(
{"content_with_weight": "chunk test", "important_kwd": [1]},
100,
"TypeError('sequence item 0: expected str instance, int found')",
),
({"content_with_weight": "chunk test", "important_kwd": ["a", "a"]}, 0, ""),
({"content_with_weight": "chunk test", "important_kwd": "abc"}, 102, "`important_kwd` is required to be a list"),
({"content_with_weight": "chunk test", "important_kwd": 123}, 102, "`important_kwd` is required to be a list"),
],
)
def test_important_keywords(self, WebApiAuth, add_document, payload, expected_code, expected_message):
kb_id, doc_id = add_document
res = list_chunks(WebApiAuth, {"doc_id": doc_id})
if res["code"] == 0:
chunks_count = res["data"]["doc"]["chunk_num"]
else:
chunks_count = 0
res = add_chunk(WebApiAuth, {**payload, "doc_id": doc_id})
assert res["code"] == expected_code, res
if expected_code == 0:
validate_chunk_details(WebApiAuth, kb_id, doc_id, payload, res)
res = list_chunks(WebApiAuth, {"doc_id": doc_id})
assert res["code"] == 0, res
assert res["data"]["doc"]["chunk_num"] == chunks_count + 1, res
else:
assert res["message"] == expected_message, res
@pytest.mark.p2
@pytest.mark.parametrize(
"payload, expected_code, expected_message",
[
({"content_with_weight": "chunk test", "question_kwd": ["a", "b", "c"]}, 0, ""),
({"content_with_weight": "chunk test", "question_kwd": [""]}, 100, """Exception('Error: 413 - {"error":"Input validation error: `inputs` cannot be empty","error_type":"Validation"}')"""),
({"content_with_weight": "chunk test", "question_kwd": [1]}, 100, "TypeError('sequence item 0: expected str instance, int found')"),
({"content_with_weight": "chunk test", "question_kwd": ["a", "a"]}, 0, ""),
({"content_with_weight": "chunk test", "question_kwd": "abc"}, 102, "`question_kwd` is required to be a list"),
({"content_with_weight": "chunk test", "question_kwd": 123}, 102, "`question_kwd` is required to be a list"),
],
)
def test_questions(self, WebApiAuth, add_document, payload, expected_code, expected_message):
kb_id, doc_id = add_document
res = list_chunks(WebApiAuth, {"doc_id": doc_id})
if res["code"] == 0:
chunks_count = res["data"]["doc"]["chunk_num"]
else:
chunks_count = 0
res = add_chunk(WebApiAuth, {**payload, "doc_id": doc_id})
assert res["code"] == expected_code, res
if expected_code == 0:
validate_chunk_details(WebApiAuth, kb_id, doc_id, payload, res)
res = list_chunks(WebApiAuth, {"doc_id": doc_id})
assert res["code"] == 0, res
assert res["data"]["doc"]["chunk_num"] == chunks_count + 1, res
else:
assert res["message"] == expected_message, res
@pytest.mark.p2
def test_get_chunk_not_found(self, WebApiAuth):
res = get_chunk(WebApiAuth, {"chunk_id": "missing_chunk_id"})
assert res["code"] != 0, res
assert "Chunk not found" in res["message"], res
@pytest.mark.p2
def test_create_chunk_with_tag_fields(self, WebApiAuth, add_document):
_, doc_id = add_document
res = list_chunks(WebApiAuth, {"doc_id": doc_id})
if res["code"] == 0:
chunks_count = res["data"]["doc"]["chunk_num"]
else:
chunks_count = 0
payload = {
"doc_id": doc_id,
"content_with_weight": "chunk with tags",
"tag_feas": [0.1, 0.2],
"important_kwd": ["tag"],
"question_kwd": ["question"],
}
res = add_chunk(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["chunk_id"], res
res = list_chunks(WebApiAuth, {"doc_id": doc_id})
assert res["code"] == 0, res
assert res["data"]["doc"]["chunk_num"] == chunks_count + 1, res
@pytest.mark.p3
@pytest.mark.parametrize(
"doc_id, expected_code, expected_message",
[
("", 102, "Document not found!"),
("invalid_document_id", 102, "Document not found!"),
],
)
def test_invalid_document_id(self, WebApiAuth, add_document, doc_id, expected_code, expected_message):
_, _ = add_document
res = add_chunk(WebApiAuth, {"doc_id": doc_id, "content_with_weight": "chunk test"})
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
@pytest.mark.p3
def test_repeated_add_chunk(self, WebApiAuth, add_document):
payload = {"content_with_weight": "chunk test"}
kb_id, doc_id = add_document
res = list_chunks(WebApiAuth, {"doc_id": doc_id})
if res["code"] != 0:
assert False, res
chunks_count = res["data"]["doc"]["chunk_num"]
res = add_chunk(WebApiAuth, {**payload, "doc_id": doc_id})
assert res["code"] == 0, res
validate_chunk_details(WebApiAuth, kb_id, doc_id, payload, res)
res = list_chunks(WebApiAuth, {"doc_id": doc_id})
if res["code"] != 0:
assert False, res
assert res["data"]["doc"]["chunk_num"] == chunks_count + 1, res
res = add_chunk(WebApiAuth, {**payload, "doc_id": doc_id})
assert res["code"] == 0, res
validate_chunk_details(WebApiAuth, kb_id, doc_id, payload, res)
res = list_chunks(WebApiAuth, {"doc_id": doc_id})
if res["code"] != 0:
assert False, res
assert res["data"]["doc"]["chunk_num"] == chunks_count + 2, res
@pytest.mark.p2
def test_add_chunk_to_deleted_document(self, WebApiAuth, add_document):
_, doc_id = add_document
delete_document(WebApiAuth, {"doc_id": doc_id})
res = add_chunk(WebApiAuth, {"doc_id": doc_id, "content_with_weight": "chunk test"})
assert res["code"] == 102, res
assert res["message"] == "Document not found!", res
@pytest.mark.skip(reason="issues/6411")
@pytest.mark.p3
def test_concurrent_add_chunk(self, WebApiAuth, add_document):
count = 50
_, doc_id = add_document
res = list_chunks(WebApiAuth, {"doc_id": doc_id})
if res["code"] == 0:
chunks_count = res["data"]["doc"]["chunk_num"]
else:
chunks_count = 0
with ThreadPoolExecutor(max_workers=5) as executor:
futures = [
executor.submit(
add_chunk,
WebApiAuth,
{"doc_id": doc_id, "content_with_weight": f"chunk test {i}"},
)
for i in range(count)
]
responses = list(as_completed(futures))
assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures)
res = list_chunks(WebApiAuth, {"doc_id": doc_id})
assert res["code"] == 0, res
assert res["data"]["doc"]["chunk_num"] == chunks_count + count
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_chunk_app/test_create_chunk.py",
"license": "Apache License 2.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_chunk_app/test_list_chunks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_add_chunks, list_chunks, update_chunk
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
@pytest.mark.p2
class TestAuthorization:
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
)
def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
res = list_chunks(invalid_auth, {"doc_id": "document_id"})
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
class TestChunksList:
@pytest.mark.p1
@pytest.mark.parametrize(
"params, expected_code, expected_page_size, expected_message",
[
pytest.param({"page": None, "size": 2}, 100, 0, """TypeError("int() argument must be a string, a bytes-like object or a real number, not 'NoneType'")""", marks=pytest.mark.skip),
pytest.param({"page": 0, "size": 2}, 100, 0, "ValueError('Search does not support negative slicing.')", marks=pytest.mark.skip),
({"page": 2, "size": 2}, 0, 2, ""),
({"page": 3, "size": 2}, 0, 1, ""),
({"page": "3", "size": 2}, 0, 1, ""),
pytest.param({"page": -1, "size": 2}, 100, 0, "ValueError('Search does not support negative slicing.')", marks=pytest.mark.skip),
pytest.param({"page": "a", "size": 2}, 100, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
],
)
def test_page(self, WebApiAuth, add_chunks, params, expected_code, expected_page_size, expected_message):
_, doc_id, _ = add_chunks
payload = {"doc_id": doc_id}
if params:
payload.update(params)
res = list_chunks(WebApiAuth, payload)
assert res["code"] == expected_code, res
if expected_code == 0:
assert len(res["data"]["chunks"]) == expected_page_size, res
else:
assert res["message"] == expected_message, res
@pytest.mark.p1
@pytest.mark.parametrize(
"params, expected_code, expected_page_size, expected_message",
[
({"size": None}, 100, 0, """TypeError("int() argument must be a string, a bytes-like object or a real number, not 'NoneType'")"""),
pytest.param({"size": 0}, 0, 5, ""),
({"size": 1}, 0, 1, ""),
({"size": 6}, 0, 5, ""),
({"size": "1"}, 0, 1, ""),
pytest.param({"size": -1}, 0, 5, "", marks=pytest.mark.skip),
pytest.param({"size": "a"}, 100, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
],
)
def test_page_size(self, WebApiAuth, add_chunks, params, expected_code, expected_page_size, expected_message):
_, doc_id, _ = add_chunks
payload = {"doc_id": doc_id}
if params:
payload.update(params)
res = list_chunks(WebApiAuth, payload)
assert res["code"] == expected_code, res
if expected_code == 0:
assert len(res["data"]["chunks"]) == expected_page_size, res
else:
assert res["message"] == expected_message, res
@pytest.mark.p2
def test_available_int_filter(self, WebApiAuth, add_chunks):
_, doc_id, chunk_ids = add_chunks
chunk_id = chunk_ids[0]
res = update_chunk(
WebApiAuth,
{"doc_id": doc_id, "chunk_id": chunk_id, "content_with_weight": "unchanged content", "available_int": 0},
)
assert res["code"] == 0, res
from time import sleep
sleep(1)
res = list_chunks(WebApiAuth, {"doc_id": doc_id, "available_int": 0})
assert res["code"] == 0, res
assert len(res["data"]["chunks"]) >= 1, res
assert all(chunk["available_int"] == 0 for chunk in res["data"]["chunks"]), res
# Restore the class-scoped fixture state for subsequent keyword cases.
res = update_chunk(
WebApiAuth,
{"doc_id": doc_id, "chunk_id": chunk_id, "content_with_weight": "chunk test 0", "available_int": 1},
)
assert res["code"] == 0, res
sleep(1)
@pytest.mark.p2
@pytest.mark.parametrize(
"params, expected_page_size",
[
({"keywords": None}, 5),
({"keywords": ""}, 5),
({"keywords": "1"}, 1),
pytest.param({"keywords": "chunk"}, 4, marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6509")),
({"keywords": "content"}, 1),
({"keywords": "unknown"}, 0),
],
)
def test_keywords(self, WebApiAuth, add_chunks, params, expected_page_size):
_, doc_id, _ = add_chunks
payload = {"doc_id": doc_id}
if params:
payload.update(params)
res = list_chunks(WebApiAuth, payload)
assert res["code"] == 0, res
assert len(res["data"]["chunks"]) == expected_page_size, res
@pytest.mark.p3
def test_invalid_params(self, WebApiAuth, add_chunks):
_, doc_id, _ = add_chunks
payload = {"doc_id": doc_id, "a": "b"}
res = list_chunks(WebApiAuth, payload)
assert res["code"] == 0, res
assert len(res["data"]["chunks"]) == 5, res
@pytest.mark.p3
def test_concurrent_list(self, WebApiAuth, add_chunks):
_, doc_id, _ = add_chunks
count = 100
with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(list_chunks, WebApiAuth, {"doc_id": doc_id}) for i in range(count)]
responses = list(as_completed(futures))
assert len(responses) == count, responses
assert all(len(future.result()["data"]["chunks"]) == 5 for future in futures)
@pytest.mark.p1
def test_default(self, WebApiAuth, add_document):
_, doc_id = add_document
res = list_chunks(WebApiAuth, {"doc_id": doc_id})
chunks_count = res["data"]["doc"]["chunk_num"]
batch_add_chunks(WebApiAuth, doc_id, 31)
# issues/6487
from time import sleep
sleep(3)
res = list_chunks(WebApiAuth, {"doc_id": doc_id})
assert res["code"] == 0
assert len(res["data"]["chunks"]) == 30
assert res["data"]["doc"]["chunk_num"] == chunks_count + 31
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_chunk_app/test_list_chunks.py",
"license": "Apache License 2.0",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_chunk_app/test_retrieval_chunks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import retrieval_chunks
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
@pytest.mark.p2
class TestAuthorization:
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
)
def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
res = retrieval_chunks(invalid_auth, {"kb_id": "dummy_kb_id", "question": "dummy question"})
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
class TestChunksRetrieval:
@pytest.mark.p1
@pytest.mark.parametrize(
"payload, expected_code, expected_page_size, expected_message",
[
({"question": "chunk", "kb_id": None}, 0, 4, ""),
({"question": "chunk", "doc_ids": None}, 101, 0, "required argument are missing: kb_id; "),
({"question": "chunk", "kb_id": None, "doc_ids": None}, 0, 4, ""),
({"question": "chunk"}, 101, 0, "required argument are missing: kb_id; "),
],
)
def test_basic_scenarios(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
dataset_id, document_id, _ = add_chunks
if "kb_id" in payload:
payload["kb_id"] = [dataset_id]
if "doc_ids" in payload:
payload["doc_ids"] = [document_id]
res = retrieval_chunks(WebApiAuth, payload)
assert res["code"] == expected_code, res
if expected_code == 0:
assert len(res["data"]["chunks"]) == expected_page_size, res
else:
assert res["message"] == expected_message, res
@pytest.mark.p2
@pytest.mark.parametrize(
"payload, expected_code, expected_page_size, expected_message",
[
pytest.param(
{"page": None, "size": 2},
100,
0,
"""TypeError("int() argument must be a string, a bytes-like object or a real number, not 'NoneType'")""",
marks=pytest.mark.skip,
),
pytest.param(
{"page": 0, "size": 2},
100,
0,
"ValueError('Search does not support negative slicing.')",
marks=pytest.mark.skip,
),
pytest.param({"page": 2, "size": 2}, 0, 2, "", marks=pytest.mark.skip(reason="issues/6646")),
({"page": 3, "size": 2}, 0, 0, ""),
({"page": "3", "size": 2}, 0, 0, ""),
pytest.param(
{"page": -1, "size": 2},
100,
0,
"ValueError('Search does not support negative slicing.')",
marks=pytest.mark.skip,
),
pytest.param(
{"page": "a", "size": 2},
100,
0,
"""ValueError("invalid literal for int() with base 10: 'a'")""",
marks=pytest.mark.skip,
),
],
)
def test_page(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
dataset_id, _, _ = add_chunks
payload.update({"question": "chunk", "kb_id": [dataset_id]})
res = retrieval_chunks(WebApiAuth, payload)
assert res["code"] == expected_code, res
if expected_code == 0:
assert len(res["data"]["chunks"]) == expected_page_size, res
else:
assert res["message"] == expected_message, res
@pytest.mark.p3
@pytest.mark.parametrize(
"payload, expected_code, expected_page_size, expected_message",
[
pytest.param(
{"size": None},
100,
0,
"""TypeError("int() argument must be a string, a bytes-like object or a real number, not 'NoneType'")""",
marks=pytest.mark.skip,
),
# ({"size": 0}, 0, 0, ""),
({"size": 1}, 0, 1, ""),
({"size": 5}, 0, 4, ""),
({"size": "1"}, 0, 1, ""),
# ({"size": -1}, 0, 0, ""),
pytest.param(
{"size": "a"},
100,
0,
"""ValueError("invalid literal for int() with base 10: 'a'")""",
marks=pytest.mark.skip,
),
],
)
def test_page_size(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
dataset_id, _, _ = add_chunks
payload.update({"question": "chunk", "kb_id": [dataset_id]})
res = retrieval_chunks(WebApiAuth, payload)
assert res["code"] == expected_code, res
if expected_code == 0:
assert len(res["data"]["chunks"]) == expected_page_size, res
else:
assert res["message"] == expected_message, res
@pytest.mark.p3
@pytest.mark.parametrize(
"payload, expected_code, expected_page_size, expected_message",
[
({"vector_similarity_weight": 0}, 0, 4, ""),
({"vector_similarity_weight": 0.5}, 0, 4, ""),
({"vector_similarity_weight": 10}, 0, 4, ""),
pytest.param(
{"vector_similarity_weight": "a"},
100,
0,
"""ValueError("could not convert string to float: 'a'")""",
marks=pytest.mark.skip,
),
],
)
def test_vector_similarity_weight(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
dataset_id, _, _ = add_chunks
payload.update({"question": "chunk", "kb_id": [dataset_id]})
res = retrieval_chunks(WebApiAuth, payload)
assert res["code"] == expected_code, res
if expected_code == 0:
assert len(res["data"]["chunks"]) == expected_page_size, res
else:
assert res["message"] == expected_message, res
@pytest.mark.p2
@pytest.mark.parametrize(
"payload, expected_code, expected_page_size, expected_message",
[
({"top_k": 10}, 0, 4, ""),
pytest.param(
{"top_k": 1},
0,
4,
"",
marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in ["infinity", "opensearch"], reason="Infinity"),
),
pytest.param(
{"top_k": 1},
0,
1,
"",
marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in [None, "opensearch", "elasticsearch"], reason="elasticsearch"),
),
pytest.param(
{"top_k": -1},
100,
4,
"must be greater than 0",
marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in ["infinity", "opensearch"], reason="Infinity"),
),
pytest.param(
{"top_k": -1},
100,
4,
"3014",
marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in [None, "opensearch", "elasticsearch"], reason="elasticsearch"),
),
pytest.param(
{"top_k": "a"},
100,
0,
"""ValueError("invalid literal for int() with base 10: 'a'")""",
marks=pytest.mark.skip,
),
],
)
def test_top_k(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
dataset_id, _, _ = add_chunks
payload.update({"question": "chunk", "kb_id": [dataset_id]})
res = retrieval_chunks(WebApiAuth, payload)
assert res["code"] == expected_code, res
if expected_code == 0:
assert len(res["data"]["chunks"]) == expected_page_size, res
else:
assert expected_message in res["message"], res
@pytest.mark.skip
@pytest.mark.parametrize(
"payload, expected_code, expected_message",
[
({"rerank_id": "BAAI/bge-reranker-v2-m3"}, 0, ""),
pytest.param({"rerank_id": "unknown"}, 100, "LookupError('Model(unknown) not authorized')", marks=pytest.mark.skip),
],
)
def test_rerank_id(self, WebApiAuth, add_chunks, payload, expected_code, expected_message):
dataset_id, _, _ = add_chunks
payload.update({"question": "chunk", "kb_id": [dataset_id]})
res = retrieval_chunks(WebApiAuth, payload)
assert res["code"] == expected_code, res
if expected_code == 0:
assert len(res["data"]["chunks"]) > 0, res
else:
assert expected_message in res["message"], res
@pytest.mark.skip
@pytest.mark.parametrize(
"payload, expected_code, expected_page_size, expected_message",
[
({"keyword": True}, 0, 5, ""),
({"keyword": "True"}, 0, 5, ""),
({"keyword": False}, 0, 5, ""),
({"keyword": "False"}, 0, 5, ""),
({"keyword": None}, 0, 5, ""),
],
)
def test_keyword(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
dataset_id, _, _ = add_chunks
payload.update({"question": "chunk test", "kb_id": [dataset_id]})
res = retrieval_chunks(WebApiAuth, payload)
assert res["code"] == expected_code, res
if expected_code == 0:
assert len(res["data"]["chunks"]) == expected_page_size, res
else:
assert res["message"] == expected_message, res
@pytest.mark.p3
@pytest.mark.parametrize(
"payload, expected_code, expected_highlight, expected_message",
[
pytest.param({"highlight": True}, 0, True, "", marks=pytest.mark.skip(reason="highlight not functionnal")),
pytest.param({"highlight": "True"}, 0, True, "", marks=pytest.mark.skip(reason="highlight not functionnal")),
({"highlight": False}, 0, False, ""),
({"highlight": "False"}, 0, False, ""),
({"highlight": None}, 0, False, "")
],
)
def test_highlight(self, WebApiAuth, add_chunks, payload, expected_code, expected_highlight, expected_message):
dataset_id, _, _ = add_chunks
payload.update({"question": "chunk", "kb_id": [dataset_id]})
res = retrieval_chunks(WebApiAuth, payload)
assert res["code"] == expected_code, res
if expected_highlight:
for chunk in res["data"]["chunks"]:
assert "highlight" in chunk, res
else:
for chunk in res["data"]["chunks"]:
assert "highlight" not in chunk, res
if expected_code != 0:
assert res["message"] == expected_message, res
@pytest.mark.p3
def test_invalid_params(self, WebApiAuth, add_chunks):
dataset_id, _, _ = add_chunks
payload = {"question": "chunk", "kb_id": [dataset_id], "a": "b"}
res = retrieval_chunks(WebApiAuth, payload)
assert res["code"] == 0, res
assert len(res["data"]["chunks"]) == 4, res
@pytest.mark.p3
def test_concurrent_retrieval(self, WebApiAuth, add_chunks):
    """One hundred parallel retrieval calls against the same dataset all succeed."""
    dataset_id, _, _ = add_chunks
    total = 100
    query = {"question": "chunk", "kb_id": [dataset_id]}
    with ThreadPoolExecutor(max_workers=5) as pool:
        futures = [pool.submit(retrieval_chunks, WebApiAuth, query) for _ in range(total)]
        completed = list(as_completed(futures))
    assert len(completed) == total, completed
    assert all(f.result()["code"] == 0 for f in futures)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_chunk_app/test_retrieval_chunks.py",
"license": "Apache License 2.0",
"lines": 291,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_chunk_app/test_rm_chunks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_add_chunks, delete_chunks, list_chunks
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
@pytest.mark.p2
class TestAuthorization:
    """Chunk deletion must be rejected for missing or invalid credentials."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        """Both no-auth and bad-token requests come back as HTTP 401."""
        payload = {"doc_id": "document_id", "chunk_ids": ["1"]}
        res = delete_chunks(invalid_auth, payload)
        assert res["code"] == expected_code
        assert res["message"] == expected_message
class TestChunksDeletion:
    """Chunk deletion via the web API.

    Covers invalid document/chunk ids, repeated and duplicate deletes, scalar
    (non-list) payloads, concurrent deletion, and a 1k-chunk bulk removal.
    Fixtures `add_chunks_func` / `add_document` supply a fresh document and its
    chunk ids per test.
    """

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "doc_id, expected_code, expected_message",
        [
            ("", 102, "Document not found!"),
            ("invalid_document_id", 102, "Document not found!"),
        ],
    )
    def test_invalid_document_id(self, WebApiAuth, add_chunks_func, doc_id, expected_code, expected_message):
        """Deleting chunks under an empty or unknown document id fails with code 102."""
        _, _, chunk_ids = add_chunks_func
        res = delete_chunks(WebApiAuth, {"doc_id": doc_id, "chunk_ids": chunk_ids})
        assert res["code"] == expected_code, res
        assert res["message"] == expected_message, res

    @pytest.mark.parametrize(
        "payload",
        [
            pytest.param(lambda r: {"chunk_ids": ["invalid_id"] + r}, marks=pytest.mark.p3),
            pytest.param(lambda r: {"chunk_ids": r[:1] + ["invalid_id"] + r[1:4]}, marks=pytest.mark.p1),
            pytest.param(lambda r: {"chunk_ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
        ],
    )
    def test_delete_partial_invalid_id(self, WebApiAuth, add_chunks_func, payload):
        """A batch containing one invalid id still succeeds and removes all real chunks."""
        _, doc_id, chunk_ids = add_chunks_func
        # Parametrized payloads are factories so each case can embed the live chunk ids.
        if callable(payload):
            payload = payload(chunk_ids)
        payload["doc_id"] = doc_id
        res = delete_chunks(WebApiAuth, payload)
        assert res["code"] == 0, res

        res = list_chunks(WebApiAuth, {"doc_id": doc_id})
        assert res["code"] == 0, res
        assert len(res["data"]["chunks"]) == 0, res
        assert res["data"]["total"] == 0, res

    @pytest.mark.p3
    def test_repeated_deletion(self, WebApiAuth, add_chunks_func):
        """Deleting the same ids twice: first call succeeds, second reports 102."""
        _, doc_id, chunk_ids = add_chunks_func
        payload = {"chunk_ids": chunk_ids, "doc_id": doc_id}

        res = delete_chunks(WebApiAuth, payload)
        assert res["code"] == 0, res

        res = delete_chunks(WebApiAuth, payload)
        assert res["code"] == 102, res
        assert res["message"] == "Index updating failure", res

    @pytest.mark.p3
    def test_duplicate_deletion(self, WebApiAuth, add_chunks_func):
        """Duplicated ids in one request are tolerated; all chunks end up deleted."""
        _, doc_id, chunk_ids = add_chunks_func
        payload = {"chunk_ids": chunk_ids * 2, "doc_id": doc_id}
        res = delete_chunks(WebApiAuth, payload)
        assert res["code"] == 0, res

        res = list_chunks(WebApiAuth, {"doc_id": doc_id})
        assert res["code"] == 0, res
        assert len(res["data"]["chunks"]) == 0, res
        assert res["data"]["total"] == 0, res

    @pytest.mark.p2
    def test_delete_scalar_chunk_id_payload(self, WebApiAuth, add_chunks_func):
        """A single id passed as a scalar (not wrapped in a list) deletes exactly one chunk."""
        _, doc_id, chunk_ids = add_chunks_func
        payload = {"chunk_ids": chunk_ids[0], "doc_id": doc_id}
        res = delete_chunks(WebApiAuth, payload)
        assert res["code"] == 0, res

        res = list_chunks(WebApiAuth, {"doc_id": doc_id})
        assert res["code"] == 0, res
        # 4 chunks were created by the fixture; one was removed.
        assert len(res["data"]["chunks"]) == 3, res
        assert res["data"]["total"] == 3, res

    @pytest.mark.p2
    def test_delete_duplicate_ids_dedup_behavior(self, WebApiAuth, add_chunks_func):
        """The same id listed twice is deduplicated server-side; only one chunk is removed."""
        _, doc_id, chunk_ids = add_chunks_func
        payload = {"chunk_ids": [chunk_ids[0], chunk_ids[0]], "doc_id": doc_id}
        res = delete_chunks(WebApiAuth, payload)
        assert res["code"] == 0, res

        res = list_chunks(WebApiAuth, {"doc_id": doc_id})
        assert res["code"] == 0, res
        assert len(res["data"]["chunks"]) == 3, res
        assert res["data"]["total"] == 3, res

    @pytest.mark.p3
    def test_concurrent_deletion(self, WebApiAuth, add_document):
        """100 parallel single-chunk deletions (disjoint ids) all succeed."""
        count = 100
        _, doc_id = add_document
        chunk_ids = batch_add_chunks(WebApiAuth, doc_id, count)

        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [
                executor.submit(
                    delete_chunks,
                    WebApiAuth,
                    {"doc_id": doc_id, "chunk_ids": chunk_ids[i : i + 1]},
                )
                for i in range(count)
            ]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures)

    @pytest.mark.p3
    def test_delete_1k(self, WebApiAuth, add_document):
        """Bulk deletion of 1,000 chunks in one request leaves the document empty."""
        chunks_num = 1_000
        _, doc_id = add_document
        chunk_ids = batch_add_chunks(WebApiAuth, doc_id, chunks_num)

        from time import sleep

        # Give the index a moment to absorb the freshly added chunks before deleting.
        sleep(1)

        res = delete_chunks(WebApiAuth, {"doc_id": doc_id, "chunk_ids": chunk_ids})
        assert res["code"] == 0

        res = list_chunks(WebApiAuth, {"doc_id": doc_id})
        if res["code"] != 0:
            assert False, res
        assert len(res["data"]["chunks"]) == 0, res
        assert res["data"]["total"] == 0, res

    @pytest.mark.parametrize(
        "payload, expected_code, expected_message, remaining",
        [
            pytest.param(None, 100, """TypeError("argument of type \'NoneType\' is not iterable")""", 5, marks=pytest.mark.skip),
            pytest.param({"chunk_ids": ["invalid_id"]}, 102, "Index updating failure", 4, marks=pytest.mark.p3),
            pytest.param("not json", 100, """UnboundLocalError("local variable \'duplicate_messages\' referenced before assignment")""", 5, marks=pytest.mark.skip(reason="pull/6376")),
            pytest.param(lambda r: {"chunk_ids": r[:1]}, 0, "", 3, marks=pytest.mark.p3),
            pytest.param(lambda r: {"chunk_ids": r}, 0, "", 0, marks=pytest.mark.p1),
            pytest.param({"chunk_ids": []}, 0, "", 0, marks=pytest.mark.p3),
        ],
    )
    def test_basic_scenarios(self, WebApiAuth, add_chunks_func, payload, expected_code, expected_message, remaining):
        """Deletion happy path and error cases, checking both the response and what remains.

        NOTE(review): the empty-list case expects `remaining == 0` — presumably an empty
        `chunk_ids` means "delete all"; confirm against the endpoint implementation.
        """
        _, doc_id, chunk_ids = add_chunks_func
        if callable(payload):
            payload = payload(chunk_ids)
        payload["doc_id"] = doc_id
        res = delete_chunks(WebApiAuth, payload)
        assert res["code"] == expected_code, res
        if res["code"] != 0:
            assert res["message"] == expected_message, res

        res = list_chunks(WebApiAuth, {"doc_id": doc_id})
        if res["code"] != 0:
            assert False, res
        assert len(res["data"]["chunks"]) == remaining, res
        assert res["data"]["total"] == remaining, res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_chunk_app/test_rm_chunks.py",
"license": "Apache License 2.0",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_chunk_app/test_update_chunk.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from random import randint
from time import sleep
import pytest
from common import delete_document, list_chunks, update_chunk
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
@pytest.mark.p2
class TestAuthorization:
    """Chunk updates must be rejected for missing or invalid credentials."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        """Both no-auth and bad-token requests come back as HTTP 401."""
        payload = {"doc_id": "doc_id", "chunk_id": "chunk_id", "content_with_weight": "test"}
        res = update_chunk(invalid_auth, payload)
        assert res["code"] == expected_code, res
        assert res["message"] == expected_message, res
class TestUpdateChunk:
    """Chunk update via the web API.

    Exercises content, keyword, question and availability fields, image
    payloads, invalid ids, repeated/concurrent updates, and updates against a
    deleted document. The `add_chunks` fixture supplies a document with chunks.
    """

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"content_with_weight": None}, 100, "TypeError('expected string or bytes-like object')"),
            ({"content_with_weight": ""}, 100, """Exception('Error: 413 - {"error":"Input validation error: `inputs` cannot be empty","error_type":"Validation"}')"""),
            ({"content_with_weight": 1}, 100, "TypeError('expected string or bytes-like object')"),
            ({"content_with_weight": "update chunk"}, 0, ""),
            ({"content_with_weight": " "}, 0, ""),
            ({"content_with_weight": "\n!?。;!?\"'"}, 0, ""),
        ],
    )
    def test_content(self, WebApiAuth, add_chunks, payload, expected_code, expected_message):
        """Valid content strings are persisted; None/empty/non-string content is rejected."""
        _, doc_id, chunk_ids = add_chunks
        chunk_id = chunk_ids[0]

        update_payload = {"doc_id": doc_id, "chunk_id": chunk_id}
        if payload:
            update_payload.update(payload)

        res = update_chunk(WebApiAuth, update_payload)
        assert res["code"] == expected_code, res
        if expected_code != 0:
            assert res["message"] == expected_message, res
        else:
            # Allow the index a moment to reflect the update before re-reading.
            sleep(1)
            res = list_chunks(WebApiAuth, {"doc_id": doc_id})
            for chunk in res["data"]["chunks"]:
                if chunk["chunk_id"] == chunk_id:
                    assert chunk["content_with_weight"] == payload["content_with_weight"]

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"important_kwd": ["a", "b", "c"]}, 0, ""),
            ({"important_kwd": [""]}, 0, ""),
            ({"important_kwd": [1]}, 100, "TypeError('sequence item 0: expected str instance, int found')"),
            ({"important_kwd": ["a", "a"]}, 0, ""),
            ({"important_kwd": "abc"}, 102, "`important_kwd` should be a list"),
            ({"important_kwd": 123}, 102, "`important_kwd` should be a list"),
        ],
    )
    def test_important_keywords(self, WebApiAuth, add_chunks, payload, expected_code, expected_message):
        """`important_kwd` must be a list of strings; valid lists round-trip unchanged."""
        _, doc_id, chunk_ids = add_chunks
        chunk_id = chunk_ids[0]

        update_payload = {"doc_id": doc_id, "chunk_id": chunk_id, "content_with_weight": "unchanged content"}  # Add content_with_weight as it's required
        if payload:
            update_payload.update(payload)

        res = update_chunk(WebApiAuth, update_payload)
        assert res["code"] == expected_code, res
        if expected_code != 0:
            assert res["message"] == expected_message, res
        else:
            sleep(1)
            res = list_chunks(WebApiAuth, {"doc_id": doc_id})
            for chunk in res["data"]["chunks"]:
                if chunk["chunk_id"] == chunk_id:
                    assert chunk["important_kwd"] == payload["important_kwd"]

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"question_kwd": ["a", "b", "c"]}, 0, ""),
            ({"question_kwd": [""]}, 100, """Exception('Error: 413 - {"error":"Input validation error: `inputs` cannot be empty","error_type":"Validation"}')"""),
            ({"question_kwd": [1]}, 100, "TypeError('sequence item 0: expected str instance, int found')"),
            ({"question_kwd": ["a", "a"]}, 0, ""),
            ({"question_kwd": "abc"}, 102, "`question_kwd` should be a list"),
            ({"question_kwd": 123}, 102, "`question_kwd` should be a list"),
        ],
    )
    def test_questions(self, WebApiAuth, add_chunks, payload, expected_code, expected_message):
        """`question_kwd` must be a list of non-empty strings; valid lists round-trip unchanged."""
        _, doc_id, chunk_ids = add_chunks
        chunk_id = chunk_ids[0]

        update_payload = {"doc_id": doc_id, "chunk_id": chunk_id, "content_with_weight": "unchanged content"}  # Add content_with_weight as it's required
        if payload:
            update_payload.update(payload)

        res = update_chunk(WebApiAuth, update_payload)
        assert res["code"] == expected_code, res
        if expected_code != 0:
            assert res["message"] == expected_message, res
        else:
            sleep(1)
            res = list_chunks(WebApiAuth, {"doc_id": doc_id})
            for chunk in res["data"]["chunks"]:
                if chunk["chunk_id"] == chunk_id:
                    assert chunk["question_kwd"] == payload["question_kwd"]

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"available_int": 1}, 0, ""),
            ({"available_int": 0}, 0, ""),
        ],
    )
    def test_available(self, WebApiAuth, add_chunks, payload, expected_code, expected_message):
        """`available_int` toggles (0/1) are accepted and persisted."""
        _, doc_id, chunk_ids = add_chunks
        chunk_id = chunk_ids[0]

        update_payload = {"doc_id": doc_id, "chunk_id": chunk_id, "content_with_weight": "unchanged content"}
        if payload:
            update_payload.update(payload)

        res = update_chunk(WebApiAuth, update_payload)
        assert res["code"] == expected_code, res
        if expected_code != 0:
            assert res["message"] == expected_message, res
        else:
            sleep(1)
            res = list_chunks(WebApiAuth, {"doc_id": doc_id})
            for chunk in res["data"]["chunks"]:
                if chunk["chunk_id"] == chunk_id:
                    assert chunk["available_int"] == payload["available_int"]

    @pytest.mark.p2
    def test_update_chunk_qa_multiline_content(self, WebApiAuth, add_chunks):
        """Multi-line (Q/A style) content survives the update round-trip verbatim."""
        _, doc_id, chunk_ids = add_chunks
        payload = {"doc_id": doc_id, "chunk_id": chunk_ids[0], "content_with_weight": "Question line\nAnswer line"}
        res = update_chunk(WebApiAuth, payload)
        assert res["code"] == 0, res
        sleep(1)
        res = list_chunks(WebApiAuth, {"doc_id": doc_id})
        assert res["code"] == 0, res
        chunk = next(chunk for chunk in res["data"]["chunks"] if chunk["chunk_id"] == chunk_ids[0])
        assert chunk["content_with_weight"] == payload["content_with_weight"], res

    @pytest.mark.p2
    def test_update_chunk_with_image_payload(self, WebApiAuth, add_chunks):
        """An update carrying `image_base64` and `img_id` fields is accepted."""
        _, doc_id, chunk_ids = add_chunks
        payload = {
            "doc_id": doc_id,
            "chunk_id": chunk_ids[0],
            "content_with_weight": "content with image",
            "image_base64": base64.b64encode(b"img").decode("utf-8"),
            "img_id": "bucket-name",
        }
        res = update_chunk(WebApiAuth, payload)
        assert res["code"] == 0, res

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "doc_id_param, expected_code, expected_message",
        [
            ("", 102, "Tenant not found!"),
            ("invalid_doc_id", 102, "Tenant not found!"),
        ],
    )
    def test_invalid_document_id_for_update(self, WebApiAuth, add_chunks, doc_id_param, expected_code, expected_message):
        """Updating a chunk under a missing document id fails with 'Tenant not found!'."""
        _, _, chunk_ids = add_chunks
        chunk_id = chunk_ids[0]
        payload = {"doc_id": doc_id_param, "chunk_id": chunk_id, "content_with_weight": "test content"}
        res = update_chunk(WebApiAuth, payload)
        assert res["code"] == expected_code
        assert expected_message in res["message"]

    @pytest.mark.p3
    def test_repeated_update_chunk(self, WebApiAuth, add_chunks):
        """The same chunk can be updated twice in a row without error."""
        _, doc_id, chunk_ids = add_chunks
        payload1 = {"doc_id": doc_id, "chunk_id": chunk_ids[0], "content_with_weight": "chunk test 1"}
        res = update_chunk(WebApiAuth, payload1)
        assert res["code"] == 0

        payload2 = {"doc_id": doc_id, "chunk_id": chunk_ids[0], "content_with_weight": "chunk test 2"}
        res = update_chunk(WebApiAuth, payload2)
        assert res["code"] == 0

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            ({"unknown_key": "unknown_value"}, 0, ""),
            ({}, 0, ""),
            pytest.param(None, 100, """TypeError("int() argument must be a string, a bytes-like object or a real number, not 'NoneType'")""", marks=pytest.mark.skip),
        ],
    )
    def test_invalid_params(self, WebApiAuth, add_chunks, payload, expected_code, expected_message):
        """Unknown or empty extra parameters are ignored and the update succeeds."""
        _, doc_id, chunk_ids = add_chunks
        chunk_id = chunk_ids[0]
        update_payload = {"doc_id": doc_id, "chunk_id": chunk_id, "content_with_weight": "unchanged content"}
        if payload is not None:
            update_payload.update(payload)
        res = update_chunk(WebApiAuth, update_payload)
        assert res["code"] == expected_code, res
        if expected_code != 0:
            assert res["message"] == expected_message, res

    @pytest.mark.p3
    @pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6554")
    def test_concurrent_update_chunk(self, WebApiAuth, add_chunks):
        """50 parallel updates against random chunks of one document all succeed."""
        count = 50
        _, doc_id, chunk_ids = add_chunks

        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [
                executor.submit(
                    update_chunk,
                    WebApiAuth,
                    {"doc_id": doc_id, "chunk_id": chunk_ids[randint(0, 3)], "content_with_weight": f"update chunk test {i}"},
                )
                for i in range(count)
            ]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures)

    @pytest.mark.p3
    def test_update_chunk_to_deleted_document(self, WebApiAuth, add_chunks):
        """Updating a chunk of a deleted document fails with 'Tenant not found!'."""
        _, doc_id, chunk_ids = add_chunks
        delete_document(WebApiAuth, {"doc_id": doc_id})
        payload = {"doc_id": doc_id, "chunk_id": chunk_ids[0], "content_with_weight": "test content"}
        res = update_chunk(WebApiAuth, payload)
        assert res["code"] == 102, res
        assert res["message"] == "Tenant not found!", res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_chunk_app/test_update_chunk.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:api/apps/sdk/files.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pathlib
import re
from quart import request, make_response
from pathlib import Path
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils.api_utils import get_json_result, get_request_json, server_error_response, token_required
from common.misc_utils import get_uuid, thread_pool_exec
from api.db import FileType
from api.db.services import duplicate_name
from api.db.services.file_service import FileService
from api.utils.file_utils import filename_type
from api.utils.web_utils import CONTENT_TYPE_MAP, apply_safe_file_response_headers
from common import settings
from common.constants import RetCode
@manager.route('/file/upload', methods=['POST'])  # noqa: F821
@token_required
async def upload(tenant_id):
    """
    Upload a file to the system.
    ---
    tags:
      - File
    security:
      - ApiKeyAuth: []
    parameters:
      - in: formData
        name: file
        type: file
        required: true
        description: The file to upload
      - in: formData
        name: parent_id
        type: string
        description: Parent folder ID where the file will be uploaded. Optional.
    responses:
      200:
        description: Successfully uploaded the file.
        schema:
          type: object
          properties:
            data:
              type: array
              items:
                type: object
                properties:
                  id:
                    type: string
                    description: File ID
                  name:
                    type: string
                    description: File name
                  size:
                    type: integer
                    description: File size in bytes
                  type:
                    type: string
                    description: File type (e.g., document, folder)
    """
    form = await request.form
    files = await request.files
    pf_id = form.get("parent_id")
    # Fall back to the tenant's root folder when no parent is supplied.
    if not pf_id:
        root_folder = FileService.get_root_folder(tenant_id)
        pf_id = root_folder["id"]

    if 'file' not in files:
        return get_json_result(data=False, message='No file part!', code=RetCode.BAD_REQUEST)
    file_objs = files.getlist('file')
    # Reject the whole batch up front if any part has no filename.
    for file_obj in file_objs:
        if file_obj.filename == '':
            return get_json_result(data=False, message='No selected file!', code=RetCode.BAD_REQUEST)
    file_res = []
    try:
        e, pf_folder = FileService.get_by_id(pf_id)
        if not e:
            return get_json_result(data=False, message="Can't find this folder!", code=RetCode.NOT_FOUND)
        for file_obj in file_objs:
            # Split the (possibly nested) filename into path components; the
            # leading '/' makes component [0] an empty string for the root.
            full_path = '/' + file_obj.filename
            file_obj_names = full_path.split('/')
            file_len = len(file_obj_names)

            # Resolve existing folder ids along the path, starting from pf_id.
            file_id_list = FileService.get_id_list_by_id(pf_id, file_obj_names, 1, [pf_id])
            len_id_list = len(file_id_list)

            # Create any missing intermediate folders; the index offset differs
            # depending on whether the full path already resolved.
            if file_len != len_id_list:
                e, file = FileService.get_by_id(file_id_list[len_id_list - 1])
                if not e:
                    return get_json_result(data=False, message="Folder not found!", code=RetCode.NOT_FOUND)
                last_folder = FileService.create_folder(file, file_id_list[len_id_list - 1], file_obj_names,
                                                        len_id_list)
            else:
                e, file = FileService.get_by_id(file_id_list[len_id_list - 2])
                if not e:
                    return get_json_result(data=False, message="Folder not found!", code=RetCode.NOT_FOUND)
                last_folder = FileService.create_folder(file, file_id_list[len_id_list - 2], file_obj_names,
                                                        len_id_list)

            filetype = filename_type(file_obj_names[file_len - 1])
            location = file_obj_names[file_len - 1]
            # Avoid storage-key collisions by appending underscores until free.
            while settings.STORAGE_IMPL.obj_exist(last_folder.id, location):
                location += "_"
            # NOTE(review): reads the whole upload into memory; fine for small
            # files, may need streaming for very large ones.
            blob = file_obj.read()
            filename = duplicate_name(FileService.query, name=file_obj_names[file_len - 1], parent_id=last_folder.id)
            file = {
                "id": get_uuid(),
                "parent_id": last_folder.id,
                "tenant_id": tenant_id,
                "created_by": tenant_id,
                "type": filetype,
                "name": filename,
                "location": location,
                "size": len(blob),
            }
            # Insert the DB record first, then persist the blob to storage.
            file = FileService.insert(file)
            settings.STORAGE_IMPL.put(last_folder.id, location, blob)
            file_res.append(file.to_json())
        return get_json_result(data=file_res)
    except Exception as e:
        return server_error_response(e)
@manager.route('/file/create', methods=['POST'])  # noqa: F821
@token_required
async def create(tenant_id):
    """
    Create a new file or folder.
    ---
    tags:
      - File
    security:
      - ApiKeyAuth: []
    parameters:
      - in: body
        name: body
        description: File creation parameters
        required: true
        schema:
          type: object
          properties:
            name:
              type: string
              description: Name of the file/folder
            parent_id:
              type: string
              description: Parent folder ID. Optional.
            type:
              type: string
              enum: ["FOLDER", "VIRTUAL"]
              description: Type of the file
    responses:
      200:
        description: File created successfully.
        schema:
          type: object
          properties:
            data:
              type: object
              properties:
                id:
                  type: string
                name:
                  type: string
                type:
                  type: string
    """
    req = await get_request_json()
    pf_id = req.get("parent_id")
    input_file_type = req.get("type")
    # Default to the tenant's root folder when no parent is supplied.
    if not pf_id:
        pf_id = FileService.get_root_folder(tenant_id)["id"]

    try:
        if not FileService.is_parent_folder_exist(pf_id):
            return get_json_result(data=False, message="Parent Folder Doesn't Exist!", code=RetCode.BAD_REQUEST)
        if FileService.query(name=req["name"], parent_id=pf_id):
            return get_json_result(data=False, message="Duplicated folder name in the same folder.",
                                   code=RetCode.CONFLICT)

        # Anything that is not explicitly a FOLDER is stored as VIRTUAL.
        file_type = FileType.FOLDER.value if input_file_type == FileType.FOLDER.value else FileType.VIRTUAL.value

        new_file = FileService.insert({
            "id": get_uuid(),
            "parent_id": pf_id,
            "tenant_id": tenant_id,
            "created_by": tenant_id,
            "name": req["name"],
            "location": "",
            "size": 0,
            "type": file_type
        })
        return get_json_result(data=new_file.to_json())
    except Exception as e:
        return server_error_response(e)
@manager.route('/file/list', methods=['GET'])  # noqa: F821
@token_required
async def list_files(tenant_id):
    """
    List files under a specific folder.
    ---
    tags:
      - File
    security:
      - ApiKeyAuth: []
    parameters:
      - in: query
        name: parent_id
        type: string
        description: Folder ID to list files from
      - in: query
        name: keywords
        type: string
        description: Search keyword filter
      - in: query
        name: page
        type: integer
        default: 1
        description: Page number
      - in: query
        name: page_size
        type: integer
        default: 15
        description: Number of results per page
      - in: query
        name: orderby
        type: string
        default: "create_time"
        description: Sort by field
      - in: query
        name: desc
        type: boolean
        default: true
        description: Descending order ("false"/"0"/"no" disable it)
    responses:
      200:
        description: Successfully retrieved file list.
        schema:
          type: object
          properties:
            total:
              type: integer
            files:
              type: array
              items:
                type: object
                properties:
                  id:
                    type: string
                  name:
                    type: string
                  type:
                    type: string
                  size:
                    type: integer
                  create_time:
                    type: string
                    format: date-time
    """
    pf_id = request.args.get("parent_id")
    keywords = request.args.get("keywords", "")
    try:
        # Previously a non-numeric page/page_size raised an unhandled
        # ValueError (HTTP 500); report a clean client error instead.
        page_number = int(request.args.get("page", 1))
        items_per_page = int(request.args.get("page_size", 15))
    except ValueError:
        return get_json_result(data=False, message="`page` and `page_size` must be integers!", code=RetCode.BAD_REQUEST)
    orderby = request.args.get("orderby", "create_time")
    # BUG FIX: query-string values are strings, so the old
    # `request.args.get("desc", True)` made every non-empty value — including
    # "false" — truthy, so ascending order could never be requested.
    # Parse explicitly; the default remains descending.
    desc_arg = request.args.get("desc")
    desc = True if desc_arg is None else str(desc_arg).lower() not in ("false", "0", "no")
    if not pf_id:
        root_folder = FileService.get_root_folder(tenant_id)
        pf_id = root_folder["id"]
        FileService.init_knowledgebase_docs(pf_id, tenant_id)
    try:
        e, file = FileService.get_by_id(pf_id)
        if not e:
            return get_json_result(message="Folder not found!", code=RetCode.NOT_FOUND)

        files, total = FileService.get_by_pf_id(tenant_id, pf_id, page_number, items_per_page, orderby, desc, keywords)

        parent_folder = FileService.get_parent_folder(pf_id)
        if not parent_folder:
            return get_json_result(message="File not found!", code=RetCode.NOT_FOUND)

        return get_json_result(data={"total": total, "files": files, "parent_folder": parent_folder.to_json()})
    except Exception as e:
        return server_error_response(e)
@manager.route('/file/root_folder', methods=['GET'])  # noqa: F821
@token_required
async def get_root_folder(tenant_id):
    """
    Get user's root folder.
    ---
    tags:
      - File
    security:
      - ApiKeyAuth: []
    responses:
      200:
        description: Root folder information
        schema:
          type: object
          properties:
            data:
              type: object
              properties:
                root_folder:
                  type: object
                  properties:
                    id:
                      type: string
                    name:
                      type: string
                    type:
                      type: string
    """
    try:
        return get_json_result(data={"root_folder": FileService.get_root_folder(tenant_id)})
    except Exception as e:
        return server_error_response(e)
@manager.route('/file/parent_folder', methods=['GET'])  # noqa: F821
@token_required
async def get_parent_folder(tenant_id):
    """
    Get parent folder info of a file.
    ---
    tags:
      - File
    security:
      - ApiKeyAuth: []
    parameters:
      - in: query
        name: file_id
        type: string
        required: true
        description: Target file ID
    responses:
      200:
        description: Parent folder information
        schema:
          type: object
          properties:
            data:
              type: object
              properties:
                parent_folder:
                  type: object
                  properties:
                    id:
                      type: string
                    name:
                      type: string
    """
    # BUG FIX: the handler previously took no parameters, but every sibling
    # @token_required handler in this module receives `tenant_id` from the
    # decorator — a zero-arg signature would raise TypeError at request time.
    file_id = request.args.get("file_id")
    try:
        e, file = FileService.get_by_id(file_id)
        if not e:
            return get_json_result(message="Folder not found!", code=RetCode.NOT_FOUND)

        parent_folder = FileService.get_parent_folder(file_id)
        return get_json_result(data={"parent_folder": parent_folder.to_json()})
    except Exception as e:
        return server_error_response(e)
@manager.route('/file/all_parent_folder', methods=['GET'])  # noqa: F821
@token_required
async def get_all_parent_folders(tenant_id):
    """
    Get all parent folders of a file.
    ---
    tags:
      - File
    security:
      - ApiKeyAuth: []
    parameters:
      - in: query
        name: file_id
        type: string
        required: true
        description: Target file ID
    responses:
      200:
        description: All parent folders of the file
        schema:
          type: object
          properties:
            data:
              type: object
              properties:
                parent_folders:
                  type: array
                  items:
                    type: object
                    properties:
                      id:
                        type: string
                      name:
                        type: string
    """
    file_id = request.args.get("file_id")
    try:
        found, _ = FileService.get_by_id(file_id)
        if not found:
            return get_json_result(message="Folder not found!", code=RetCode.NOT_FOUND)

        folders = FileService.get_all_parent_folders(file_id)
        return get_json_result(data={"parent_folders": [folder.to_json() for folder in folders]})
    except Exception as e:
        return server_error_response(e)
@manager.route('/file/rm', methods=['POST'])  # noqa: F821
@token_required
async def rm(tenant_id):
    """
    Delete one or multiple files/folders.
    ---
    tags:
      - File
    security:
      - ApiKeyAuth: []
    parameters:
      - in: body
        name: body
        description: Files to delete
        required: true
        schema:
          type: object
          properties:
            file_ids:
              type: array
              items:
                type: string
              description: List of file IDs to delete
    responses:
      200:
        description: Successfully deleted files
        schema:
          type: object
          properties:
            data:
              type: boolean
              example: true
    """
    req = await get_request_json()
    file_ids = req["file_ids"]
    try:
        for file_id in file_ids:
            e, file = FileService.get_by_id(file_id)
            if not e:
                return get_json_result(message="File or Folder not found!", code=RetCode.NOT_FOUND)
            if not file.tenant_id:
                return get_json_result(message="Tenant not found!", code=RetCode.NOT_FOUND)
            if file.type == FileType.FOLDER.value:
                # Remove every leaf file's blob before dropping the folder tree.
                # BUG FIX: the inner loop used to rebind `file`, shadowing the
                # folder being deleted; a distinct name keeps it intact.
                for inner_file_id in FileService.get_all_innermost_file_ids(file_id, []):
                    ok, inner_file = FileService.get_by_id(inner_file_id)
                    if not ok:
                        return get_json_result(message="File not found!", code=RetCode.NOT_FOUND)
                    settings.STORAGE_IMPL.rm(inner_file.parent_id, inner_file.location)
                FileService.delete_folder_by_pf_id(tenant_id, file_id)
            else:
                settings.STORAGE_IMPL.rm(file.parent_id, file.location)
                if not FileService.delete(file):
                    return get_json_result(message="Database error (File removal)!", code=RetCode.SERVER_ERROR)

            # Remove any documents linked to this file.
            informs = File2DocumentService.get_by_file_id(file_id)
            for inform in informs:
                doc_id = inform.document_id
                e, doc = DocumentService.get_by_id(doc_id)
                if not e:
                    return get_json_result(message="Document not found!", code=RetCode.NOT_FOUND)
                # BUG FIX: this used to overwrite the `tenant_id` parameter,
                # corrupting later iterations (e.g. delete_folder_by_pf_id).
                doc_tenant_id = DocumentService.get_tenant_id(doc_id)
                if not doc_tenant_id:
                    return get_json_result(message="Tenant not found!", code=RetCode.NOT_FOUND)
                if not DocumentService.remove_document(doc, doc_tenant_id):
                    return get_json_result(message="Database error (Document removal)!", code=RetCode.SERVER_ERROR)
            File2DocumentService.delete_by_file_id(file_id)

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
@manager.route('/file/rename', methods=['POST'])  # noqa: F821
@token_required
async def rename(tenant_id):
    """
    Rename a file.
    ---
    tags:
      - File
    security:
      - ApiKeyAuth: []
    parameters:
      - in: body
        name: body
        description: Rename file
        required: true
        schema:
          type: object
          properties:
            file_id:
              type: string
              description: Target file ID
            name:
              type: string
              description: New name for the file
    responses:
      200:
        description: File renamed successfully
        schema:
          type: object
          properties:
            data:
              type: boolean
              example: true
    """
    req = await get_request_json()
    try:
        e, file = FileService.get_by_id(req["file_id"])
        if not e:
            return get_json_result(message="File not found!", code=RetCode.NOT_FOUND)
        # Renaming may not change the extension (folders have no extension).
        if file.type != FileType.FOLDER.value and pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
                file.name.lower()).suffix:
            return get_json_result(data=False, message="The extension of file can't be changed",
                                   code=RetCode.BAD_REQUEST)
        # NOTE(review): the duplicate check queries with `pf_id=` while
        # create() uses `parent_id=` — verify FileService accepts both.
        for existing_file in FileService.query(name=req["name"], pf_id=file.parent_id):
            if existing_file.name == req["name"]:
                return get_json_result(data=False, message="Duplicated file name in the same folder.",
                                       code=RetCode.CONFLICT)

        if not FileService.update_by_id(req["file_id"], {"name": req["name"]}):
            return get_json_result(message="Database error (File rename)!", code=RetCode.SERVER_ERROR)

        # Keep the linked document's name in sync with the file's new name.
        informs = File2DocumentService.get_by_file_id(req["file_id"])
        if informs:
            if not DocumentService.update_by_id(informs[0].document_id, {"name": req["name"]}):
                return get_json_result(message="Database error (Document rename)!", code=RetCode.SERVER_ERROR)

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
@manager.route('/file/get/<file_id>', methods=['GET'])  # noqa: F821
@token_required
async def get(tenant_id, file_id):
    """
    Download a file.
    ---
    tags:
      - File
    security:
      - ApiKeyAuth: []
    produces:
      - application/octet-stream
    parameters:
      - in: path
        name: file_id
        type: string
        required: true
        description: File ID to download
    responses:
      200:
        description: File stream
        schema:
          type: file
      404:
        description: File not found
    """
    try:
        e, file = FileService.get_by_id(file_id)
        if not e:
            return get_json_result(message="Document not found!", code=RetCode.NOT_FOUND)

        blob = settings.STORAGE_IMPL.get(file.parent_id, file.location)
        # Fall back to the linked document's storage address when the blob is
        # not stored under the file's own parent folder.
        if not blob:
            b, n = File2DocumentService.get_storage_address(file_id=file_id)
            blob = settings.STORAGE_IMPL.get(b, n)

        response = await make_response(blob)
        # Derive the content type from the filename extension.
        ext = re.search(r"\.([^.]+)$", file.name)
        extension = ext.group(1).lower() if ext else None
        content_type = None
        if extension:
            # Visual files fall back to image/<ext>, everything else to application/<ext>.
            fallback_prefix = "image" if file.type == FileType.VISUAL.value else "application"
            content_type = CONTENT_TYPE_MAP.get(extension, f"{fallback_prefix}/{extension}")

        apply_safe_file_response_headers(response, content_type, extension)

        return response
    except Exception as e:
        return server_error_response(e)
@manager.route("/file/download/<attachment_id>", methods=["GET"]) # noqa: F821
@token_required
async def download_attachment(tenant_id, attachment_id):
try:
ext = request.args.get("ext", "markdown")
data = await thread_pool_exec(settings.STORAGE_IMPL.get, tenant_id, attachment_id)
response = await make_response(data)
content_type = CONTENT_TYPE_MAP.get(ext, f"application/{ext}")
apply_safe_file_response_headers(response, content_type, ext)
return response
except Exception as e:
return server_error_response(e)
@manager.route('/file/mv', methods=['POST'])  # noqa: F821
@token_required
async def move(tenant_id):
    """
    Move one or multiple files to another folder.
    ---
    tags:
      - File
    security:
      - ApiKeyAuth: []
    parameters:
      - in: body
        name: body
        description: Move operation
        required: true
        schema:
          type: object
          properties:
            src_file_ids:
              type: array
              items:
                type: string
              description: Source file IDs
            dest_file_id:
              type: string
              description: Destination folder ID
    responses:
      200:
        description: Files moved successfully
        schema:
          type: object
          properties:
            data:
              type: boolean
              example: true
    """
    req = await get_request_json()
    try:
        file_ids = req["src_file_ids"]
        parent_id = req["dest_file_id"]
        files = FileService.get_by_ids(file_ids)
        files_dict = {f.id: f for f in files}
        for file_id in file_ids:
            # Use .get() so an unknown id yields None and hits the NOT_FOUND
            # branch below; a plain [] lookup raised KeyError and surfaced as a
            # generic 500 via server_error_response, making that branch dead code.
            file = files_dict.get(file_id)
            if not file:
                return get_json_result(message="File or Folder not found!", code=RetCode.NOT_FOUND)
            if not file.tenant_id:
                return get_json_result(message="Tenant not found!", code=RetCode.NOT_FOUND)
        fe, _ = FileService.get_by_id(parent_id)
        if not fe:
            return get_json_result(message="Parent Folder not found!", code=RetCode.NOT_FOUND)
        FileService.move_file(file_ids, parent_id)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
@manager.route('/file/convert', methods=['POST'])  # noqa: F821
@token_required
async def convert(tenant_id):
    """Link files to knowledge bases by (re)creating Document records.

    For every requested file id (folders are expanded to their innermost
    files), any existing document links are deleted first, then one new
    Document row is inserted into each requested knowledge base and a
    File2Document mapping is recorded. Returns the created mappings.
    """
    req = await get_request_json()
    kb_ids = req["kb_ids"]
    file_ids = req["file_ids"]
    file2documents = []
    try:
        files = FileService.get_by_ids(file_ids)
        files_set = dict({file.id: file for file in files})
        for file_id in file_ids:
            file = files_set[file_id]
            if not file:
                return get_json_result(message="File not found!", code=RetCode.NOT_FOUND)
            file_ids_list = [file_id]
            if file.type == FileType.FOLDER.value:
                # Folders are flattened to the leaf files they contain.
                file_ids_list = FileService.get_all_innermost_file_ids(file_id, [])
            for id in file_ids_list:
                informs = File2DocumentService.get_by_file_id(id)
                # delete: remove documents already linked to this file
                for inform in informs:
                    doc_id = inform.document_id
                    e, doc = DocumentService.get_by_id(doc_id)
                    if not e:
                        return get_json_result(message="Document not found!", code=RetCode.NOT_FOUND)
                    # NOTE(review): this rebinds the route's `tenant_id` parameter;
                    # the rebound value later feeds "created_by" on insert — confirm intended.
                    tenant_id = DocumentService.get_tenant_id(doc_id)
                    if not tenant_id:
                        return get_json_result(message="Tenant not found!", code=RetCode.NOT_FOUND)
                    if not DocumentService.remove_document(doc, tenant_id):
                        return get_json_result(
                            message="Database error (Document removal)!", code=RetCode.NOT_FOUND)
                File2DocumentService.delete_by_file_id(id)
                # insert: create one document per target knowledge base
                for kb_id in kb_ids:
                    e, kb = KnowledgebaseService.get_by_id(kb_id)
                    if not e:
                        return get_json_result(
                            message="Can't find this dataset!", code=RetCode.NOT_FOUND)
                    e, file = FileService.get_by_id(id)
                    if not e:
                        return get_json_result(
                            message="Can't find this file!", code=RetCode.NOT_FOUND)
                    doc = DocumentService.insert({
                        "id": get_uuid(),
                        "kb_id": kb.id,
                        "parser_id": FileService.get_parser(file.type, file.name, kb.parser_id),
                        "parser_config": kb.parser_config,
                        "created_by": tenant_id,
                        "type": file.type,
                        "name": file.name,
                        "suffix": Path(file.name).suffix.lstrip("."),
                        "location": file.location,
                        "size": file.size
                    })
                    file2document = File2DocumentService.insert({
                        "id": get_uuid(),
                        "file_id": id,
                        "document_id": doc.id,
                    })
                    file2documents.append(file2document.to_json())
        return get_json_result(data=file2documents)
    except Exception as e:
        return server_error_response(e)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/apps/sdk/files.py",
"license": "Apache License 2.0",
"lines": 714,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
infiniflow/ragflow:mcp/client/streamable_http_client.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client
async def main():
    """Smoke-test the RAGFlow MCP server over streamable HTTP: connect,
    list the exposed tools, then invoke ``ragflow_retrieval`` once."""
    try:
        # In `host` mode the server needs identification on every request, e.g.:
        #   streamablehttp_client("http://localhost:9382/mcp/", headers={"api_key": "<RAGFLOW_API_KEY>"})
        # or, per OAuth 2.1 Section 5, an Authorization header:
        #   streamablehttp_client("http://localhost:9382/mcp/", headers={"Authorization": "Bearer <RAGFLOW_API_KEY>"})
        async with streamablehttp_client("http://localhost:9382/mcp/") as (read_stream, write_stream, _):
            async with ClientSession(read_stream, write_stream) as session:
                await session.initialize()
                tools = await session.list_tools()
                print(f"{tools.tools=}")
                response = await session.call_tool(
                    name="ragflow_retrieval",
                    arguments={
                        "dataset_ids": ["bc4177924a7a11f09eff238aa5c10c94"],
                        "document_ids": [],
                        "question": "How to install neovim?",
                    },
                )
                print(f"Tool response: {response.model_dump()}")
    except Exception as e:
        print(e)
if __name__ == "__main__":
from anyio import run
run(main)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "mcp/client/streamable_http_client.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:test/testcases/test_web_api/test_document_app/test_create_document.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import string
from types import SimpleNamespace
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import create_document, list_kbs
from configs import DOCUMENT_NAME_LIMIT, INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
from utils.file_utils import create_txt_file
from api.constants import FILE_NAME_LEN_LIMIT
@pytest.mark.p1
@pytest.mark.usefixtures("clear_datasets")
class TestAuthorization:
    """Document creation must be rejected without a valid web-API token."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        response = create_document(invalid_auth)
        assert response["code"] == expected_code, response
        assert response["message"] == expected_message, response
class TestDocumentCreate:
    """Validation and happy-path scenarios for creating documents."""

    @pytest.mark.p3
    def test_filename_empty(self, WebApiAuth, add_dataset_func):
        dataset_id = add_dataset_func
        response = create_document(WebApiAuth, {"name": "", "kb_id": dataset_id})
        assert response["code"] == 101, response
        assert response["message"] == "File name can't be empty.", response

    @pytest.mark.p2
    def test_filename_max_length(self, WebApiAuth, add_dataset_func, tmp_path):
        dataset_id = add_dataset_func
        stem_length = DOCUMENT_NAME_LIMIT - 4  # leave room for the ".txt" suffix
        txt_path = create_txt_file(tmp_path / f"{'a' * stem_length}.txt")
        response = create_document(WebApiAuth, {"name": txt_path.name, "kb_id": dataset_id})
        assert response["code"] == 0, response
        assert response["data"]["name"] == txt_path.name, response

    @pytest.mark.p2
    def test_invalid_kb_id(self, WebApiAuth):
        response = create_document(WebApiAuth, {"name": "ragflow_test.txt", "kb_id": "invalid_kb_id"})
        assert response["code"] == 102, response
        assert response["message"] == "Can't find this dataset!", response

    @pytest.mark.p3
    def test_filename_special_characters(self, WebApiAuth, add_dataset_func):
        dataset_id = add_dataset_func
        # Map filesystem-illegal characters to underscores; keep all other punctuation.
        illegal_chars = '<>:"/\\|?*'
        safe_filename = string.punctuation.translate(str.maketrans(dict.fromkeys(illegal_chars, "_")))
        filename = f"{safe_filename}.txt"
        response = create_document(WebApiAuth, {"name": filename, "kb_id": dataset_id})
        assert response["code"] == 0, response
        assert response["data"]["kb_id"] == dataset_id, response
        assert response["data"]["name"] == filename, f"Expected: (unknown), Got: {response['data']['name']}"

    @pytest.mark.p3
    def test_concurrent_upload(self, WebApiAuth, add_dataset_func):
        dataset_id = add_dataset_func
        total = 20
        names = [f"ragflow_test_{i}.txt" for i in range(total)]
        with ThreadPoolExecutor(max_workers=5) as pool:
            futures = [pool.submit(create_document, WebApiAuth, {"name": name, "kb_id": dataset_id}) for name in names]
        completed = list(as_completed(futures))
        assert len(completed) == total, completed
        assert all(f.result()["code"] == 0 for f in futures), completed
        listing = list_kbs(WebApiAuth, {"id": dataset_id})
        assert listing["data"]["kbs"][0]["doc_num"] == total, listing
def _run(coro):
return asyncio.run(coro)
@pytest.mark.p2
class TestDocumentCreateUnit:
    """White-box tests for the document `create` route.

    Each test monkeypatches the request-body reader and the service layer,
    then invokes the undecorated handler (``create.__wrapped__``) directly
    and asserts on the returned JSON envelope.
    """
    def test_missing_kb_id(self, document_app_module, monkeypatch):
        # Empty kb_id -> argument-missing error (code 101).
        module = document_app_module
        async def fake_request_json():
            return {"kb_id": "", "name": "doc.txt"}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.create.__wrapped__())
        assert res["code"] == 101
        assert res["message"] == 'Lack of "KB ID"'
    def test_filename_too_long(self, document_app_module, monkeypatch):
        # One byte over FILE_NAME_LEN_LIMIT is rejected.
        module = document_app_module
        long_name = "a" * (FILE_NAME_LEN_LIMIT + 1)
        async def fake_request_json():
            return {"kb_id": "kb1", "name": long_name}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.create.__wrapped__())
        assert res["code"] == 101
        assert res["message"] == f"File name must be {FILE_NAME_LEN_LIMIT} bytes or less."
    def test_filename_whitespace(self, document_app_module, monkeypatch):
        # An all-whitespace name counts as empty.
        module = document_app_module
        async def fake_request_json():
            return {"kb_id": "kb1", "name": "  "}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.create.__wrapped__())
        assert res["code"] == 101
        assert res["message"] == "File name can't be empty."
    def test_kb_not_found(self, document_app_module, monkeypatch):
        # Dataset lookup failure surfaces as code 102.
        module = document_app_module
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
        async def fake_request_json():
            return {"kb_id": "missing", "name": "doc.txt"}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.create.__wrapped__())
        assert res["code"] == 102
        assert res["message"] == "Can't find this dataset!"
    def test_duplicate_name(self, document_app_module, monkeypatch):
        # An existing document with the same name blocks creation.
        module = document_app_module
        kb = SimpleNamespace(id="kb1", tenant_id="tenant1", name="kb", parser_id="parser", pipeline_id="pipe", parser_config={})
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, kb))
        monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [object()])
        async def fake_request_json():
            return {"kb_id": "kb1", "name": "doc.txt"}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.create.__wrapped__())
        assert res["code"] == 102
        assert "Duplicated document name" in res["message"]
    def test_root_folder_missing(self, document_app_module, monkeypatch):
        # Missing root folder in the file tree aborts creation.
        module = document_app_module
        kb = SimpleNamespace(id="kb1", tenant_id="tenant1", name="kb", parser_id="parser", pipeline_id="pipe", parser_config={})
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, kb))
        monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [])
        monkeypatch.setattr(module.FileService, "get_kb_folder", lambda *_args, **_kwargs: None)
        async def fake_request_json():
            return {"kb_id": "kb1", "name": "doc.txt"}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.create.__wrapped__())
        assert res["code"] == 102
        assert res["message"] == "Cannot find the root folder."
    def test_kb_folder_missing(self, document_app_module, monkeypatch):
        # Root exists but the per-KB folder cannot be created/found.
        module = document_app_module
        kb = SimpleNamespace(id="kb1", tenant_id="tenant1", name="kb", parser_id="parser", pipeline_id="pipe", parser_config={})
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, kb))
        monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [])
        monkeypatch.setattr(module.FileService, "get_kb_folder", lambda *_args, **_kwargs: {"id": "root"})
        monkeypatch.setattr(module.FileService, "new_a_file_from_kb", lambda *_args, **_kwargs: None)
        async def fake_request_json():
            return {"kb_id": "kb1", "name": "doc.txt"}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.create.__wrapped__())
        assert res["code"] == 102
        assert res["message"] == "Cannot find the kb folder for this file."
    def test_success(self, document_app_module, monkeypatch):
        # Full happy path: every service call stubbed to succeed.
        module = document_app_module
        kb = SimpleNamespace(id="kb1", tenant_id="tenant1", name="kb", parser_id="parser", pipeline_id="pipe", parser_config={})
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, kb))
        monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [])
        monkeypatch.setattr(module.FileService, "get_kb_folder", lambda *_args, **_kwargs: {"id": "root"})
        monkeypatch.setattr(module.FileService, "new_a_file_from_kb", lambda *_args, **_kwargs: {"id": "folder"})
        class _Doc:
            # Minimal stand-in for the inserted Document model.
            def __init__(self, doc_id):
                self.id = doc_id
            def to_json(self):
                return {"id": self.id, "name": "doc.txt", "kb_id": "kb1"}
            def to_dict(self):
                return {"id": self.id, "name": "doc.txt", "kb_id": "kb1"}
        monkeypatch.setattr(module.DocumentService, "insert", lambda _doc: _Doc("doc1"))
        monkeypatch.setattr(module.FileService, "add_file_from_kb", lambda *_args, **_kwargs: None)
        async def fake_request_json():
            return {"kb_id": "kb1", "name": "doc.txt"}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.create.__wrapped__())
        assert res["code"] == 0
        assert res["data"]["id"] == "doc1"
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_document_app/test_create_document.py",
"license": "Apache License 2.0",
"lines": 178,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_document_app/test_list_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
from concurrent.futures import ThreadPoolExecutor, as_completed
from types import SimpleNamespace
import pytest
from common import list_documents
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
from utils import is_sorted
@pytest.mark.p2
class TestAuthorization:
    """Listing documents must be rejected without a valid web-API token."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        response = list_documents(invalid_auth, {"kb_id": "dataset_id"})
        assert response["code"] == expected_code
        assert response["message"] == expected_message
class TestDocumentsList:
    """Black-box tests for the listing endpoint: defaults, pagination,
    ordering, keyword filtering, and concurrent access. The `add_documents`
    fixture provisions a KB holding 5 documents."""
    @pytest.mark.p1
    def test_default(self, WebApiAuth, add_documents):
        # Default listing returns all 5 fixture documents.
        kb_id, _ = add_documents
        res = list_documents(WebApiAuth, {"kb_id": kb_id})
        assert res["code"] == 0
        assert len(res["data"]["docs"]) == 5
        assert res["data"]["total"] == 5
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "kb_id, expected_code, expected_message",
        [
            ("", 101, 'Lack of "KB ID"'),
            ("invalid_dataset_id", 103, "Only owner of dataset authorized for this operation."),
        ],
    )
    def test_invalid_dataset_id(self, WebApiAuth, kb_id, expected_code, expected_message):
        # Missing vs. unauthorized dataset ids produce distinct error codes.
        res = list_documents(WebApiAuth, {"kb_id": kb_id})
        assert res["code"] == expected_code
        assert res["message"] == expected_message
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_code, expected_page_size, expected_message",
        [
            ({"page": None, "page_size": 2}, 0, 5, ""),
            ({"page": 0, "page_size": 2}, 0, 5, ""),
            ({"page": 2, "page_size": 2}, 0, 2, ""),
            ({"page": 3, "page_size": 2}, 0, 1, ""),
            ({"page": "3", "page_size": 2}, 0, 1, ""),
            pytest.param({"page": -1, "page_size": 2}, 100, 0, "1064", marks=pytest.mark.skip(reason="issues/5851")),
            pytest.param({"page": "a", "page_size": 2}, 100, 0, """ValueError("invalid literal for int() with base 10: 'a'")""", marks=pytest.mark.skip(reason="issues/5851")),
        ],
    )
    def test_page(self, WebApiAuth, add_documents, params, expected_code, expected_page_size, expected_message):
        # Page slicing; total stays 5 regardless of the page window.
        kb_id, _ = add_documents
        res = list_documents(WebApiAuth, {"kb_id": kb_id, **params})
        assert res["code"] == expected_code, res
        if expected_code == 0:
            assert len(res["data"]["docs"]) == expected_page_size, res
            assert res["data"]["total"] == 5, res
        else:
            assert res["message"] == expected_message, res
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_code, expected_page_size, expected_message",
        [
            ({"page_size": None}, 0, 5, ""),
            ({"page_size": 0}, 0, 5, ""),
            ({"page_size": 1}, 0, 5, ""),
            ({"page_size": 6}, 0, 5, ""),
            ({"page_size": "1"}, 0, 5, ""),
            pytest.param({"page_size": -1}, 100, 0, "1064", marks=pytest.mark.skip(reason="issues/5851")),
            pytest.param({"page_size": "a"}, 100, 0, """ValueError("invalid literal for int() with base 10: 'a'")""", marks=pytest.mark.skip(reason="issues/5851")),
        ],
    )
    def test_page_size(self, WebApiAuth, add_documents, params, expected_code, expected_page_size, expected_message):
        kb_id, _ = add_documents
        res = list_documents(WebApiAuth, {"kb_id": kb_id, **params})
        assert res["code"] == expected_code, res
        if expected_code == 0:
            assert len(res["data"]["docs"]) == expected_page_size, res
        else:
            assert res["message"] == expected_message, res
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params, expected_code, assertions, expected_message",
        [
            ({"orderby": None}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", True)), ""),
            ({"orderby": "create_time"}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", True)), ""),
            ({"orderby": "update_time"}, 0, lambda r: (is_sorted(r["data"]["docs"], "update_time", True)), ""),
            pytest.param({"orderby": "name", "desc": "False"}, 0, lambda r: (is_sorted(r["data"]["docs"], "name", False)), "", marks=pytest.mark.skip(reason="issues/5851")),
            pytest.param({"orderby": "unknown"}, 102, 0, "orderby should be create_time or update_time", marks=pytest.mark.skip(reason="issues/5851")),
        ],
    )
    def test_orderby(self, WebApiAuth, add_documents, params, expected_code, assertions, expected_message):
        # Sort-key selection; callable `assertions` verify the ordering.
        kb_id, _ = add_documents
        res = list_documents(WebApiAuth, {"kb_id": kb_id, **params})
        assert res["code"] == expected_code, res
        if expected_code == 0:
            if callable(assertions):
                assert assertions(res)
        else:
            assert res["message"] == expected_message, res
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params, expected_code, assertions, expected_message",
        [
            ({"desc": None}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", True)), ""),
            ({"desc": "true"}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", True)), ""),
            ({"desc": "True"}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", True)), ""),
            ({"desc": True}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", True)), ""),
            pytest.param({"desc": "false"}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", False)), "", marks=pytest.mark.skip(reason="issues/5851")),
            ({"desc": "False"}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", False)), ""),
            ({"desc": False}, 0, lambda r: (is_sorted(r["data"]["docs"], "create_time", False)), ""),
            ({"desc": "False", "orderby": "update_time"}, 0, lambda r: (is_sorted(r["data"]["docs"], "update_time", False)), ""),
            pytest.param({"desc": "unknown"}, 102, 0, "desc should be true or false", marks=pytest.mark.skip(reason="issues/5851")),
        ],
    )
    def test_desc(self, WebApiAuth, add_documents, params, expected_code, assertions, expected_message):
        # Sort-direction parsing for the various truthy/falsy spellings.
        kb_id, _ = add_documents
        res = list_documents(WebApiAuth, {"kb_id": kb_id, **params})
        assert res["code"] == expected_code, res
        if expected_code == 0:
            if callable(assertions):
                assert assertions(res)
        else:
            assert res["message"] == expected_message, res
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "params, expected_num",
        [
            ({"keywords": None}, 5),
            ({"keywords": ""}, 5),
            ({"keywords": "0"}, 1),
            ({"keywords": "ragflow_test_upload"}, 5),
            ({"keywords": "unknown"}, 0),
        ],
    )
    def test_keywords(self, WebApiAuth, add_documents, params, expected_num):
        # Keyword substring filtering against fixture document names.
        kb_id, _ = add_documents
        res = list_documents(WebApiAuth, {"kb_id": kb_id, **params})
        assert res["code"] == 0, res
        assert len(res["data"]["docs"]) == expected_num, res
        assert res["data"]["total"] == expected_num, res
    @pytest.mark.p3
    def test_concurrent_list(self, WebApiAuth, add_documents):
        # 100 parallel listings should all succeed.
        kb_id, _ = add_documents
        count = 100
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(list_documents, WebApiAuth, {"kb_id": kb_id}) for i in range(count)]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures), responses
def _run(coro):
return asyncio.run(coro)
class _DummyArgs(dict):
def get(self, key, default=None):
return super().get(key, default)
@pytest.mark.p2
class TestDocumentsListUnit:
    """White-box tests for `list_docs`: the module-level `request` object and
    every service-layer call are monkeypatched so each branch of the handler
    can be exercised in isolation."""
    def _set_args(self, module, monkeypatch, **kwargs):
        # Replace the module's `request` with a stub exposing only `.args`.
        monkeypatch.setattr(module, "request", SimpleNamespace(args=_DummyArgs(kwargs)))
    def _allow_kb(self, module, monkeypatch, kb_id="kb1", tenant_id="tenant1"):
        # Make tenant lookup succeed and authorize exactly `kb_id`.
        monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id=tenant_id)])
        monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: True if _kwargs.get("id") == kb_id else False)
    def test_missing_kb_id(self, document_app_module, monkeypatch):
        # No kb_id query arg -> argument-missing error.
        module = document_app_module
        self._set_args(module, monkeypatch)
        async def fake_request_json():
            return {}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.list_docs())
        assert res["code"] == 101
        assert res["message"] == 'Lack of "KB ID"'
    def test_unauthorized_dataset(self, document_app_module, monkeypatch):
        # KB query returns falsy -> ownership error.
        module = document_app_module
        self._set_args(module, monkeypatch, kb_id="kb1")
        monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant1")])
        monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: False)
        async def fake_request_json():
            return {}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.list_docs())
        assert res["code"] == 103
        assert "Only owner of dataset" in res["message"]
    def test_return_empty_metadata_flags(self, document_app_module, monkeypatch):
        # Both spellings of "include empty metadata" succeed on an empty KB.
        module = document_app_module
        self._set_args(module, monkeypatch, kb_id="kb1")
        self._allow_kb(module, monkeypatch)
        monkeypatch.setattr(module.DocumentService, "get_by_kb_id", lambda *_args, **_kwargs: ([], 0))
        async def fake_request_json():
            return {"return_empty_metadata": "true", "metadata": {"author": "alice"}}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.list_docs())
        assert res["code"] == 0
        async def fake_request_json_empty():
            return {"metadata": {"empty_metadata": True, "author": "alice"}}
        monkeypatch.setattr(module, "get_request_json", fake_request_json_empty)
        res = _run(module.list_docs())
        assert res["code"] == 0
    def test_invalid_filters(self, document_app_module, monkeypatch):
        # Unknown run-status and document-type filters are rejected.
        module = document_app_module
        self._set_args(module, monkeypatch, kb_id="kb1")
        self._allow_kb(module, monkeypatch)
        async def fake_request_json():
            return {"run_status": ["INVALID"]}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.list_docs())
        assert res["code"] == 102
        assert "Invalid filter run status" in res["message"]
        async def fake_request_json_types():
            return {"types": ["INVALID"]}
        monkeypatch.setattr(module, "get_request_json", fake_request_json_types)
        res = _run(module.list_docs())
        assert res["code"] == 102
        assert "Invalid filter conditions" in res["message"]
    def test_invalid_metadata_types(self, document_app_module, monkeypatch):
        # metadata_condition must be an object; metadata must be a mapping.
        module = document_app_module
        self._set_args(module, monkeypatch, kb_id="kb1")
        self._allow_kb(module, monkeypatch)
        async def fake_request_json():
            return {"metadata_condition": "bad"}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.list_docs())
        assert res["code"] == 102
        assert "metadata_condition" in res["message"]
        async def fake_request_json_meta():
            return {"metadata": ["not", "object"]}
        monkeypatch.setattr(module, "get_request_json", fake_request_json_meta)
        res = _run(module.list_docs())
        assert res["code"] == 102
        assert "metadata must be an object" in res["message"]
    def test_metadata_condition_empty_result(self, document_app_module, monkeypatch):
        # A condition matching no docs yields an empty (code 0) result.
        module = document_app_module
        self._set_args(module, monkeypatch, kb_id="kb1")
        self._allow_kb(module, monkeypatch)
        monkeypatch.setattr(module.DocMetadataService, "get_flatted_meta_by_kbs", lambda *_args, **_kwargs: {})
        monkeypatch.setattr(module, "meta_filter", lambda *_args, **_kwargs: set())
        async def fake_request_json():
            return {"metadata_condition": {"conditions": [{"name": "author", "comparison_operator": "is", "value": "alice"}]}}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.list_docs())
        assert res["code"] == 0
        assert res["data"]["total"] == 0
    def test_metadata_values_intersection(self, document_app_module, monkeypatch):
        # Multiple metadata keys intersect; blank/None values are ignored.
        # `captured` records the positional doc-id filter passed to the service.
        module = document_app_module
        self._set_args(module, monkeypatch, kb_id="kb1")
        self._allow_kb(module, monkeypatch)
        metas = {
            "author": {"alice": ["doc1", "doc2"]},
            "topic": {"rag": ["doc2"]},
        }
        monkeypatch.setattr(module.DocMetadataService, "get_flatted_meta_by_kbs", lambda *_args, **_kwargs: metas)
        captured = {}
        def fake_get_by_kb_id(*_args, **_kwargs):
            if len(_args) >= 10:
                captured["doc_ids_filter"] = _args[9]
            else:
                captured["doc_ids_filter"] = None
            return ([{"id": "doc2", "thumbnail": "", "parser_config": {}}], 1)
        monkeypatch.setattr(module.DocumentService, "get_by_kb_id", fake_get_by_kb_id)
        async def fake_request_json():
            return {"metadata": {"author": ["alice", " ", None], "topic": "rag"}}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.list_docs())
        assert res["code"] == 0
        assert captured["doc_ids_filter"] == ["doc2"]
    def test_metadata_intersection_empty(self, document_app_module, monkeypatch):
        # Disjoint metadata matches intersect to nothing -> empty result.
        module = document_app_module
        self._set_args(module, monkeypatch, kb_id="kb1")
        self._allow_kb(module, monkeypatch)
        metas = {
            "author": {"alice": ["doc1"]},
            "topic": {"rag": ["doc2"]},
        }
        monkeypatch.setattr(module.DocMetadataService, "get_flatted_meta_by_kbs", lambda *_args, **_kwargs: metas)
        async def fake_request_json():
            return {"metadata": {"author": "alice", "topic": "rag"}}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.list_docs())
        assert res["code"] == 0
        assert res["data"]["total"] == 0
    def test_desc_time_and_schema(self, document_app_module, monkeypatch):
        # create_time window filters docs; metadata is re-shaped via turn2jsonschema.
        module = document_app_module
        self._set_args(module, monkeypatch, kb_id="kb1", desc="false", create_time_from="150", create_time_to="250")
        self._allow_kb(module, monkeypatch)
        docs = [
            {"id": "doc1", "thumbnail": "", "parser_config": {"metadata": {"a": 1}}, "create_time": 100},
            {"id": "doc2", "thumbnail": "", "parser_config": {"metadata": {"b": 2}}, "create_time": 200},
        ]
        def fake_get_by_kb_id(*_args, **_kwargs):
            return (docs, 2)
        monkeypatch.setattr(module.DocumentService, "get_by_kb_id", fake_get_by_kb_id)
        monkeypatch.setattr(module, "turn2jsonschema", lambda _meta: {"schema": True})
        async def fake_request_json():
            return {}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.list_docs())
        assert res["code"] == 0
        assert len(res["data"]["docs"]) == 1
        assert res["data"]["docs"][0]["parser_config"]["metadata"] == {"schema": True}
    def test_exception_path(self, document_app_module, monkeypatch):
        # A service-layer exception surfaces as the generic error code 100.
        module = document_app_module
        self._set_args(module, monkeypatch, kb_id="kb1")
        self._allow_kb(module, monkeypatch)
        def raise_error(*_args, **_kwargs):
            raise RuntimeError("boom")
        monkeypatch.setattr(module.DocumentService, "get_by_kb_id", raise_error)
        async def fake_request_json():
            return {}
        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        res = _run(module.list_docs())
        assert res["code"] == 100
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_document_app/test_list_documents.py",
"license": "Apache License 2.0",
"lines": 329,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_document_app/test_paser_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
from concurrent.futures import ThreadPoolExecutor, as_completed
from types import SimpleNamespace
import pytest
from common import bulk_upload_documents, list_documents, parse_documents
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
from utils import wait_for
def _run(coro):
return asyncio.run(coro)
@wait_for(30, 1, "Document parsing timeout")
def condition(_auth, _kb_id, _document_ids=None):
res = list_documents(_auth, {"kb_id": _kb_id})
target_docs = res["data"]["docs"]
if _document_ids is None:
for doc in target_docs:
if doc["run"] != "3":
return False
return True
target_ids = set(_document_ids)
for doc in target_docs:
if doc["id"] in target_ids:
if doc.get("run") != "3":
return False
return True
def validate_document_parse_done(auth, _kb_id, _document_ids):
    """Assert every targeted document finished parsing: run flag "3", a
    recorded start time, positive duration/progress, and a done message."""
    targeted = set(_document_ids)
    docs = list_documents(auth, {"kb_id": _kb_id})["data"]["docs"]
    for doc in (d for d in docs if d["id"] in targeted):
        assert doc["run"] == "3"
        assert len(doc["process_begin_at"]) > 0
        assert doc["process_duration"] > 0
        assert doc["progress"] > 0
        assert "Task done" in doc["progress_msg"]
def validate_document_parse_cancel(auth, _kb_id, _document_ids):
    """Assert that every document in *_document_ids* was cancelled (run == "2")."""
    res = list_documents(auth, {"kb_id": _kb_id})
    watched = set(_document_ids)
    for doc in res["data"]["docs"]:
        if doc["id"] in watched:
            assert doc["run"] == "2"
            assert len(doc["process_begin_at"]) > 0
            # Cancelled documents keep zero progress.
            assert doc["progress"] == 0.0
@pytest.mark.p2
class TestAuthorization:
    """The parse endpoint must reject missing or invalid credentials."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        response = parse_documents(invalid_auth)
        assert response["code"] == expected_code, response
        assert response["message"] == expected_message, response
class TestDocumentsParse:
    """End-to-end scenarios for starting document parsing ("run": "1")."""

    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            # Missing / malformed request bodies are currently skipped upstream.
            pytest.param(None, 101, "required argument are missing: doc_ids, run; ", marks=pytest.mark.skip),
            pytest.param({"doc_ids": [], "run": "1"}, 0, "", marks=pytest.mark.p1),
            pytest.param({"doc_ids": ["invalid_id"], "run": "1"}, 109, "No authorization.", marks=pytest.mark.p3),
            pytest.param({"doc_ids": ["\n!?ใ๏ผ๏ผ๏ผ\"'"], "run": "1"}, 109, "No authorization.", marks=pytest.mark.p3),
            pytest.param("not json", 101, "required argument are missing: doc_ids, run; ", marks=pytest.mark.skip),
            # Callable payloads are resolved with the uploaded document ids at run time.
            pytest.param(lambda r: {"doc_ids": r[:1], "run": "1"}, 0, "", marks=pytest.mark.p1),
            pytest.param(lambda r: {"doc_ids": r, "run": "1"}, 0, "", marks=pytest.mark.p1),
        ],
    )
    def test_basic_scenarios(self, WebApiAuth, add_documents_func, payload, expected_code, expected_message):
        """Submit parse requests with valid/invalid payloads and verify the outcome."""
        kb_id, document_ids = add_documents_func
        if callable(payload):
            payload = payload(document_ids)
        res = parse_documents(WebApiAuth, payload)
        assert res["code"] == expected_code, res
        if expected_code == 0:
            # Accepted submissions must eventually reach DONE state.
            condition(WebApiAuth, kb_id, payload["doc_ids"])
            validate_document_parse_done(WebApiAuth, kb_id, payload["doc_ids"])
        else:
            assert res["message"] == expected_message, res

    @pytest.mark.parametrize(
        "payload",
        [
            pytest.param(lambda r: {"doc_ids": ["invalid_id"] + r, "run": "1"}, marks=pytest.mark.p3),
            pytest.param(lambda r: {"doc_ids": r[:1] + ["invalid_id"] + r[1:3], "run": "1"}, marks=pytest.mark.p1),
            pytest.param(lambda r: {"doc_ids": r + ["invalid_id"], "run": "1"}, marks=pytest.mark.p3),
        ],
    )
    def test_parse_partial_invalid_document_id(self, WebApiAuth, add_documents_func, payload):
        """One unknown id anywhere in the list rejects the whole request (109)."""
        _, document_ids = add_documents_func
        if callable(payload):
            payload = payload(document_ids)
        res = parse_documents(WebApiAuth, payload)
        assert res["code"] == 109, res
        assert res["message"] == "No authorization.", res

    @pytest.mark.p3
    def test_repeated_parse(self, WebApiAuth, add_documents_func):
        """Re-submitting documents that already finished parsing is accepted."""
        kb_id, document_ids = add_documents_func
        res = parse_documents(WebApiAuth, {"doc_ids": document_ids, "run": "1"})
        assert res["code"] == 0, res
        condition(WebApiAuth, kb_id, document_ids)
        res = parse_documents(WebApiAuth, {"doc_ids": document_ids, "run": "1"})
        assert res["code"] == 0, res

    @pytest.mark.p3
    def test_duplicate_parse(self, WebApiAuth, add_documents_func):
        """Duplicate ids within a single request are tolerated and all parse."""
        kb_id, document_ids = add_documents_func
        res = parse_documents(WebApiAuth, {"doc_ids": document_ids + document_ids, "run": "1"})
        assert res["code"] == 0, res
        assert res["message"] == "success", res
        condition(WebApiAuth, kb_id, document_ids)
        validate_document_parse_done(WebApiAuth, kb_id, document_ids)
@pytest.mark.p3
def test_parse_100_files(WebApiAuth, add_dataset_func, tmp_path):
    """Bulk-upload 100 files, parse them in one request, and wait for completion."""

    @wait_for(100, 1, "Document parsing timeout")
    def all_done(_auth, _kb_id, _document_num):
        docs = list_documents(_auth, {"kb_id": _kb_id, "page_size": _document_num})["data"]["docs"]
        return all(doc["run"] == "3" for doc in docs)

    document_num = 100
    kb_id = add_dataset_func
    document_ids = bulk_upload_documents(WebApiAuth, kb_id, document_num, tmp_path)
    res = parse_documents(WebApiAuth, {"doc_ids": document_ids, "run": "1"})
    assert res["code"] == 0, res
    all_done(WebApiAuth, kb_id, document_num)
    validate_document_parse_done(WebApiAuth, kb_id, document_ids)
@pytest.mark.p3
def test_concurrent_parse(WebApiAuth, add_dataset_func, tmp_path):
    """Fire 100 single-document parse requests from a 5-worker thread pool."""

    @wait_for(120, 1, "Document parsing timeout")
    def all_done(_auth, _kb_id, _document_num):
        docs = list_documents(_auth, {"kb_id": _kb_id, "page_size": _document_num})["data"]["docs"]
        return all(doc["run"] == "3" for doc in docs)

    count = 100
    kb_id = add_dataset_func
    document_ids = bulk_upload_documents(WebApiAuth, kb_id, count, tmp_path)
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [
            executor.submit(parse_documents, WebApiAuth, {"doc_ids": [document_ids[i]], "run": "1"})
            for i in range(count)
        ]
        responses = list(as_completed(futures))
    assert len(responses) == count, responses
    assert all(future.result()["code"] == 0 for future in futures)
    all_done(WebApiAuth, kb_id, count)
    validate_document_parse_done(WebApiAuth, kb_id, document_ids)
@pytest.mark.p2
class TestDocumentsParseUnit:
    """Unit-level branch matrix for the document `run` endpoint with collaborators mocked."""

    def test_run_branch_matrix_unit(self, document_app_module, monkeypatch):
        """Drive run() through auth failure, missing tenant/document, invalid cancel,
        rerun-with-delete, missing dataset, and unexpected-exception branches."""
        module = document_app_module
        # Records every side effect the endpoint is expected to trigger per branch.
        calls = {"clear": [], "filter_delete": [], "docstore_delete": [], "cancel": [], "run": []}

        async def fake_thread_pool_exec(func, *args, **kwargs):
            # Run "background" work synchronously so the asserts below see its effects.
            return func(*args, **kwargs)

        monkeypatch.setattr(module, "thread_pool_exec", fake_thread_pool_exec)
        monkeypatch.setattr(module, "server_error_response", lambda e: {"code": 500, "message": str(e)})
        monkeypatch.setattr(module.search, "index_name", lambda tenant_id: f"idx_{tenant_id}")
        monkeypatch.setattr(module, "cancel_all_task_of", lambda doc_id: calls["cancel"].append(doc_id))

        class _DocStore:
            # Minimal doc-store stub: the index always exists and deletes are recorded.
            def index_exist(self, _index_name, _kb_id):
                return True

            def delete(self, where, _index_name, _kb_id):
                calls["docstore_delete"].append(where["doc_id"])

        monkeypatch.setattr(module.settings, "docStoreConn", _DocStore())

        async def set_request(payload):
            return payload

        def apply_request(payload):
            # Re-point get_request_json at a coroutine that yields *payload*.
            async def fake_request_json():
                return await set_request(payload)

            monkeypatch.setattr(module, "get_request_json", fake_request_json)

        # Branch: caller not accessible -> authentication error.
        apply_request({"doc_ids": ["doc1"], "run": module.TaskStatus.RUNNING.value})
        monkeypatch.setattr(module.DocumentService, "accessible", lambda *_args, **_kwargs: False)
        res = _run(module.run.__wrapped__())
        assert res["code"] == module.RetCode.AUTHENTICATION_ERROR

        # Branch: tenant lookup returns None -> data error.
        monkeypatch.setattr(module.DocumentService, "accessible", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: None)
        res = _run(module.run.__wrapped__())
        assert res["code"] == module.RetCode.DATA_ERROR
        assert "Tenant not found!" in res["message"]

        # Branch: document lookup fails -> data error.
        monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: "tenant1")
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (False, None))
        res = _run(module.run.__wrapped__())
        assert res["code"] == module.RetCode.DATA_ERROR
        assert "Document not found!" in res["message"]

        # Branch: cancelling a document that is not RUNNING is rejected.
        apply_request({"doc_ids": ["doc1"], "run": module.TaskStatus.CANCEL.value})
        doc_cancel = SimpleNamespace(id="doc1", run=module.TaskStatus.DONE.value, kb_id="kb1", parser_config={}, to_dict=lambda: {"id": "doc1"})
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, doc_cancel))
        monkeypatch.setattr(module.TaskService, "query", lambda **_kwargs: [SimpleNamespace(progress=1)])
        res = _run(module.run.__wrapped__())
        assert res["code"] == module.RetCode.DATA_ERROR
        assert "Cannot cancel a task that is not in RUNNING status" in res["message"]

        # Branch: rerun with delete=True clears chunk counts, tasks, and doc-store entries.
        apply_request({"doc_ids": ["doc1"], "run": module.TaskStatus.RUNNING.value, "delete": True})
        doc_rerun = SimpleNamespace(id="doc1", run=module.TaskStatus.DONE.value, kb_id="kb1", parser_config={}, to_dict=lambda: {"id": "doc1"})
        monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, doc_rerun))
        monkeypatch.setattr(module.DocumentService, "clear_chunk_num_when_rerun", lambda doc_id: calls["clear"].append(doc_id))
        monkeypatch.setattr(module.TaskService, "filter_delete", lambda _filters: calls["filter_delete"].append(True))
        monkeypatch.setattr(module.DocumentService, "update_by_id", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module.DocumentService, "run", lambda tenant_id, doc_dict, _kb_map: calls["run"].append((tenant_id, doc_dict)))
        res = _run(module.run.__wrapped__())
        assert res["code"] == 0
        assert calls["clear"] == ["doc1"]
        assert calls["filter_delete"] == [True]
        assert calls["docstore_delete"] == ["doc1"]
        assert calls["run"] == [("tenant1", {"id": "doc1"})]

        # Branch: apply_kb=True but the dataset cannot be found -> wrapped 500.
        apply_request({"doc_ids": ["doc1"], "run": module.TaskStatus.RUNNING.value, "apply_kb": True})
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
        res = _run(module.run.__wrapped__())
        assert res["code"] == 500
        assert "Can't find this dataset!" in res["message"]

        # Branch: an unexpected exception is wrapped by server_error_response.
        apply_request({"doc_ids": ["doc1"], "run": module.TaskStatus.RUNNING.value})

        def raise_run_error(*_args, **_kwargs):
            raise RuntimeError("run boom")

        monkeypatch.setattr(module.DocumentService, "run", raise_run_error)
        res = _run(module.run.__wrapped__())
        assert res["code"] == 500
        assert "run boom" in res["message"]
# @pytest.mark.skip
class TestDocumentsParseStop:
    """End-to-end scenarios for cancelling in-flight parsing ("run": "2")."""

    @pytest.mark.parametrize(
        "payload, expected_code, expected_message",
        [
            pytest.param(None, 101, "required argument are missing: doc_ids, run; ", marks=pytest.mark.skip),
            pytest.param({"doc_ids": [], "run": "2"}, 0, "", marks=pytest.mark.p1),
            pytest.param({"doc_ids": ["invalid_id"], "run": "2"}, 109, "No authorization.", marks=pytest.mark.p3),
            pytest.param({"doc_ids": ["\n!?ใ๏ผ๏ผ๏ผ\"'"], "run": "2"}, 109, "No authorization.", marks=pytest.mark.p3),
            pytest.param("not json", 101, "required argument are missing: doc_ids, run; ", marks=pytest.mark.skip),
            pytest.param(lambda r: {"doc_ids": r[:1], "run": "2"}, 0, "", marks=pytest.mark.p1),
            pytest.param(lambda r: {"doc_ids": r, "run": "2"}, 0, "", marks=pytest.mark.p1),
        ],
    )
    def test_basic_scenarios(self, WebApiAuth, add_documents_func, payload, expected_code, expected_message):
        """Cancel parsing for a subset and verify cancelled vs. completed documents."""

        @wait_for(10, 1, "Document parsing timeout")
        def condition(_auth, _kb_id, _doc_ids):
            # Wait until the documents that were NOT cancelled finish parsing.
            res = list_documents(_auth, {"kb_id": _kb_id})
            for doc in res["data"]["docs"]:
                if doc["id"] in _doc_ids:
                    if doc["run"] != "3":
                        return False
            return True

        kb_id, document_ids = add_documents_func
        parse_documents(WebApiAuth, {"doc_ids": document_ids, "run": "1"})
        if callable(payload):
            payload = payload(document_ids)
        res = parse_documents(WebApiAuth, payload)
        assert res["code"] == expected_code, res
        if expected_code == 0:
            # Documents outside the cancel payload should still run to completion.
            completed_document_ids = list(set(document_ids) - set(payload["doc_ids"]))
            condition(WebApiAuth, kb_id, completed_document_ids)
            validate_document_parse_cancel(WebApiAuth, kb_id, payload["doc_ids"])
            validate_document_parse_done(WebApiAuth, kb_id, completed_document_ids)
        else:
            assert res["message"] == expected_message, res

    @pytest.mark.skip
    @pytest.mark.parametrize(
        "payload",
        [
            lambda r: {"doc_ids": ["invalid_id"] + r, "run": "2"},
            lambda r: {"doc_ids": r[:1] + ["invalid_id"] + r[1:3], "run": "2"},
            lambda r: {"doc_ids": r + ["invalid_id"], "run": "2"},
        ],
    )
    def test_stop_parse_partial_invalid_document_id(self, WebApiAuth, add_documents_func, payload):
        """A partially-invalid id list during stop is rejected (currently skipped)."""
        kb_id, document_ids = add_documents_func
        parse_documents(WebApiAuth, {"doc_ids": document_ids, "run": "1"})
        if callable(payload):
            payload = payload(document_ids)
        res = parse_documents(WebApiAuth, payload)
        assert res["code"] == 109, res
        assert res["message"] == "No authorization.", res
        validate_document_parse_cancel(WebApiAuth, kb_id, document_ids)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_document_app/test_paser_documents.py",
"license": "Apache License 2.0",
"lines": 291,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_document_app/test_rm_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import bulk_upload_documents, delete_document, list_documents
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
def _run(coro):
return asyncio.run(coro)
@pytest.mark.p2
class TestAuthorization:
    """The delete endpoint must reject missing or invalid credentials."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        response = delete_document(invalid_auth)
        assert response["code"] == expected_code, response
        assert response["message"] == expected_message, response
class TestDocumentsDeletion:
    """End-to-end scenarios for the single-document delete endpoint."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_code, expected_message, remaining",
        [
            # Invalid payloads must leave all 3 uploaded documents in place.
            (None, 101, "required argument are missing: doc_id; ", 3),
            ({"doc_id": ""}, 109, "No authorization.", 3),
            ({"doc_id": "invalid_id"}, 109, "No authorization.", 3),
            ({"doc_id": "\n!?ใ๏ผ๏ผ๏ผ\"'"}, 109, "No authorization.", 3),
            ("not json", 101, "required argument are missing: doc_id; ", 3),
            # A valid id deletes exactly one document.
            (lambda r: {"doc_id": r[0]}, 0, "", 2),
        ],
    )
    def test_basic_scenarios(self, WebApiAuth, add_documents_func, payload, expected_code, expected_message, remaining):
        """Delete with valid/invalid payloads and check the remaining document count."""
        kb_id, document_ids = add_documents_func
        if callable(payload):
            payload = payload(document_ids)
        res = delete_document(WebApiAuth, payload)
        assert res["code"] == expected_code, res
        if res["code"] != 0:
            assert res["message"] == expected_message, res
        res = list_documents(WebApiAuth, {"kb_id": kb_id})
        assert len(res["data"]["docs"]) == remaining, res
        assert res["data"]["total"] == remaining, res

    @pytest.mark.p2
    def test_repeated_deletion(self, WebApiAuth, add_documents_func):
        """A second deletion of the same id is rejected with code 109."""
        _, document_ids = add_documents_func
        for doc_id in document_ids:
            res = delete_document(WebApiAuth, {"doc_id": doc_id})
            assert res["code"] == 0, res
        for doc_id in document_ids:
            res = delete_document(WebApiAuth, {"doc_id": doc_id})
            assert res["code"] == 109, res
            assert res["message"] == "No authorization.", res
@pytest.mark.p2
class TestDocumentsDeletionUnit:
    """Unit-level test of the rm endpoint with all collaborators mocked."""

    def test_rm_string_doc_id_normalization_success_unit(self, document_app_module, monkeypatch):
        """A scalar doc_id is normalized to a one-element list before deletion is scheduled."""
        module = document_app_module
        captured = {}

        async def fake_request_json():
            return {"doc_id": "doc1"}

        async def fake_thread_pool_exec(func, doc_ids, user_id):
            # Record what rm() would hand to the worker instead of deleting anything.
            captured["func"] = func
            captured["doc_ids"] = doc_ids
            captured["user_id"] = user_id
            return None

        monkeypatch.setattr(module, "get_request_json", fake_request_json)
        monkeypatch.setattr(module.DocumentService, "accessible4deletion", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module, "thread_pool_exec", fake_thread_pool_exec)
        res = _run(module.rm.__wrapped__())
        assert res["code"] == 0
        assert res["data"] is True
        assert captured["func"] == module.FileService.delete_docs
        assert captured["doc_ids"] == ["doc1"]
        assert captured["user_id"] == module.current_user.id
@pytest.mark.p3
def test_concurrent_deletion(WebApiAuth, add_dataset, tmp_path):
    """Delete 100 uploaded documents concurrently; every call must succeed."""
    count = 100
    kb_id = add_dataset
    document_ids = bulk_upload_documents(WebApiAuth, kb_id, count, tmp_path)
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [
            executor.submit(delete_document, WebApiAuth, {"doc_id": document_ids[i]})
            for i in range(count)
        ]
        responses = list(as_completed(futures))
    assert len(responses) == count, responses
    assert all(future.result()["code"] == 0 for future in futures), responses
@pytest.mark.p3
def test_delete_100(WebApiAuth, add_dataset, tmp_path):
    """Upload 100 documents, delete them one by one, and verify the dataset empties."""
    total_docs = 100
    kb_id = add_dataset
    document_ids = bulk_upload_documents(WebApiAuth, kb_id, total_docs, tmp_path)
    listing = list_documents(WebApiAuth, {"kb_id": kb_id})
    assert listing["data"]["total"] == total_docs, listing
    for doc_id in document_ids:
        deletion = delete_document(WebApiAuth, {"doc_id": doc_id})
        assert deletion["code"] == 0, deletion
    listing = list_documents(WebApiAuth, {"kb_id": kb_id})
    assert listing["data"]["total"] == 0, listing
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_document_app/test_rm_documents.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_document_app/test_upload_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import sys
import string
from types import ModuleType, SimpleNamespace
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import list_kbs, upload_documents
from configs import DOCUMENT_NAME_LIMIT, INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
from utils.file_utils import create_txt_file
from api.constants import FILE_NAME_LEN_LIMIT
@pytest.mark.p1
@pytest.mark.usefixtures("clear_datasets")
class TestAuthorization:
    """The upload endpoint must reject missing or invalid credentials."""

    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_invalid_auth(self, invalid_auth, expected_code, expected_message):
        response = upload_documents(invalid_auth)
        assert response["code"] == expected_code, response
        assert response["message"] == expected_message, response
class TestDocumentsUpload:
    """End-to-end scenarios for uploading documents into a dataset."""

    @pytest.mark.p1
    def test_valid_single_upload(self, WebApiAuth, add_dataset_func, tmp_path):
        """A single text file uploads and is attributed to the right dataset."""
        kb_id = add_dataset_func
        fp = create_txt_file(tmp_path / "ragflow_test.txt")
        res = upload_documents(WebApiAuth, {"kb_id": kb_id}, [fp])
        assert res["code"] == 0, res
        assert res["data"][0]["kb_id"] == kb_id, res
        assert res["data"][0]["name"] == fp.name, res

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "generate_test_files",
        [
            "docx",
            "excel",
            "ppt",
            "image",
            "pdf",
            "txt",
            "md",
            "json",
            "eml",
            "html",
        ],
        indirect=True,
    )
    def test_file_type_validation(self, WebApiAuth, add_dataset_func, generate_test_files, request):
        """Every supported file type is accepted by the upload endpoint."""
        kb_id = add_dataset_func
        fp = generate_test_files[request.node.callspec.params["generate_test_files"]]
        res = upload_documents(WebApiAuth, {"kb_id": kb_id}, [fp])
        assert res["code"] == 0, res
        assert res["data"][0]["kb_id"] == kb_id, res
        assert res["data"][0]["name"] == fp.name, res

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "file_type",
        ["exe", "unknown"],
    )
    def test_unsupported_file_type(self, WebApiAuth, add_dataset_func, tmp_path, file_type):
        """Unsupported extensions are rejected with a 500 and an explicit message."""
        kb_id = add_dataset_func
        fp = tmp_path / f"ragflow_test.{file_type}"
        fp.touch()
        res = upload_documents(WebApiAuth, {"kb_id": kb_id}, [fp])
        assert res["code"] == 500, res
        assert res["message"] == f"ragflow_test.{file_type}: This type of file has not been supported yet!", res

    @pytest.mark.p2
    def test_missing_file(self, WebApiAuth, add_dataset_func):
        """A request without a file part yields argument error 101."""
        kb_id = add_dataset_func
        res = upload_documents(WebApiAuth, {"kb_id": kb_id})
        assert res["code"] == 101, res
        assert res["message"] == "No file part!", res

    @pytest.mark.p3
    def test_empty_file(self, WebApiAuth, add_dataset_func, tmp_path):
        """Zero-byte files are accepted and report size 0."""
        kb_id = add_dataset_func
        fp = tmp_path / "empty.txt"
        fp.touch()
        res = upload_documents(WebApiAuth, {"kb_id": kb_id}, [fp])
        assert res["code"] == 0, res
        assert res["data"][0]["size"] == 0, res

    @pytest.mark.p3
    def test_filename_empty(self, WebApiAuth, add_dataset_func, tmp_path):
        """An empty filename override is rejected as "no file selected"."""
        kb_id = add_dataset_func
        fp = create_txt_file(tmp_path / "ragflow_test.txt")
        res = upload_documents(WebApiAuth, {"kb_id": kb_id}, [fp], filename_override="")
        assert res["code"] == 101, res
        assert res["message"] == "No file selected!", res

    @pytest.mark.p3
    def test_filename_exceeds_max_length(self, WebApiAuth, add_dataset_func, tmp_path):
        """A name exactly at DOCUMENT_NAME_LIMIT characters is still accepted."""
        kb_id = add_dataset_func
        # stem of (limit - 4) chars + ".txt" => name length == DOCUMENT_NAME_LIMIT
        fp = create_txt_file(tmp_path / f"{'a' * (DOCUMENT_NAME_LIMIT - 4)}.txt")
        res = upload_documents(WebApiAuth, {"kb_id": kb_id}, [fp])
        assert res["code"] == 0, res
        assert res["data"][0]["name"] == fp.name, res

    @pytest.mark.p2
    def test_invalid_kb_id(self, WebApiAuth, tmp_path):
        """Uploading into an unknown dataset id fails with code 100."""
        fp = create_txt_file(tmp_path / "ragflow_test.txt")
        res = upload_documents(WebApiAuth, {"kb_id": "invalid_kb_id"}, [fp])
        assert res["code"] == 100, res
        assert res["message"] == """LookupError("Can't find this dataset!")""", res

    @pytest.mark.p2
    def test_duplicate_files(self, WebApiAuth, add_dataset_func, tmp_path):
        """Duplicate uploads in one request get de-duplicated names: stem(i).suffix."""
        kb_id = add_dataset_func
        fp = create_txt_file(tmp_path / "ragflow_test.txt")
        res = upload_documents(WebApiAuth, {"kb_id": kb_id}, [fp, fp])
        assert res["code"] == 0, res
        assert len(res["data"]) == 2, res
        for i in range(len(res["data"])):
            assert res["data"][i]["kb_id"] == kb_id, res
            expected_name = fp.name
            if i != 0:
                expected_name = f"{fp.stem}({i}){fp.suffix}"
            assert res["data"][i]["name"] == expected_name, res

    @pytest.mark.p3
    def test_filename_special_characters(self, WebApiAuth, add_dataset_func, tmp_path):
        """Punctuation-heavy (filesystem-safe) filenames survive upload unchanged."""
        kb_id = add_dataset_func
        # Replace characters that are illegal on common filesystems before creating the file.
        illegal_chars = '<>:"/\\|?*'
        translation_table = str.maketrans({char: "_" for char in illegal_chars})
        safe_filename = string.punctuation.translate(translation_table)
        fp = tmp_path / f"{safe_filename}.txt"
        fp.write_text("Sample text content")
        res = upload_documents(WebApiAuth, {"kb_id": kb_id}, [fp])
        assert res["code"] == 0, res
        assert len(res["data"]) == 1, res
        assert res["data"][0]["kb_id"] == kb_id, res
        assert res["data"][0]["name"] == fp.name, res

    @pytest.mark.p1
    def test_multiple_files(self, WebApiAuth, add_dataset_func, tmp_path):
        """One request with 20 files uploads them all; the dataset doc count matches."""
        kb_id = add_dataset_func
        expected_document_count = 20
        fps = []
        for i in range(expected_document_count):
            fp = create_txt_file(tmp_path / f"ragflow_test_{i}.txt")
            fps.append(fp)
        res = upload_documents(WebApiAuth, {"kb_id": kb_id}, fps)
        assert res["code"] == 0, res
        res = list_kbs(WebApiAuth)
        assert res["data"]["kbs"][0]["doc_num"] == expected_document_count, res

    @pytest.mark.p3
    def test_concurrent_upload(self, WebApiAuth, add_dataset_func, tmp_path):
        """20 parallel single-file uploads all succeed and are all counted."""
        kb_id = add_dataset_func
        count = 20
        fps = []
        for i in range(count):
            fp = create_txt_file(tmp_path / f"ragflow_test_{i}.txt")
            fps.append(fp)
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(upload_documents, WebApiAuth, {"kb_id": kb_id}, fps[i : i + 1]) for i in range(count)]
        responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures), responses
        res = list_kbs(WebApiAuth)
        assert res["data"]["kbs"][0]["doc_num"] == count, res
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _coro():
return self._value
return _coro().__await__()
class _DummyFiles(dict):
def getlist(self, key):
value = self.get(key, [])
if isinstance(value, list):
return value
return [value]
class _DummyFile:
def __init__(self, filename):
self.filename = filename
self.closed = False
self.stream = self
def close(self):
self.closed = True
class _DummyRequest:
    """Fake request exposing awaitable .form and .files like Quart's request object."""

    def __init__(self, form=None, files=None):
        self._form = form or {}
        self._files = files or _DummyFiles()

    @property
    def form(self):
        # Handlers do `await request.form`, so wrap in an awaitable.
        return _AwaitableValue(self._form)

    @property
    def files(self):
        return _AwaitableValue(self._files)
def _run(coro):
return asyncio.run(coro)
@pytest.mark.p2
class TestDocumentsUploadUnit:
    """Unit-level tests for upload/parse handlers using _DummyRequest fakes."""

    def test_missing_kb_id(self, document_app_module, monkeypatch):
        """An empty kb_id form field yields argument error 101."""
        module = document_app_module
        monkeypatch.setattr(module, "request", _DummyRequest(form={"kb_id": ""}, files=_DummyFiles()))
        res = _run(module.upload.__wrapped__())
        assert res["code"] == 101
        assert res["message"] == 'Lack of "KB ID"'

    def test_missing_file_part(self, document_app_module, monkeypatch):
        """A request with no "file" entry in the multipart body yields 101."""
        module = document_app_module
        monkeypatch.setattr(module, "request", _DummyRequest(form={"kb_id": "kb1"}, files=_DummyFiles()))
        res = _run(module.upload.__wrapped__())
        assert res["code"] == 101
        assert res["message"] == "No file part!"

    def test_empty_filename_closes_files(self, document_app_module, monkeypatch):
        """An empty filename is rejected AND the file handle is closed."""
        module = document_app_module
        file_obj = _DummyFile("")
        files = _DummyFiles({"file": [file_obj]})
        monkeypatch.setattr(module, "request", _DummyRequest(form={"kb_id": "kb1"}, files=files))
        res = _run(module.upload.__wrapped__())
        assert res["code"] == 101
        assert res["message"] == "No file selected!"
        assert file_obj.closed is True

    def test_filename_too_long(self, document_app_module, monkeypatch):
        """Names longer than FILE_NAME_LEN_LIMIT bytes are rejected with 101."""
        module = document_app_module
        long_name = "a" * (FILE_NAME_LEN_LIMIT + 1)
        file_obj = _DummyFile(long_name)
        files = _DummyFiles({"file": [file_obj]})
        monkeypatch.setattr(module, "request", _DummyRequest(form={"kb_id": "kb1"}, files=files))
        res = _run(module.upload.__wrapped__())
        assert res["code"] == 101
        assert res["message"] == f"File name must be {FILE_NAME_LEN_LIMIT} bytes or less."

    def test_invalid_kb_id_raises(self, document_app_module, monkeypatch):
        """An unknown kb_id propagates out of the handler as LookupError."""
        module = document_app_module
        file_obj = _DummyFile("ragflow_test.txt")
        files = _DummyFiles({"file": [file_obj]})
        monkeypatch.setattr(module, "request", _DummyRequest(form={"kb_id": "missing"}, files=files))
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
        with pytest.raises(LookupError):
            _run(module.upload.__wrapped__())

    def test_no_permission(self, document_app_module, monkeypatch):
        """A failed team-permission check yields code 109."""
        module = document_app_module
        kb = SimpleNamespace(id="kb1", tenant_id="tenant1", name="kb", parser_id="parser", parser_config={})
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, kb))
        monkeypatch.setattr(module, "check_kb_team_permission", lambda *_args, **_kwargs: False)
        file_obj = _DummyFile("ragflow_test.txt")
        files = _DummyFiles({"file": [file_obj]})
        monkeypatch.setattr(module, "request", _DummyRequest(form={"kb_id": "kb1"}, files=files))
        res = _run(module.upload.__wrapped__())
        assert res["code"] == 109
        assert res["message"] == "No authorization."

    def test_thread_pool_errors(self, document_app_module, monkeypatch):
        """Per-file errors from the upload worker produce a 500 with the failed names."""
        module = document_app_module
        kb = SimpleNamespace(id="kb1", tenant_id="tenant1", name="kb", parser_id="parser", parser_config={})
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, kb))
        monkeypatch.setattr(module, "check_kb_team_permission", lambda *_args, **_kwargs: True)

        async def fake_thread_pool_exec(*_args, **_kwargs):
            # Simulate (errors, uploaded-file pairs) coming back from the worker.
            return (["unsupported type"], [("file1", "blob")])

        monkeypatch.setattr(module, "thread_pool_exec", fake_thread_pool_exec)
        file_obj = _DummyFile("ragflow_test.txt")
        files = _DummyFiles({"file": [file_obj]})
        monkeypatch.setattr(module, "request", _DummyRequest(form={"kb_id": "kb1"}, files=files))
        res = _run(module.upload.__wrapped__())
        assert res["code"] == 500
        assert "unsupported type" in res["message"]
        assert res["data"] == ["file1"]

    def test_empty_upload_result(self, document_app_module, monkeypatch):
        """A worker returning no stored files yields code 102 (file format problem)."""
        module = document_app_module
        kb = SimpleNamespace(id="kb1", tenant_id="tenant1", name="kb", parser_id="parser", parser_config={})
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, kb))
        monkeypatch.setattr(module, "check_kb_team_permission", lambda *_args, **_kwargs: True)

        async def fake_thread_pool_exec(*_args, **_kwargs):
            return (None, [])

        monkeypatch.setattr(module, "thread_pool_exec", fake_thread_pool_exec)
        file_obj = _DummyFile("ragflow_test.txt")
        files = _DummyFiles({"file": [file_obj]})
        monkeypatch.setattr(module, "request", _DummyRequest(form={"kb_id": "kb1"}, files=files))
        res = _run(module.upload.__wrapped__())
        assert res["code"] == 102
        assert "file format" in res["message"]

    def test_upload_and_parse_matrix_unit(self, document_app_module, monkeypatch):
        """upload_and_parse: empty filename rejected, then happy path returns doc ids."""
        module = document_app_module
        monkeypatch.setattr(module, "request", _DummyRequest(form={"conversation_id": "conv-1"}, files=_DummyFiles({"file": [_DummyFile("")]})))
        res = _run(module.upload_and_parse.__wrapped__())
        assert res["code"] == module.RetCode.ARGUMENT_ERROR
        assert res["message"] == "No file selected!"
        files = _DummyFiles({"file": [_DummyFile("note.txt")]})
        monkeypatch.setattr(module, "request", _DummyRequest(form={"conversation_id": "conv-1"}, files=files))
        monkeypatch.setattr(module, "doc_upload_and_parse", lambda _conv_id, _files, _uid: ["doc-1"])
        res = _run(module.upload_and_parse.__wrapped__())
        assert res["code"] == 0
        assert res["data"] == ["doc-1"]

    def test_parse_url_and_multipart_matrix_unit(self, document_app_module, monkeypatch, tmp_path):
        """parse(): invalid URL, crawled HTML, crawled file download, missing file part,
        and multipart upload branches, with seleniumwire faked via sys.modules."""
        module = document_app_module

        # Branch: malformed URL is rejected before any crawling happens.
        async def req_invalid_url():
            return {"url": "not-a-url"}

        monkeypatch.setattr(module, "get_request_json", req_invalid_url)
        monkeypatch.setattr(module, "is_valid_url", lambda _url: False)
        res = _run(module.parse())
        assert res["code"] == module.RetCode.ARGUMENT_ERROR
        assert res["message"] == "The URL format is invalid"

        # Build a fake seleniumwire.webdriver module so parse() never launches Chrome.
        webdriver_mod = ModuleType("seleniumwire.webdriver")

        class _FakeChromeOptions:
            def __init__(self):
                self.args = []
                self.experimental = {}

            def add_argument(self, arg):
                self.args.append(arg)

            def add_experimental_option(self, key, value):
                self.experimental[key] = value

        class _Req:
            # One captured network request with its response headers.
            def __init__(self, headers):
                self.response = SimpleNamespace(headers=headers)

        class _FakeDriver:
            def __init__(self, requests, page_source):
                self.requests = requests
                self.page_source = page_source
                self.quit_called = False
                self.visited = []
                self.options = None

            def get(self, url):
                self.visited.append(url)

            def quit(self):
                self.quit_called = True

        queue = []
        created = []

        def _fake_chrome(options=None):
            # Hand out pre-queued drivers and remember them for later asserts.
            driver = queue.pop(0)
            driver.options = options
            created.append(driver)
            return driver

        webdriver_mod.Chrome = _fake_chrome
        webdriver_mod.ChromeOptions = _FakeChromeOptions
        seleniumwire_mod = ModuleType("seleniumwire")
        seleniumwire_mod.webdriver = webdriver_mod
        monkeypatch.setitem(sys.modules, "seleniumwire", seleniumwire_mod)
        monkeypatch.setitem(sys.modules, "seleniumwire.webdriver", webdriver_mod)
        monkeypatch.setattr(module, "get_project_base_directory", lambda: str(tmp_path))
        monkeypatch.setattr(module, "is_valid_url", lambda _url: True)

        class _Parser:
            def parser_txt(self, page_source):
                assert "page" in page_source
                return ["section1", "section2"]

        monkeypatch.setattr(module, "RAGFlowHtmlParser", lambda: _Parser())
        queue.append(_FakeDriver([_Req({"x": "1"}), _Req({"y": "2"})], "<html>page</html>"))

        # Branch: crawled HTML page is parsed into joined sections; driver is quit.
        async def req_url_html():
            return {"url": "http://example.com/html"}

        monkeypatch.setattr(module, "get_request_json", req_url_html)
        res = _run(module.parse())
        assert res["code"] == 0
        assert res["data"] == "section1\nsection2"
        assert created[-1].quit_called is True

        # Branch: a content-disposition response means a downloaded file gets parsed.
        (tmp_path / "logs" / "downloads").mkdir(parents=True, exist_ok=True)
        (tmp_path / "logs" / "downloads" / "doc.txt").write_bytes(b"downloaded-bytes")
        queue.append(_FakeDriver([_Req({"content-disposition": 'attachment; filename="doc.txt"'})], "<html>file</html>"))
        captured = {}

        def parse_docs_read(files, _uid):
            captured["filename"] = files[0].filename
            captured["content"] = files[0].read()
            return "parsed-download"

        monkeypatch.setattr(module.FileService, "parse_docs", parse_docs_read)

        async def req_url_file():
            return {"url": "http://example.com/file"}

        monkeypatch.setattr(module, "get_request_json", req_url_file)
        res = _run(module.parse())
        assert res["code"] == 0
        assert res["data"] == "parsed-download"
        assert captured["filename"] == "doc.txt"
        assert captured["content"] == b"downloaded-bytes"

        # Branch: no URL and no multipart file part -> argument error.
        async def req_no_url():
            return {}

        monkeypatch.setattr(module, "get_request_json", req_no_url)
        monkeypatch.setattr(module, "request", _DummyRequest(files=_DummyFiles()))
        res = _run(module.parse())
        assert res["code"] == module.RetCode.ARGUMENT_ERROR
        assert res["message"] == "No file part!"

        # Branch: multipart upload delegates to FileService.parse_docs.
        monkeypatch.setattr(module, "request", _DummyRequest(files=_DummyFiles({"file": [_DummyFile("f1.txt")]})))
        monkeypatch.setattr(module.FileService, "parse_docs", lambda _files, _uid: "parsed-upload")
        res = _run(module.parse())
        assert res["code"] == 0
        assert res["data"] == "parsed-upload"
@pytest.mark.p2
class TestWebCrawlUnit:
    """Unit tests for the document app's ``web_crawl`` endpoint.

    Each test monkeypatches the module-level collaborators (request object,
    services, helpers) and invokes the undecorated handler via ``__wrapped__``
    through the ``_run`` helper.
    """

    def test_missing_kb_id(self, document_app_module, monkeypatch):
        """An empty kb_id in the form yields an argument error."""
        module = document_app_module
        monkeypatch.setattr(module, "request", _DummyRequest(form={"kb_id": "", "name": "doc", "url": "http://example.com"}))
        res = _run(module.web_crawl.__wrapped__())
        assert res["code"] == 101
        assert res["message"] == 'Lack of "KB ID"'

    def test_invalid_url(self, document_app_module, monkeypatch):
        """A malformed URL is rejected before any service is consulted."""
        module = document_app_module
        monkeypatch.setattr(module, "request", _DummyRequest(form={"kb_id": "kb1", "name": "doc", "url": "not-a-url"}))
        res = _run(module.web_crawl.__wrapped__())
        assert res["code"] == 101
        assert res["message"] == "The URL format is invalid"

    def test_invalid_kb_id_raises(self, document_app_module, monkeypatch):
        """An unknown knowledgebase id propagates as a LookupError."""
        module = document_app_module
        monkeypatch.setattr(module, "is_valid_url", lambda _url: True)
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
        monkeypatch.setattr(module, "request", _DummyRequest(form={"kb_id": "missing", "name": "doc", "url": "http://example.com"}))
        with pytest.raises(LookupError):
            _run(module.web_crawl.__wrapped__())

    def test_no_permission(self, document_app_module, monkeypatch):
        """A failed team-permission check returns an authorization error."""
        module = document_app_module
        kb = SimpleNamespace(id="kb1", tenant_id="tenant1", name="kb", parser_id="parser", parser_config={})
        monkeypatch.setattr(module, "is_valid_url", lambda _url: True)
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, kb))
        monkeypatch.setattr(module, "check_kb_team_permission", lambda *_args, **_kwargs: False)
        monkeypatch.setattr(module, "request", _DummyRequest(form={"kb_id": "kb1", "name": "doc", "url": "http://example.com"}))
        res = _run(module.web_crawl.__wrapped__())
        assert res["code"] == 109
        assert res["message"] == "No authorization."

    def test_download_failure(self, document_app_module, monkeypatch):
        """html2pdf returning None surfaces as a download failure."""
        module = document_app_module
        kb = SimpleNamespace(id="kb1", tenant_id="tenant1", name="kb", parser_id="parser", parser_config={})
        monkeypatch.setattr(module, "is_valid_url", lambda _url: True)
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, kb))
        monkeypatch.setattr(module, "check_kb_team_permission", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module, "html2pdf", lambda _url: None)
        monkeypatch.setattr(module, "request", _DummyRequest(form={"kb_id": "kb1", "name": "doc", "url": "http://example.com"}))
        res = _run(module.web_crawl.__wrapped__())
        assert res["code"] == 100
        assert "Download failure" in res["message"]

    def test_unsupported_type(self, document_app_module, monkeypatch):
        """A deduplicated filename of an unsupported type is rejected."""
        module = document_app_module
        kb = SimpleNamespace(id="kb1", tenant_id="tenant1", name="kb", parser_id="parser", parser_config={})
        monkeypatch.setattr(module, "is_valid_url", lambda _url: True)
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, kb))
        monkeypatch.setattr(module, "check_kb_team_permission", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module, "html2pdf", lambda _url: b"%PDF-1.4")
        monkeypatch.setattr(module.FileService, "get_root_folder", lambda _uid: {"id": "root"})
        monkeypatch.setattr(module.FileService, "init_knowledgebase_docs", lambda *_args, **_kwargs: None)
        monkeypatch.setattr(module.FileService, "get_kb_folder", lambda *_args, **_kwargs: {"id": "kb_root"})
        monkeypatch.setattr(module.FileService, "new_a_file_from_kb", lambda *_args, **_kwargs: {"id": "kb_folder"})
        # duplicate_name returns an extension the handler cannot parse.
        monkeypatch.setattr(module, "duplicate_name", lambda *_args, **_kwargs: "bad.exe")
        monkeypatch.setattr(module, "request", _DummyRequest(form={"kb_id": "kb1", "name": "doc", "url": "http://example.com"}))
        res = _run(module.web_crawl.__wrapped__())
        assert res["code"] == 100
        assert "supported yet" in res["message"]

    @pytest.mark.parametrize(
        "filename,filetype,expected_parser",
        [
            ("image.png", "visual", "picture"),
            ("sound.mp3", "aural", "audio"),
            ("deck.pptx", "doc", "presentation"),
            ("mail.eml", "doc", "email"),
        ],
    )
    def test_success_parser_overrides(self, document_app_module, monkeypatch, filename, filetype, expected_parser):
        """Successful crawl stores the blob and selects the parser from the file type."""
        module = document_app_module
        kb = SimpleNamespace(id="kb1", tenant_id="tenant1", name="kb", parser_id="parser", parser_config={})
        captured = {}

        class _Storage:
            # Minimal storage stub: pretend every object is new and record puts.
            def obj_exist(self, *_args, **_kwargs):
                return False

            def put(self, *_args, **_kwargs):
                captured["put"] = True

        def insert_doc(doc):
            # Capture the document row the handler tries to insert.
            captured["doc"] = doc

        monkeypatch.setattr(module, "is_valid_url", lambda _url: True)
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, kb))
        monkeypatch.setattr(module, "check_kb_team_permission", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module, "html2pdf", lambda _url: b"%PDF-1.4")
        monkeypatch.setattr(module.FileService, "get_root_folder", lambda _uid: {"id": "root"})
        monkeypatch.setattr(module.FileService, "init_knowledgebase_docs", lambda *_args, **_kwargs: None)
        monkeypatch.setattr(module.FileService, "get_kb_folder", lambda *_args, **_kwargs: {"id": "kb_root"})
        monkeypatch.setattr(module.FileService, "new_a_file_from_kb", lambda *_args, **_kwargs: {"id": "kb_folder"})
        monkeypatch.setattr(module, "duplicate_name", lambda *_args, **_kwargs: filename)
        monkeypatch.setattr(module, "filename_type", lambda _name: filetype)
        monkeypatch.setattr(module, "thumbnail", lambda *_args, **_kwargs: "")
        monkeypatch.setattr(module, "get_uuid", lambda: "doc-1")
        monkeypatch.setattr(module.settings, "STORAGE_IMPL", _Storage())
        monkeypatch.setattr(module.DocumentService, "insert", insert_doc)
        monkeypatch.setattr(module.FileService, "add_file_from_kb", lambda *_args, **_kwargs: None)
        monkeypatch.setattr(module, "request", _DummyRequest(form={"kb_id": "kb1", "name": "doc", "url": "http://example.com"}))
        res = _run(module.web_crawl.__wrapped__())
        assert res["code"] == 0
        assert captured["doc"]["parser_id"] == expected_parser
        assert captured["put"] is True

    def test_exception_path(self, document_app_module, monkeypatch):
        """A failure while inserting the document row maps to a generic error code."""
        module = document_app_module
        kb = SimpleNamespace(id="kb1", tenant_id="tenant1", name="kb", parser_id="parser", parser_config={})

        class _Storage:
            # Storage stub whose put succeeds silently.
            def obj_exist(self, *_args, **_kwargs):
                return False

            def put(self, *_args, **_kwargs):
                return None

        def insert_doc(_doc):
            # Simulate a database failure during insertion.
            raise RuntimeError("boom")

        monkeypatch.setattr(module, "is_valid_url", lambda _url: True)
        monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, kb))
        monkeypatch.setattr(module, "check_kb_team_permission", lambda *_args, **_kwargs: True)
        monkeypatch.setattr(module, "html2pdf", lambda _url: b"%PDF-1.4")
        monkeypatch.setattr(module.FileService, "get_root_folder", lambda _uid: {"id": "root"})
        monkeypatch.setattr(module.FileService, "init_knowledgebase_docs", lambda *_args, **_kwargs: None)
        monkeypatch.setattr(module.FileService, "get_kb_folder", lambda *_args, **_kwargs: {"id": "kb_root"})
        monkeypatch.setattr(module.FileService, "new_a_file_from_kb", lambda *_args, **_kwargs: {"id": "kb_folder"})
        monkeypatch.setattr(module, "duplicate_name", lambda *_args, **_kwargs: "doc.pdf")
        monkeypatch.setattr(module, "filename_type", lambda _name: "pdf")
        monkeypatch.setattr(module, "thumbnail", lambda *_args, **_kwargs: "")
        monkeypatch.setattr(module, "get_uuid", lambda: "doc-1")
        monkeypatch.setattr(module.settings, "STORAGE_IMPL", _Storage())
        monkeypatch.setattr(module.DocumentService, "insert", insert_doc)
        monkeypatch.setattr(module.FileService, "add_file_from_kb", lambda *_args, **_kwargs: None)
        monkeypatch.setattr(module, "request", _DummyRequest(form={"kb_id": "kb1", "name": "doc", "url": "http://example.com"}))
        res = _run(module.web_crawl.__wrapped__())
        assert res["code"] == 100
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_document_app/test_upload_documents.py",
"license": "Apache License 2.0",
"lines": 511,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:api/apps/mcp_server_app.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from quart import Response, request
from api.apps import current_user, login_required
from api.db.db_models import MCPServer
from api.db.services.mcp_server_service import MCPServerService
from api.db.services.user_service import TenantService
from common.constants import RetCode, VALID_MCP_SERVER_TYPES
from common.misc_utils import get_uuid, thread_pool_exec
from api.utils.api_utils import get_data_error_result, get_json_result, get_mcp_tools, get_request_json, server_error_response, validate_request
from api.utils.web_utils import get_float, safe_json_parse
from common.mcp_tool_call_conn import MCPToolCallSession, close_multiple_mcp_toolcall_sessions
@manager.route("/list", methods=["POST"])  # noqa: F821
@login_required
async def list_mcp() -> Response:
    """List the current tenant's MCP servers with optional in-memory paging."""
    args = request.args
    keywords = args.get("keywords", "")
    page_number = int(args.get("page", 0))
    items_per_page = int(args.get("page_size", 0))
    orderby = args.get("orderby", "create_time")
    # Descending order unless the query string explicitly says "false".
    desc = args.get("desc", "true").lower() != "false"
    body = await get_request_json()
    mcp_ids = body.get("mcp_ids", [])
    try:
        servers = MCPServerService.get_servers(current_user.id, mcp_ids, 0, 0, orderby, desc, keywords) or []
        total = len(servers)
        if page_number and items_per_page:
            start = (page_number - 1) * items_per_page
            servers = servers[start : start + items_per_page]
        return get_json_result(data={"mcp_servers": servers, "total": total})
    except Exception as e:
        return server_error_response(e)
@manager.route("/detail", methods=["GET"])  # noqa: F821
@login_required
def detail() -> Response:
    """Fetch a single MCP server owned by the current tenant."""
    server_id = request.args["mcp_id"]
    try:
        server = MCPServerService.get_or_none(id=server_id, tenant_id=current_user.id)
        if server is not None:
            return get_json_result(data=server.to_dict())
        return get_json_result(code=RetCode.NOT_FOUND, data=None)
    except Exception as e:
        return server_error_response(e)
@manager.route("/create", methods=["POST"])  # noqa: F821
@login_required
@validate_request("name", "url", "server_type")
async def create() -> Response:
    """Create a new MCP server entry for the current tenant.

    Validates the server type, name (non-empty, at most 255 UTF-8 bytes,
    unique per tenant) and URL, probes the server for its tool list, caches
    the tools under ``variables["tools"]`` and persists the record.

    Fix: error messages were passed positionally to get_data_error_result,
    landing in its first (code) parameter instead of ``message`` — every
    other call site in this module uses the ``message=`` keyword.
    """
    req = await get_request_json()
    server_type = req.get("server_type", "")
    if server_type not in VALID_MCP_SERVER_TYPES:
        return get_data_error_result(message="Unsupported MCP server type.")
    server_name = req.get("name", "")
    # The limit applies to the UTF-8 byte length, not the character count.
    if not server_name or len(server_name.encode("utf-8")) > 255:
        return get_data_error_result(message=f"Invalid MCP name or length is {len(server_name)} which is large than 255.")
    e, _ = MCPServerService.get_by_name_and_tenant(name=server_name, tenant_id=current_user.id)
    if e:
        return get_data_error_result(message="Duplicated MCP server name.")
    url = req.get("url", "")
    if not url:
        return get_data_error_result(message="Invalid url.")
    headers = safe_json_parse(req.get("headers", {}))
    req["headers"] = headers
    variables = safe_json_parse(req.get("variables", {}))
    # Cached tools are always re-discovered below; drop any client-supplied copy.
    variables.pop("tools", None)
    timeout = get_float(req, "timeout", 10)
    try:
        req["id"] = get_uuid()
        req["tenant_id"] = current_user.id
        e, _ = TenantService.get_by_id(current_user.id)
        if not e:
            return get_data_error_result(message="Tenant not found.")
        # Transient model used only to probe the server; get_mcp_tools keys
        # its result by this id, hence id=server_name.
        mcp_server = MCPServer(id=server_name, name=server_name, url=url, server_type=server_type, variables=variables, headers=headers)
        server_tools, err_message = await thread_pool_exec(get_mcp_tools, [mcp_server], timeout)
        if err_message:
            return get_data_error_result(message=err_message)
        tools = server_tools[server_name]
        tools = {tool["name"]: tool for tool in tools if isinstance(tool, dict) and "name" in tool}
        variables["tools"] = tools
        req["variables"] = variables
        if not MCPServerService.insert(**req):
            return get_data_error_result(message="Failed to create MCP server.")
        return get_json_result(data=req)
    except Exception as e:
        return server_error_response(e)
@manager.route("/update", methods=["POST"])  # noqa: F821
@login_required
@validate_request("mcp_id")
async def update() -> Response:
    """Update an existing MCP server owned by the current tenant.

    Missing fields fall back to the stored values; the server is re-probed
    for its tool list, which is cached under ``variables["tools"]`` before
    saving, and the refreshed record is returned.

    Fix: the tool-probe error was passed positionally to
    get_data_error_result, landing in its first (code) parameter instead of
    ``message``; it is now passed by keyword, consistent with the rest of
    this module.
    """
    req = await get_request_json()
    mcp_id = req.get("mcp_id", "")
    e, mcp_server = MCPServerService.get_by_id(mcp_id)
    if not e or mcp_server.tenant_id != current_user.id:
        return get_data_error_result(message=f"Cannot find MCP server {mcp_id} for user {current_user.id}")
    server_type = req.get("server_type", mcp_server.server_type)
    if server_type and server_type not in VALID_MCP_SERVER_TYPES:
        return get_data_error_result(message="Unsupported MCP server type.")
    server_name = req.get("name", mcp_server.name)
    # The limit applies to the UTF-8 byte length, not the character count.
    if server_name and len(server_name.encode("utf-8")) > 255:
        return get_data_error_result(message=f"Invalid MCP name or length is {len(server_name)} which is large than 255.")
    url = req.get("url", mcp_server.url)
    if not url:
        return get_data_error_result(message="Invalid url.")
    headers = safe_json_parse(req.get("headers", mcp_server.headers))
    req["headers"] = headers
    variables = safe_json_parse(req.get("variables", mcp_server.variables))
    # Cached tools are re-discovered below; drop any client-supplied copy.
    variables.pop("tools", None)
    timeout = get_float(req, "timeout", 10)
    try:
        req["tenant_id"] = current_user.id
        req.pop("mcp_id", None)
        req["id"] = mcp_id
        # Transient model used only for the probe; get_mcp_tools keys its result by this id.
        mcp_server = MCPServer(id=server_name, name=server_name, url=url, server_type=server_type, variables=variables, headers=headers)
        server_tools, err_message = await thread_pool_exec(get_mcp_tools, [mcp_server], timeout)
        if err_message:
            return get_data_error_result(message=err_message)
        tools = server_tools[server_name]
        tools = {tool["name"]: tool for tool in tools if isinstance(tool, dict) and "name" in tool}
        variables["tools"] = tools
        req["variables"] = variables
        if not MCPServerService.filter_update([MCPServer.id == mcp_id, MCPServer.tenant_id == current_user.id], req):
            return get_data_error_result(message="Failed to updated MCP server.")
        e, updated_mcp = MCPServerService.get_by_id(req["id"])
        if not e:
            return get_data_error_result(message="Failed to fetch updated MCP server.")
        return get_json_result(data=updated_mcp.to_dict())
    except Exception as e:
        return server_error_response(e)
@manager.route("/rm", methods=["POST"])  # noqa: F821
@login_required
@validate_request("mcp_ids")
async def rm() -> Response:
    """Delete the MCP servers listed in ``mcp_ids``.

    Returns True on success, or a data error naming the ids on failure.
    """
    req = await get_request_json()
    mcp_ids = req.get("mcp_ids", [])
    try:
        # NOTE(review): tenant_id is written into req but never used below,
        # and delete_by_ids is not scoped to the tenant — confirm that
        # ownership of every id is enforced elsewhere before deletion.
        req["tenant_id"] = current_user.id
        if not MCPServerService.delete_by_ids(mcp_ids):
            return get_data_error_result(message=f"Failed to delete MCP servers {mcp_ids}")
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
@manager.route("/import", methods=["POST"])  # noqa: F821
@login_required
@validate_request("mcpServers")
async def import_multiple() -> Response:
    """Bulk-import MCP servers from an ``mcpServers`` mapping.

    Each entry is validated, renamed if the name collides with an existing
    server of the tenant, probed for its tool list, and inserted. A
    per-server result entry is returned for every input, success or failure.
    """
    req = await get_request_json()
    servers = req.get("mcpServers", {})
    if not servers:
        return get_data_error_result(message="No MCP servers provided.")
    timeout = get_float(req, "timeout", 10)
    results = []
    try:
        for server_name, config in servers.items():
            # Both "type" and "url" are mandatory per server.
            if not all(key in config for key in {"type", "url"}):
                results.append({"server": server_name, "success": False, "message": "Missing required fields (type or url)"})
                continue
            # The limit applies to the UTF-8 byte length, not the character count.
            if not server_name or len(server_name.encode("utf-8")) > 255:
                results.append({"server": server_name, "success": False, "message": f"Invalid MCP name or length is {len(server_name)} which is large than 255."})
                continue
            # Deduplicate the name within the tenant; the first renamed
            # candidate gets suffix "_0" because counter starts at 0.
            base_name = server_name
            new_name = base_name
            counter = 0
            while True:
                e, _ = MCPServerService.get_by_name_and_tenant(name=new_name, tenant_id=current_user.id)
                if not e:
                    break
                new_name = f"{base_name}_{counter}"
                counter += 1
            # NOTE(review): only authorization_token (plus the discovered
            # tools) is persisted in variables, while the probe below uses
            # every non-reserved config key — confirm this is intentional.
            create_data = {
                "id": get_uuid(),
                "tenant_id": current_user.id,
                "name": new_name,
                "url": config["url"],
                "server_type": config["type"],
                "variables": {"authorization_token": config.get("authorization_token", "")},
            }
            # NOTE(review): headers are used for the probe but not included
            # in create_data — confirm they should not be persisted.
            headers = {"authorization_token": config["authorization_token"]} if "authorization_token" in config else {}
            variables = {k: v for k, v in config.items() if k not in {"type", "url", "headers"}}
            # Transient model used only to probe the server for its tools;
            # get_mcp_tools keys its result by this id (new_name).
            mcp_server = MCPServer(id=new_name, name=new_name, url=config["url"], server_type=config["type"], variables=variables, headers=headers)
            server_tools, err_message = await thread_pool_exec(get_mcp_tools, [mcp_server], timeout)
            if err_message:
                results.append({"server": base_name, "success": False, "message": err_message})
                continue
            tools = server_tools[new_name]
            tools = {tool["name"]: tool for tool in tools if isinstance(tool, dict) and "name" in tool}
            create_data["variables"]["tools"] = tools
            if MCPServerService.insert(**create_data):
                result = {"server": server_name, "success": True, "action": "created", "id": create_data["id"], "new_name": new_name}
                if new_name != base_name:
                    result["message"] = f"Renamed from '{base_name}' to '{new_name}' avoid duplication"
                results.append(result)
            else:
                results.append({"server": server_name, "success": False, "message": "Failed to create MCP server."})
        return get_json_result(data={"results": results})
    except Exception as e:
        return server_error_response(e)
@manager.route("/export", methods=["POST"])  # noqa: F821
@login_required
@validate_request("mcp_ids")
async def export_multiple() -> Response:
    """Export the configuration of the caller's selected MCP servers.

    Ids that do not resolve, or that belong to another tenant, are skipped.
    """
    req = await get_request_json()
    mcp_ids = req.get("mcp_ids", [])
    if not mcp_ids:
        return get_data_error_result(message="No MCP server IDs provided.")
    try:
        exported_servers = {}
        for server_id in mcp_ids:
            found, server = MCPServerService.get_by_id(server_id)
            if not found or server.tenant_id != current_user.id:
                continue
            exported_servers[server.name] = {
                "type": server.server_type,
                "url": server.url,
                "name": server.name,
                "authorization_token": server.variables.get("authorization_token", ""),
                "tools": server.variables.get("tools", {}),
            }
        return get_json_result(data={"mcpServers": exported_servers})
    except Exception as e:
        return server_error_response(e)
@manager.route("/list_tools", methods=["POST"])  # noqa: F821
@login_required
@validate_request("mcp_ids")
async def list_tools() -> Response:
    """List the live tool set of each requested MCP server owned by the caller.

    A session is opened per accessible server to fetch its current tools;
    each tool's ``enabled`` flag is carried over from the server's cached
    tools (defaulting to True). All sessions are closed in ``finally``.
    """
    req = await get_request_json()
    mcp_ids = req.get("mcp_ids", [])
    if not mcp_ids:
        return get_data_error_result(message="No MCP server IDs provided.")
    timeout = get_float(req, "timeout", 10)
    results = {}
    tool_call_sessions = []
    try:
        for mcp_id in mcp_ids:
            e, mcp_server = MCPServerService.get_by_id(mcp_id)
            # Silently skip ids that don't resolve or belong to another tenant.
            if e and mcp_server.tenant_id == current_user.id:
                server_key = mcp_server.id
                cached_tools = mcp_server.variables.get("tools", {})
                tool_call_session = MCPToolCallSession(mcp_server, mcp_server.variables)
                tool_call_sessions.append(tool_call_session)
                try:
                    tools = await thread_pool_exec(tool_call_session.get_tools, timeout)
                except Exception as e:
                    # Any single server failing aborts the whole request.
                    return get_data_error_result(message=f"MCP list tools error: {e}")
                results[server_key] = []
                for tool in tools:
                    tool_dict = tool.model_dump()
                    # Preserve a previously cached enabled flag; default to enabled.
                    cached_tool = cached_tools.get(tool_dict["name"], {})
                    tool_dict["enabled"] = cached_tool.get("enabled", True)
                    results[server_key].append(tool_dict)
        return get_json_result(data=results)
    except Exception as e:
        return server_error_response(e)
    finally:
        # PERF: blocking call to close sessions — consider moving to a background thread or task queue
        await thread_pool_exec(close_multiple_mcp_toolcall_sessions, tool_call_sessions)
@manager.route("/test_tool", methods=["POST"])  # noqa: F821
@login_required
@validate_request("mcp_id", "tool_name", "arguments")
async def test_tool() -> Response:
    """Invoke a single tool on an MCP server and return its result.

    Fix: the tool-call session is now closed in a ``finally`` block, so it
    is released even when the lookup or the tool call raises (previously the
    session leaked on any exception). Mirrors the pattern in list_tools.
    """
    req = await get_request_json()
    mcp_id = req.get("mcp_id", "")
    if not mcp_id:
        return get_data_error_result(message="No MCP server ID provided.")
    timeout = get_float(req, "timeout", 10)
    tool_name = req.get("tool_name", "")
    arguments = req.get("arguments", {})
    if not all([tool_name, arguments]):
        return get_data_error_result(message="Require provide tool name and arguments.")
    tool_call_sessions = []
    try:
        e, mcp_server = MCPServerService.get_by_id(mcp_id)
        if not e or mcp_server.tenant_id != current_user.id:
            return get_data_error_result(message=f"Cannot find MCP server {mcp_id} for user {current_user.id}")
        tool_call_session = MCPToolCallSession(mcp_server, mcp_server.variables)
        tool_call_sessions.append(tool_call_session)
        result = await thread_pool_exec(tool_call_session.tool_call, tool_name, arguments, timeout)
        return get_json_result(data=result)
    except Exception as e:
        return server_error_response(e)
    finally:
        # PERF: blocking call to close sessions — consider moving to a background thread or task queue
        await thread_pool_exec(close_multiple_mcp_toolcall_sessions, tool_call_sessions)
@manager.route("/cache_tools", methods=["POST"])  # noqa: F821
@login_required
@validate_request("mcp_id", "tools")
async def cache_tool() -> Response:
    """Persist a client-supplied tool list into the server's cached variables."""
    payload = await get_request_json()
    mcp_id = payload.get("mcp_id", "")
    if not mcp_id:
        return get_data_error_result(message="No MCP server ID provided.")
    raw_tools = payload.get("tools", [])
    found, server = MCPServerService.get_by_id(mcp_id)
    if not found or server.tenant_id != current_user.id:
        return get_data_error_result(message=f"Cannot find MCP server {mcp_id} for user {current_user.id}")
    # Keep only well-formed tool dicts, keyed by tool name.
    tool_map = {t["name"]: t for t in raw_tools if isinstance(t, dict) and "name" in t}
    variables = server.variables
    variables["tools"] = tool_map
    updated = MCPServerService.filter_update([MCPServer.id == mcp_id, MCPServer.tenant_id == current_user.id], {"variables": variables})
    if not updated:
        return get_data_error_result(message="Failed to updated MCP server.")
    return get_json_result(data=tool_map)
@manager.route("/test_mcp", methods=["POST"])  # noqa: F821
@validate_request("url", "server_type")
async def test_mcp() -> Response:
    """Probe an MCP server and return its tool list (no login required)."""
    payload = await get_request_json()
    url = payload.get("url", "")
    if not url:
        return get_data_error_result(message="Invalid MCP url.")
    server_type = payload.get("server_type", "")
    if server_type not in VALID_MCP_SERVER_TYPES:
        return get_data_error_result(message="Unsupported MCP server type.")
    timeout = get_float(payload, "timeout", 10)
    headers = safe_json_parse(payload.get("headers", {}))
    variables = safe_json_parse(payload.get("variables", {}))
    # Transient model: exists only for the duration of this probe.
    probe = MCPServer(id=f"{server_type}: {url}", server_type=server_type, url=url, headers=headers, variables=variables)
    try:
        session = MCPToolCallSession(probe, probe.variables)
        try:
            tools = await thread_pool_exec(session.get_tools, timeout)
        except Exception as e:
            return get_data_error_result(message=f"Test MCP error: {e}")
        finally:
            # Always release the probe session, even when the probe fails.
            await thread_pool_exec(close_multiple_mcp_toolcall_sessions, [session])
        return get_json_result(data=[tool.model_dump() | {"enabled": True} for tool in tools])
    except Exception as e:
        return server_error_response(e)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/apps/mcp_server_app.py",
"license": "Apache License 2.0",
"lines": 343,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:api/db/services/mcp_server_service.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from peewee import fn
from api.db.db_models import DB, MCPServer
from api.db.services.common_service import CommonService
class MCPServerService(CommonService):
    """Service class for managing MCP server related database operations.

    This class extends CommonService to provide specialized functionality for MCP server management,
    including MCP server creation, updates, and deletions.

    Attributes:
        model: The MCPServer model class for database operations.
    """

    model = MCPServer

    @classmethod
    @DB.connection_context()
    def get_servers(cls, tenant_id: str, id_list: list[str] | None, page_number, items_per_page, orderby, desc,
                    keywords):
        """Retrieve MCP servers associated with a tenant.

        Fetches the tenant's MCP servers, optionally filtered by id list and
        keyword, ordered by *orderby*/*desc*, and paginated when both
        *page_number* and *items_per_page* are truthy. Only fields needed for
        list display are selected.

        Args:
            tenant_id (str): The unique identifier of the tenant.
            id_list (list[str] | None): Restrict to these ids; ignored if falsy.
            page_number: 1-based page index; 0 disables pagination.
            items_per_page: Page size; 0 disables pagination.
            orderby: Model field name to sort by.
            desc: Sort descending when truthy.
            keywords: Case-insensitive substring filter on the server name.

        Returns:
            list[dict]: Matching MCP server rows, or None if none match.
        """
        fields = [
            cls.model.id,
            cls.model.name,
            cls.model.server_type,
            cls.model.url,
            cls.model.description,
            cls.model.variables,
            cls.model.create_date,
            cls.model.update_date,
        ]
        # Fix: the previous default order_by(create_time.desc()) here was dead
        # code — peewee's order_by() replaces any earlier ordering, and the
        # orderby/desc clause below is applied unconditionally.
        query = cls.model.select(*fields).where(cls.model.tenant_id == tenant_id)
        if id_list:
            query = query.where(cls.model.id.in_(id_list))
        if keywords:
            query = query.where(fn.LOWER(cls.model.name).contains(keywords.lower()))
        if desc:
            query = query.order_by(cls.model.getter_by(orderby).desc())
        else:
            query = query.order_by(cls.model.getter_by(orderby).asc())
        if page_number and items_per_page:
            query = query.paginate(page_number, items_per_page)
        servers = list(query.dicts())
        if not servers:
            return None
        return servers

    @classmethod
    @DB.connection_context()
    def get_by_name_and_tenant(cls, name: str, tenant_id: str):
        """Return ``(found, result)`` for servers matching *name* within *tenant_id*.

        ``found`` is the truthiness of the query result; any lookup error is
        swallowed and reported as ``(False, None)``.
        """
        try:
            mcp_server = cls.model.query(name=name, tenant_id=tenant_id)
            return bool(mcp_server), mcp_server
        except Exception:
            return False, None

    @classmethod
    @DB.connection_context()
    def delete_by_tenant_id(cls, tenant_id: str):
        """Delete every MCP server owned by *tenant_id*; returns the rows-deleted count."""
        return cls.model.delete().where(cls.model.tenant_id == tenant_id).execute()
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/db/services/mcp_server_service.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:api/apps/search_app.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from quart import request
from api.apps import current_user, login_required
from api.constants import DATASET_NAME_LIMIT
from api.db.db_models import DB
from api.db.services import duplicate_name
from api.db.services.search_service import SearchService
from api.db.services.user_service import TenantService, UserTenantService
from common.misc_utils import get_uuid
from common.constants import RetCode, StatusEnum
from api.utils.api_utils import get_data_error_result, get_json_result, not_allowed_parameters, get_request_json, server_error_response, validate_request
@manager.route("/create", methods=["post"])  # noqa: F821
@login_required
@validate_request("name")
async def create():
    """Create a search app for the current user, deduplicating its name."""
    req = await get_request_json()
    name = req["name"]
    description = req.get("description", "")
    if not isinstance(name, str):
        return get_data_error_result(message="Search name must be string.")
    if name.strip() == "":
        return get_data_error_result(message="Search name can't be empty.")
    # The limit applies to the UTF-8 byte length of the raw (unstripped) name.
    if len(name.encode("utf-8")) > 255:
        return get_data_error_result(message=f"Search name length is {len(name)} which is large than 255.")
    tenant_ok, _ = TenantService.get_by_id(current_user.id)
    if not tenant_ok:
        return get_data_error_result(message="Authorized identity.")
    # Deduplicate against this tenant's existing valid search apps.
    unique_name = duplicate_name(SearchService.query, name=name.strip(), tenant_id=current_user.id, status=StatusEnum.VALID.value)
    req.update(
        {
            "id": get_uuid(),
            "name": unique_name,
            "description": description,
            "tenant_id": current_user.id,
            "created_by": current_user.id,
        }
    )
    with DB.atomic():
        try:
            if not SearchService.save(**req):
                return get_data_error_result()
            return get_json_result(data={"search_id": req["id"]})
        except Exception as e:
            return server_error_response(e)
@manager.route("/update", methods=["post"])  # noqa: F821
@login_required
@validate_request("search_id", "name", "search_config", "tenant_id")
@not_allowed_parameters("id", "created_by", "create_time", "update_time", "create_date", "update_date", "created_by")
async def update():
    """Update a search app's name and config after permission checks.

    ``search_config`` is shallow-merged over the stored config; ``search_id``
    and ``tenant_id`` are stripped from the payload before persisting, and
    the refreshed record is returned.
    """
    # NOTE(review): "created_by" appears twice in the not_allowed_parameters
    # decorator arguments above — likely an accidental duplicate.
    req = await get_request_json()
    if not isinstance(req["name"], str):
        return get_data_error_result(message="Search name must be string.")
    if req["name"].strip() == "":
        return get_data_error_result(message="Search name can't be empty.")
    # The limit applies to the UTF-8 byte length, not the character count.
    if len(req["name"].encode("utf-8")) > DATASET_NAME_LIMIT:
        return get_data_error_result(message=f"Search name length is {len(req['name'])} which is large than {DATASET_NAME_LIMIT}")
    req["name"] = req["name"].strip()
    tenant_id = req["tenant_id"]
    e, _ = TenantService.get_by_id(tenant_id)
    if not e:
        return get_data_error_result(message="Authorized identity.")
    search_id = req["search_id"]
    if not SearchService.accessible4deletion(search_id, current_user.id):
        return get_json_result(data=False, message="No authorization.", code=RetCode.AUTHENTICATION_ERROR)
    try:
        search_app = SearchService.query(tenant_id=tenant_id, id=search_id)[0]
        if not search_app:
            return get_json_result(data=False, message=f"Cannot find search {search_id}", code=RetCode.DATA_ERROR)
        # Case-insensitive rename collision check within the tenant.
        if req["name"].lower() != search_app.name.lower() and len(SearchService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) >= 1:
            return get_data_error_result(message="Duplicated search name.")
        if "search_config" in req:
            # Shallow-merge: new keys override stored ones, untouched keys survive.
            current_config = search_app.search_config or {}
            new_config = req["search_config"]
            if not isinstance(new_config, dict):
                return get_data_error_result(message="search_config must be a JSON object")
            updated_config = {**current_config, **new_config}
            req["search_config"] = updated_config
        # These are routing/permission fields, not columns to persist.
        req.pop("search_id", None)
        req.pop("tenant_id", None)
        updated = SearchService.update_by_id(search_id, req)
        if not updated:
            return get_data_error_result(message="Failed to update search")
        e, updated_search = SearchService.get_by_id(search_id)
        if not e:
            return get_data_error_result(message="Failed to fetch updated search")
        return get_json_result(data=updated_search.to_dict())
    except Exception as e:
        return server_error_response(e)
@manager.route("/detail", methods=["GET"])  # noqa: F821
@login_required
def detail():
    """Return the detail of a search app the current user can access."""
    search_id = request.args["search_id"]
    try:
        memberships = UserTenantService.query(user_id=current_user.id)
        # The caller must belong to at least one tenant that owns this app.
        visible = any(SearchService.query(tenant_id=m.tenant_id, id=search_id) for m in memberships)
        if not visible:
            return get_json_result(data=False, message="Has no permission for this operation.", code=RetCode.OPERATING_ERROR)
        search = SearchService.get_detail(search_id)
        if not search:
            return get_data_error_result(message="Can't find this Search App!")
        return get_json_result(data=search)
    except Exception as e:
        return server_error_response(e)
@manager.route("/list", methods=["POST"])  # noqa: F821
@login_required
async def list_search_app():
    """List search apps visible to the current user, optionally filtered by owner ids and paginated."""
    keywords = request.args.get("keywords", "")
    page_number = int(request.args.get("page", 0))
    items_per_page = int(request.args.get("page_size", 0))
    orderby = request.args.get("orderby", "create_time")
    desc = request.args.get("desc", "true").lower() != "false"
    req = await get_request_json()
    owner_ids = req.get("owner_ids", [])
    try:
        if owner_ids:
            # Fetch everything for the requested owners, then filter and paginate in memory.
            search_apps, _ = SearchService.get_by_tenant_ids(owner_ids, current_user.id, 0, 0, orderby, desc, keywords)
            search_apps = [app for app in search_apps if app["tenant_id"] in owner_ids]
            total = len(search_apps)
            if page_number and items_per_page:
                start = (page_number - 1) * items_per_page
                search_apps = search_apps[start : start + items_per_page]
        else:
            # No owner filter: let the service do the pagination.
            search_apps, total = SearchService.get_by_tenant_ids([], current_user.id, page_number, items_per_page, orderby, desc, keywords)
        return get_json_result(data={"search_apps": search_apps, "total": total})
    except Exception as e:
        return server_error_response(e)
@manager.route("/rm", methods=["post"])  # noqa: F821
@login_required
@validate_request("search_id")
async def rm():
    """Delete a search app; only permitted for the user who created it."""
    req = await get_request_json()
    search_id = req["search_id"]
    if not SearchService.accessible4deletion(search_id, current_user.id):
        return get_json_result(data=False, message="No authorization.", code=RetCode.AUTHENTICATION_ERROR)
    try:
        deleted = SearchService.delete_by_id(search_id)
        if deleted:
            return get_json_result(data=True)
        return get_data_error_result(message=f"Failed to delete search App {search_id}")
    except Exception as e:
        return server_error_response(e)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/apps/search_app.py",
"license": "Apache License 2.0",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:api/db/services/search_service.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
from peewee import fn
from common.constants import StatusEnum
from api.db.db_models import DB, Search, User
from api.db.services.common_service import CommonService
from common.time_utils import current_timestamp, datetime_format
class SearchService(CommonService):
    """CRUD helpers for search apps, backed by the peewee ``Search`` model."""

    model = Search

    @classmethod
    def save(cls, **kwargs):
        """Create a search app row, stamping create/update time fields.

        Returns the created model instance.
        """
        current_ts = current_timestamp()
        current_date = datetime_format(datetime.now())
        kwargs["create_time"] = current_ts
        kwargs["create_date"] = current_date
        kwargs["update_time"] = current_ts
        kwargs["update_date"] = current_date
        obj = cls.model.create(**kwargs)
        return obj

    @classmethod
    @DB.connection_context()
    def accessible4deletion(cls, search_id, user_id) -> bool:
        """Return True when *user_id* created the (still valid) search app *search_id*."""
        search = (
            cls.model.select(cls.model.id)
            .where(
                cls.model.id == search_id,
                cls.model.created_by == user_id,
                cls.model.status == StatusEnum.VALID.value,
            )
            .first()
        )
        return search is not None

    @classmethod
    @DB.connection_context()
    def get_detail(cls, search_id):
        """Return a dict of the search app joined with its tenant user, or {} when not found."""
        fields = [
            cls.model.id,
            cls.model.avatar,
            cls.model.tenant_id,
            cls.model.name,
            cls.model.description,
            cls.model.created_by,
            cls.model.search_config,
            cls.model.update_time,
            User.nickname,
            User.avatar.alias("tenant_avatar"),
        ]
        search = (
            cls.model.select(*fields)
            .join(User, on=((User.id == cls.model.tenant_id) & (User.status == StatusEnum.VALID.value)))
            .where((cls.model.id == search_id) & (cls.model.status == StatusEnum.VALID.value))
            .first()
        )
        # BUGFIX: .first() returns None when no row matches; calling .to_dict()
        # on it raised AttributeError before the "not found" branch could run.
        if not search:
            return {}
        return search.to_dict()

    @classmethod
    @DB.connection_context()
    def get_by_tenant_ids(cls, joined_tenant_ids, user_id, page_number, items_per_page, orderby, desc, keywords):
        """List valid search apps owned by any of *joined_tenant_ids* or by *user_id*.

        Supports case-insensitive name filtering via *keywords*, ordering by
        *orderby* (ascending unless *desc*), and optional pagination when both
        *page_number* and *items_per_page* are non-zero.

        Returns (rows_as_dicts, total_count_before_pagination).
        """
        fields = [
            cls.model.id,
            cls.model.avatar,
            cls.model.tenant_id,
            cls.model.name,
            cls.model.description,
            cls.model.created_by,
            cls.model.status,
            cls.model.update_time,
            cls.model.create_time,
            User.nickname,
            User.avatar.alias("tenant_avatar"),
        ]
        query = (
            cls.model.select(*fields)
            .join(User, on=(cls.model.tenant_id == User.id))
            .where(((cls.model.tenant_id.in_(joined_tenant_ids)) | (cls.model.tenant_id == user_id)) & (
                cls.model.status == StatusEnum.VALID.value))
        )
        if keywords:
            query = query.where(fn.LOWER(cls.model.name).contains(keywords.lower()))
        if desc:
            query = query.order_by(cls.model.getter_by(orderby).desc())
        else:
            query = query.order_by(cls.model.getter_by(orderby).asc())
        # Count before pagination so callers get the full total.
        count = query.count()
        if page_number and items_per_page:
            query = query.paginate(page_number, items_per_page)
        return list(query.dicts()), count

    @classmethod
    @DB.connection_context()
    def delete_by_tenant_id(cls, tenant_id):
        """Hard-delete every search app belonging to *tenant_id*; returns the row count."""
        return cls.model.delete().where(cls.model.tenant_id == tenant_id).execute()
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/db/services/search_service.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:test/testcases/test_web_api/common.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import time
import uuid
from pathlib import Path
import requests
from configs import HOST_ADDRESS, VERSION
from requests_toolbelt import MultipartEncoder
from utils.file_utils import create_txt_file
# Shared JSON headers and per-app URL prefixes used by all request helpers below.
HEADERS = {"Content-Type": "application/json"}
KB_APP_URL = f"/{VERSION}/kb"
DOCUMENT_APP_URL = f"/{VERSION}/document"
CHUNK_API_URL = f"/{VERSION}/chunk"
DIALOG_APP_URL = f"/{VERSION}/dialog"
# SESSION_WITH_CHAT_ASSISTANT_API_URL = "/api/v1/chats/{chat_id}/sessions"
# SESSION_WITH_AGENT_API_URL = "/api/v1/agents/{agent_id}/sessions"
# NOTE: memory/message endpoints carry an extra "/api" prefix, unlike the app URLs above.
MEMORY_API_URL = f"/api/{VERSION}/memories"
MESSAGE_API_URL = f"/api/{VERSION}/messages"
API_APP_URL = f"/{VERSION}/api"
SYSTEM_APP_URL = f"/{VERSION}/system"
LLM_APP_URL = f"/{VERSION}/llm"
PLUGIN_APP_URL = f"/{VERSION}/plugin"
SEARCH_APP_URL = f"/{VERSION}/search"
def _http_debug_enabled():
return os.getenv("TEST_HTTP_DEBUG") == "1"
def _redact_payload(payload):
if not isinstance(payload, dict):
return payload
redacted = {}
for key, value in payload.items():
if any(token in key.lower() for token in ("api_key", "password", "token", "secret", "authorization")):
redacted[key] = "***redacted***"
else:
redacted[key] = value
return redacted
def _log_http_debug(method, url, req_id, payload, status, text, resp_json, elapsed_ms):
    """Print one request/response exchange when TEST_HTTP_DEBUG is enabled; no-op otherwise."""
    if not _http_debug_enabled():
        return
    safe_payload = _redact_payload(payload)
    debug_lines = [
        f"[HTTP DEBUG] {method} {url} req_id={req_id} elapsed_ms={elapsed_ms:.1f}",
        f"[HTTP DEBUG] request_payload={json.dumps(safe_payload, default=str)}",
        f"[HTTP DEBUG] status={status}",
        f"[HTTP DEBUG] response_text={text}",
        f"[HTTP DEBUG] response_json={json.dumps(resp_json, default=str) if resp_json is not None else None}",
    ]
    for debug_line in debug_lines:
        print(debug_line)
# API APP
# Thin wrappers around the /api endpoints; each sends one request and returns the decoded JSON body.
def api_new_token(auth, payload=None, *, headers=HEADERS, data=None):
    if payload is None:
        payload = {}
    res = requests.post(url=f"{HOST_ADDRESS}{API_APP_URL}/new_token", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def api_token_list(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{API_APP_URL}/token_list", headers=headers, auth=auth, params=params)
    return res.json()


def api_rm_token(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{API_APP_URL}/rm", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def api_stats(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{API_APP_URL}/stats", headers=headers, auth=auth, params=params)
    return res.json()
# SYSTEM APP
# Thin wrappers around the /system endpoints; each returns the decoded JSON body.
def system_new_token(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{SYSTEM_APP_URL}/new_token", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def system_token_list(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{SYSTEM_APP_URL}/token_list", headers=headers, auth=auth, params=params)
    return res.json()


def system_delete_token(auth, token, *, headers=HEADERS):
    res = requests.delete(url=f"{HOST_ADDRESS}{SYSTEM_APP_URL}/token/{token}", headers=headers, auth=auth)
    return res.json()


def system_status(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{SYSTEM_APP_URL}/status", headers=headers, auth=auth, params=params)
    return res.json()


def system_version(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{SYSTEM_APP_URL}/version", headers=headers, auth=auth, params=params)
    return res.json()


def system_config(auth=None, params=None, *, headers=HEADERS):
    # auth is optional here: /config is readable without credentials.
    res = requests.get(url=f"{HOST_ADDRESS}{SYSTEM_APP_URL}/config", headers=headers, auth=auth, params=params)
    return res.json()
# LLM APP
# Read-only wrappers around the /llm endpoints.
def llm_factories(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{LLM_APP_URL}/factories", headers=headers, auth=auth, params=params)
    return res.json()


def llm_list(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{LLM_APP_URL}/list", headers=headers, auth=auth, params=params)
    return res.json()


# PLUGIN APP
def plugin_llm_tools(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{PLUGIN_APP_URL}/llm_tools", headers=headers, auth=auth, params=params)
    return res.json()
# SEARCH APP
# Wrappers around the /search endpoints; each returns the decoded JSON body.
def search_create(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{SEARCH_APP_URL}/create", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def search_update(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{SEARCH_APP_URL}/update", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def search_detail(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{SEARCH_APP_URL}/detail", headers=headers, auth=auth, params=params)
    return res.json()


def search_list(auth, params=None, payload=None, *, headers=HEADERS, data=None):
    # /list is a POST endpoint that reads filters from the JSON body.
    if payload is None:
        payload = {}
    res = requests.post(url=f"{HOST_ADDRESS}{SEARCH_APP_URL}/list", headers=headers, auth=auth, params=params, json=payload, data=data)
    return res.json()


def search_rm(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{SEARCH_APP_URL}/rm", headers=headers, auth=auth, json=payload, data=data)
    return res.json()
# KB APP
# Wrappers around the /kb (knowledge base) endpoints; each returns the decoded JSON body.
def create_kb(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/create", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def list_kbs(auth, params=None, payload=None, *, headers=HEADERS, data=None):
    if payload is None:
        payload = {}
    res = requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/list", headers=headers, auth=auth, params=params, json=payload, data=data)
    return res.json()


def update_kb(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/update", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def rm_kb(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/rm", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def detail_kb(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{KB_APP_URL}/detail", headers=headers, auth=auth, params=params)
    return res.json()


def kb_get_meta(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{KB_APP_URL}/get_meta", headers=headers, auth=auth, params=params)
    return res.json()


def kb_basic_info(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{KB_APP_URL}/basic_info", headers=headers, auth=auth, params=params)
    return res.json()


def kb_update_metadata_setting(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/update_metadata_setting", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def kb_list_pipeline_logs(auth, params=None, payload=None, *, headers=HEADERS, data=None):
    if payload is None:
        payload = {}
    res = requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/list_pipeline_logs", headers=headers, auth=auth, params=params, json=payload, data=data)
    return res.json()


def kb_list_pipeline_dataset_logs(auth, params=None, payload=None, *, headers=HEADERS, data=None):
    if payload is None:
        payload = {}
    res = requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/list_pipeline_dataset_logs", headers=headers, auth=auth, params=params, json=payload, data=data)
    return res.json()


def kb_delete_pipeline_logs(auth, params=None, payload=None, *, headers=HEADERS, data=None):
    if payload is None:
        payload = {}
    res = requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/delete_pipeline_logs", headers=headers, auth=auth, params=params, json=payload, data=data)
    return res.json()


def kb_pipeline_log_detail(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{KB_APP_URL}/pipeline_log_detail", headers=headers, auth=auth, params=params)
    return res.json()


# Long-running pipelines follow a run/trace pair: POST run_* starts the job,
# GET trace_* polls its progress.
def kb_run_graphrag(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/run_graphrag", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def kb_trace_graphrag(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{KB_APP_URL}/trace_graphrag", headers=headers, auth=auth, params=params)
    return res.json()


def kb_run_raptor(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/run_raptor", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def kb_trace_raptor(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{KB_APP_URL}/trace_raptor", headers=headers, auth=auth, params=params)
    return res.json()


def kb_run_mindmap(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/run_mindmap", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def kb_trace_mindmap(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{KB_APP_URL}/trace_mindmap", headers=headers, auth=auth, params=params)
    return res.json()


def list_tags_from_kbs(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{KB_APP_URL}/tags", headers=headers, auth=auth, params=params)
    return res.json()


def list_tags(auth, dataset_id, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{KB_APP_URL}/{dataset_id}/tags", headers=headers, auth=auth, params=params)
    return res.json()


def rm_tags(auth, dataset_id, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/{dataset_id}/rm_tags", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def rename_tags(auth, dataset_id, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{KB_APP_URL}/{dataset_id}/rename_tag", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def knowledge_graph(auth, dataset_id, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{KB_APP_URL}/{dataset_id}/knowledge_graph", headers=headers, auth=auth, params=params)
    return res.json()


def delete_knowledge_graph(auth, dataset_id, payload=None, *, headers=HEADERS, data=None):
    res = requests.delete(url=f"{HOST_ADDRESS}{KB_APP_URL}/{dataset_id}/knowledge_graph", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def batch_create_datasets(auth, num):
    """Create *num* datasets named kb_0..kb_{num-1}; return their kb_ids."""
    ids = []
    for i in range(num):
        res = create_kb(auth, {"name": f"kb_{i}"})
        ids.append(res["data"]["kb_id"])
    return ids
# DOCUMENT APP
# Wrappers around the /document endpoints; each returns the decoded JSON body.
def upload_documents(auth, payload=None, files_path=None, *, filename_override=None):
    """Upload one or more files as a multipart/form-data request.

    *payload* entries become extra form fields (stringified); *files_path*
    entries are opened in binary mode and attached under the "file" field.
    *filename_override*, when given, replaces the on-disk filename for every
    attachment. All opened file handles are closed in the finally block.
    """
    url = f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/upload"
    if files_path is None:
        files_path = []
    fields = []
    file_objects = []
    try:
        if payload:
            for k, v in payload.items():
                fields.append((k, str(v)))
        for fp in files_path:
            p = Path(fp)
            f = p.open("rb")
            filename = filename_override if filename_override is not None else p.name
            fields.append(("file", (filename, f)))
            file_objects.append(f)
        m = MultipartEncoder(fields=fields)
        # The encoder supplies the multipart boundary, so use its content type
        # instead of the default JSON HEADERS.
        res = requests.post(
            url=url,
            headers={"Content-Type": m.content_type},
            auth=auth,
            data=m,
        )
        return res.json()
    finally:
        for f in file_objects:
            f.close()


def create_document(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/create", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def list_documents(auth, params=None, payload=None, *, headers=HEADERS, data=None):
    if payload is None:
        payload = {}
    res = requests.post(url=f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/list", headers=headers, auth=auth, params=params, json=payload, data=data)
    return res.json()


def delete_document(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/rm", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def parse_documents(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/run", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def document_filter(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/filter", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def document_infos(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/infos", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def document_metadata_summary(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/metadata/summary", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def document_metadata_update(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/metadata/update", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def document_update_metadata_setting(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/update_metadata_setting", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def document_change_status(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/change_status", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def document_rename(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/rename", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def document_set_meta(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{DOCUMENT_APP_URL}/set_meta", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def bulk_upload_documents(auth, kb_id, num, tmp_path):
    """Create *num* temporary txt files, upload them to *kb_id*, and return the new document ids."""
    fps = []
    for i in range(num):
        fp = create_txt_file(tmp_path / f"ragflow_test_upload_{i}.txt")
        fps.append(fp)
    res = upload_documents(auth, {"kb_id": kb_id}, fps)
    document_ids = []
    for document in res["data"]:
        document_ids.append(document["id"])
    return document_ids
# CHUNK APP
# Wrappers around the /chunk endpoints; each returns the decoded JSON body.
def add_chunk(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{CHUNK_API_URL}/create", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def list_chunks(auth, payload=None, *, headers=HEADERS):
    res = requests.post(url=f"{HOST_ADDRESS}{CHUNK_API_URL}/list", headers=headers, auth=auth, json=payload)
    return res.json()


def get_chunk(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{CHUNK_API_URL}/get", headers=headers, auth=auth, params=params)
    return res.json()


def update_chunk(auth, payload=None, *, headers=HEADERS):
    res = requests.post(url=f"{HOST_ADDRESS}{CHUNK_API_URL}/set", headers=headers, auth=auth, json=payload)
    return res.json()


def switch_chunks(auth, payload=None, *, headers=HEADERS):
    res = requests.post(url=f"{HOST_ADDRESS}{CHUNK_API_URL}/switch", headers=headers, auth=auth, json=payload)
    return res.json()


def delete_chunks(auth, payload=None, *, headers=HEADERS):
    res = requests.post(url=f"{HOST_ADDRESS}{CHUNK_API_URL}/rm", headers=headers, auth=auth, json=payload)
    return res.json()


def retrieval_chunks(auth, payload=None, *, headers=HEADERS):
    res = requests.post(url=f"{HOST_ADDRESS}{CHUNK_API_URL}/retrieval_test", headers=headers, auth=auth, json=payload)
    return res.json()


def batch_add_chunks(auth, doc_id, num):
    """Add *num* chunks to document *doc_id*; return the new chunk ids."""
    chunk_ids = []
    for i in range(num):
        res = add_chunk(auth, {"doc_id": doc_id, "content_with_weight": f"chunk test {i}"})
        chunk_ids.append(res["data"]["chunk_id"])
    return chunk_ids
# DIALOG APP
def create_dialog(auth, payload=None, *, headers=HEADERS, data=None):
    """Create a dialog via POST /dialog/set, with optional verbose debug tracing.

    When TEST_HTTP_DEBUG=1, the exchange is logged and a non-OK HTTP status or
    non-zero application code raises AssertionError with full request context.
    A JSON-decode failure of the response body is re-raised after logging.
    """
    if payload is None:
        payload = {}
    url = f"{HOST_ADDRESS}{DIALOG_APP_URL}/set"
    # Per-call request id so the exchange can be correlated in server logs.
    req_id = str(uuid.uuid4())
    req_headers = dict(headers)
    req_headers["X-Request-ID"] = req_id
    start = time.monotonic()
    res = requests.post(url=url, headers=req_headers, auth=auth, json=payload, data=data)
    elapsed_ms = (time.monotonic() - start) * 1000
    resp_json = None
    json_error = None
    try:
        resp_json = res.json()
    except ValueError as exc:
        # Keep the decode error; it is re-raised after the debug hooks below.
        json_error = exc
    _log_http_debug("POST", url, req_id, payload, res.status_code, res.text, resp_json, elapsed_ms)
    if _http_debug_enabled():
        if not res.ok or (resp_json is not None and resp_json.get("code") != 0):
            payload_summary = _redact_payload(payload)
            raise AssertionError(
                "HTTP helper failure: "
                f"req_id={req_id} url={url} status={res.status_code} "
                f"payload={payload_summary} response={res.text}"
            )
    if json_error:
        raise json_error
    return resp_json


def update_dialog(auth, payload=None, *, headers=HEADERS, data=None):
    # Same /set endpoint as create_dialog; the server distinguishes by payload.
    res = requests.post(url=f"{HOST_ADDRESS}{DIALOG_APP_URL}/set", headers=headers, auth=auth, json=payload, data=data)
    return res.json()


def get_dialog(auth, params=None, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{DIALOG_APP_URL}/get", headers=headers, auth=auth, params=params)
    return res.json()


def list_dialogs(auth, *, headers=HEADERS):
    res = requests.get(url=f"{HOST_ADDRESS}{DIALOG_APP_URL}/list", headers=headers, auth=auth)
    return res.json()


def delete_dialog(auth, payload=None, *, headers=HEADERS, data=None):
    res = requests.post(url=f"{HOST_ADDRESS}{DIALOG_APP_URL}/rm", headers=headers, auth=auth, json=payload, data=data)
    return res.json()
def batch_create_dialogs(auth, num, kb_ids=None):
    """Create *num* dialogs and return their ids.

    When *kb_ids* is given, the dialogs reference those knowledge bases and use
    a knowledge-aware system prompt; otherwise a plain assistant prompt is used.
    Raises AssertionError on the first failed creation.
    """
    if kb_ids is None:
        kb_ids = []
    dialog_ids = []
    for i in range(num):
        if kb_ids:
            prompt_config = {
                "system": "You are a helpful assistant. Use the following knowledge to answer questions: {knowledge}",
                "parameters": [{"key": "knowledge", "optional": False}],
            }
        else:
            prompt_config = {
                "system": "You are a helpful assistant.",
                "parameters": [],
            }
        payload = {
            "name": f"dialog_{i}",
            "description": f"Test dialog {i}",
            "kb_ids": kb_ids,
            "prompt_config": prompt_config,
            "top_n": 6,
            "top_k": 1024,
            "similarity_threshold": 0.1,
            "vector_similarity_weight": 0.3,
            "llm_setting": {"model": "gpt-3.5-turbo", "temperature": 0.7},
        }
        res = create_dialog(auth, payload)
        if res is None or res.get("code") != 0:
            uses_knowledge = "{knowledge}" in payload["prompt_config"]["system"]
            raise AssertionError(
                "batch_create_dialogs failed: "
                f"res={res} kb_ids_len={len(kb_ids)} uses_knowledge={uses_knowledge}"
            )
        # The guard above guarantees res["code"] == 0 here, so the original
        # redundant `if res["code"] == 0:` re-check was dead code and is removed.
        dialog_ids.append(res["data"]["id"])
    return dialog_ids
def delete_dialogs(auth):
    """Delete every dialog owned by *auth*'s user; no-op when the list call fails or is empty."""
    res = list_dialogs(auth)
    if res["code"] == 0 and res["data"]:
        dialog_ids = [dialog["id"] for dialog in res["data"]]
        if dialog_ids:
            delete_dialog(auth, {"dialog_ids": dialog_ids})
# MEMORY APP
def create_memory(auth, payload=None):
url = f"{HOST_ADDRESS}{MEMORY_API_URL}"
res = requests.post(url=url, headers=HEADERS, auth=auth, json=payload)
return res.json()
def update_memory(auth, memory_id:str, payload=None):
url = f"{HOST_ADDRESS}{MEMORY_API_URL}/{memory_id}"
res = requests.put(url=url, headers=HEADERS, auth=auth, json=payload)
return res.json()
def delete_memory(auth, memory_id:str):
url = f"{HOST_ADDRESS}{MEMORY_API_URL}/{memory_id}"
res = requests.delete(url=url, headers=HEADERS, auth=auth)
return res.json()
def list_memory(auth, params=None):
url = f"{HOST_ADDRESS}{MEMORY_API_URL}"
if params:
query_parts = []
for key, value in params.items():
if isinstance(value, list):
for item in value:
query_parts.append(f"{key}={item}")
else:
query_parts.append(f"{key}={value}")
query_string = "&".join(query_parts)
url = f"{url}?{query_string}"
res = requests.get(url=url, headers=HEADERS, auth=auth)
return res.json()
def get_memory_config(auth, memory_id:str):
url = f"{HOST_ADDRESS}{MEMORY_API_URL}/{memory_id}/config"
res = requests.get(url=url, headers=HEADERS, auth=auth)
return res.json()
def list_memory_message(auth, memory_id, params=None):
url = f"{HOST_ADDRESS}{MEMORY_API_URL}/{memory_id}"
if params:
query_parts = []
for key, value in params.items():
if isinstance(value, list):
for item in value:
query_parts.append(f"{key}={item}")
else:
query_parts.append(f"{key}={value}")
query_string = "&".join(query_parts)
url = f"{url}?{query_string}"
res = requests.get(url=url, headers=HEADERS, auth=auth)
return res.json()
def add_message(auth, payload=None):
url = f"{HOST_ADDRESS}{MESSAGE_API_URL}"
res = requests.post(url=url, headers=HEADERS, auth=auth, json=payload)
return res.json()
def forget_message(auth, memory_id: str, message_id: int):
url = f"{HOST_ADDRESS}{MESSAGE_API_URL}/{memory_id}:{message_id}"
res = requests.delete(url=url, headers=HEADERS, auth=auth)
return res.json()
def update_message_status(auth, memory_id: str, message_id: int, status: bool):
url = f"{HOST_ADDRESS}{MESSAGE_API_URL}/{memory_id}:{message_id}"
payload = {"status": status}
res = requests.put(url=url, headers=HEADERS, auth=auth, json=payload)
return res.json()
def search_message(auth, params=None):
url = f"{HOST_ADDRESS}{MESSAGE_API_URL}/search"
if params:
query_parts = []
for key, value in params.items():
if isinstance(value, list):
for item in value:
query_parts.append(f"{key}={item}")
else:
query_parts.append(f"{key}={value}")
query_string = "&".join(query_parts)
url = f"{url}?{query_string}"
res = requests.get(url=url, headers=HEADERS, auth=auth)
return res.json()
def get_recent_message(auth, params=None):
url = f"{HOST_ADDRESS}{MESSAGE_API_URL}"
if params:
query_parts = []
for key, value in params.items():
if isinstance(value, list):
for item in value:
query_parts.append(f"{key}={item}")
else:
query_parts.append(f"{key}={value}")
query_string = "&".join(query_parts)
url = f"{url}?{query_string}"
res = requests.get(url=url, headers=HEADERS, auth=auth)
return res.json()
def get_message_content(auth, memory_id: str, message_id: int):
url = f"{HOST_ADDRESS}{MESSAGE_API_URL}/{memory_id}:{message_id}/content"
res = requests.get(url=url, headers=HEADERS, auth=auth)
return res.json()
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/common.py",
"license": "Apache License 2.0",
"lines": 486,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_kb_app/test_create_kb.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import create_kb
from configs import DATASET_NAME_LIMIT, INVALID_API_TOKEN
from hypothesis import example, given, settings
from libs.auth import RAGFlowWebApiAuth
from utils.hypothesis_utils import valid_names
@pytest.mark.usefixtures("clear_datasets")
class TestAuthorization:
    """Dataset creation must be rejected for missing or invalid credentials."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
        ids=["empty_auth", "invalid_api_token"],
    )
    def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
        # Both anonymous and bad-token requests must return the 401-style payload.
        res = create_kb(invalid_auth, {"name": "auth_test"})
        assert res["code"] == expected_code, res
        assert res["message"] == expected_message, res
@pytest.mark.usefixtures("clear_datasets")
class TestCapability:
    """Load/capacity checks for dataset creation (serial volume and concurrency)."""

    @pytest.mark.p3
    def test_create_kb_1k(self, WebApiAuth):
        # Serial volume: 1000 creations must all succeed.
        for i in range(1_000):
            payload = {"name": f"dataset_{i}"}
            res = create_kb(WebApiAuth, payload)
            assert res["code"] == 0, f"Failed to create dataset {i}"

    @pytest.mark.p3
    def test_create_kb_concurrent(self, WebApiAuth):
        # Concurrency: 100 creations across 5 workers must all complete and succeed.
        count = 100
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(create_kb, WebApiAuth, {"name": f"dataset_{i}"}) for i in range(count)]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses
        assert all(future.result()["code"] == 0 for future in futures)
@pytest.mark.usefixtures("clear_datasets")
class TestDatasetCreate:
    """Validation rules for the dataset name on creation."""

    @pytest.mark.p1
    @given(name=valid_names())
    @example("a" * 128)
    @settings(max_examples=20)
    def test_name(self, WebApiAuth, name):
        # Property-based check: any valid name (incl. the 128-char boundary) is accepted.
        res = create_kb(WebApiAuth, {"name": name})
        assert res["code"] == 0, res

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "name, expected_message",
        [
            ("", "Dataset name can't be empty."),
            (" ", "Dataset name can't be empty."),
            ("a" * (DATASET_NAME_LIMIT + 1), "Dataset name length is 129 which is large than 128"),
            (0, "Dataset name must be string."),
            (None, "Dataset name must be string."),
        ],
        ids=["empty_name", "space_name", "too_long_name", "invalid_name", "None_name"],
    )
    def test_name_invalid(self, WebApiAuth, name, expected_message):
        # Invalid names are rejected with application code 102 and a specific message.
        payload = {"name": name}
        res = create_kb(WebApiAuth, payload)
        assert res["code"] == 102, res
        assert expected_message in res["message"], res

    @pytest.mark.p3
    def test_name_duplicated(self, WebApiAuth):
        # Duplicate names are allowed: both creations succeed.
        name = "duplicated_name"
        payload = {"name": name}
        res = create_kb(WebApiAuth, payload)
        assert res["code"] == 0, res
        res = create_kb(WebApiAuth, payload)
        assert res["code"] == 0, res

    @pytest.mark.p3
    def test_name_case_insensitive(self, WebApiAuth):
        # Names differing only in case are both accepted.
        name = "CaseInsensitive"
        payload = {"name": name.upper()}
        res = create_kb(WebApiAuth, payload)
        assert res["code"] == 0, res
        payload = {"name": name.lower()}
        res = create_kb(WebApiAuth, payload)
        assert res["code"] == 0, res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_kb_app/test_create_kb.py",
"license": "Apache License 2.0",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_kb_app/test_detail_kb.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import (
detail_kb,
)
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
)
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
res = detail_kb(invalid_auth)
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
class TestDatasetsDetail:
@pytest.mark.p1
def test_kb_id(self, WebApiAuth, add_dataset):
kb_id = add_dataset
payload = {"kb_id": kb_id}
res = detail_kb(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["name"] == "kb_0"
@pytest.mark.p2
def test_id_wrong_uuid(self, WebApiAuth):
payload = {"kb_id": "d94a8dc02c9711f0930f7fbc369eab6d"}
res = detail_kb(WebApiAuth, payload)
assert res["code"] == 103, res
assert "Only owner of dataset authorized for this operation." in res["message"], res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_kb_app/test_detail_kb.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_kb_app/test_list_kbs.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import list_kbs
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
from utils import is_sorted
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
)
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
res = list_kbs(invalid_auth)
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
class TestCapability:
@pytest.mark.p3
def test_concurrent_list(self, WebApiAuth):
count = 100
with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(list_kbs, WebApiAuth) for i in range(count)]
responses = list(as_completed(futures))
assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures)
@pytest.mark.usefixtures("add_datasets")
class TestDatasetsList:
@pytest.mark.p2
def test_params_unset(self, WebApiAuth):
res = list_kbs(WebApiAuth, None)
assert res["code"] == 0, res
assert len(res["data"]["kbs"]) == 5, res
@pytest.mark.p2
def test_params_empty(self, WebApiAuth):
res = list_kbs(WebApiAuth, {})
assert res["code"] == 0, res
assert len(res["data"]["kbs"]) == 5, res
@pytest.mark.p1
@pytest.mark.parametrize(
"params, expected_page_size",
[
({"page": 2, "page_size": 2}, 2),
({"page": 3, "page_size": 2}, 1),
({"page": 4, "page_size": 2}, 0),
({"page": "2", "page_size": 2}, 2),
({"page": 1, "page_size": 10}, 5),
],
ids=["normal_middle_page", "normal_last_partial_page", "beyond_max_page", "string_page_number", "full_data_single_page"],
)
def test_page(self, WebApiAuth, params, expected_page_size):
res = list_kbs(WebApiAuth, params)
assert res["code"] == 0, res
assert len(res["data"]["kbs"]) == expected_page_size, res
@pytest.mark.skip
@pytest.mark.p2
@pytest.mark.parametrize(
"params, expected_code, expected_message",
[
({"page": 0}, 101, "Input should be greater than or equal to 1"),
({"page": "a"}, 101, "Input should be a valid integer, unable to parse string as an integer"),
],
ids=["page_0", "page_a"],
)
def test_page_invalid(self, WebApiAuth, params, expected_code, expected_message):
res = list_kbs(WebApiAuth, params=params)
assert res["code"] == expected_code, res
assert expected_message in res["message"], res
@pytest.mark.p2
def test_page_none(self, WebApiAuth):
params = {"page": None}
res = list_kbs(WebApiAuth, params)
assert res["code"] == 0, res
assert len(res["data"]["kbs"]) == 5, res
@pytest.mark.p1
@pytest.mark.parametrize(
"params, expected_page_size",
[
({"page": 1, "page_size": 1}, 1),
({"page": 1, "page_size": 3}, 3),
({"page": 1, "page_size": 5}, 5),
({"page": 1, "page_size": 6}, 5),
({"page": 1, "page_size": "1"}, 1),
],
ids=["min_valid_page_size", "medium_page_size", "page_size_equals_total", "page_size_exceeds_total", "string_type_page_size"],
)
def test_page_size(self, WebApiAuth, params, expected_page_size):
res = list_kbs(WebApiAuth, params)
assert res["code"] == 0, res
assert len(res["data"]["kbs"]) == expected_page_size, res
@pytest.mark.skip
@pytest.mark.p2
@pytest.mark.parametrize(
"params, expected_code, expected_message",
[
({"page_size": 0}, 101, "Input should be greater than or equal to 1"),
({"page_size": "a"}, 101, "Input should be a valid integer, unable to parse string as an integer"),
],
)
def test_page_size_invalid(self, WebApiAuth, params, expected_code, expected_message):
res = list_kbs(WebApiAuth, params)
assert res["code"] == expected_code, res
assert expected_message in res["message"], res
@pytest.mark.p2
def test_page_size_none(self, WebApiAuth):
params = {"page_size": None}
res = list_kbs(WebApiAuth, params)
assert res["code"] == 0, res
assert len(res["data"]["kbs"]) == 5, res
@pytest.mark.p3
@pytest.mark.parametrize(
"params, assertions",
[
({"orderby": "update_time"}, lambda r: (is_sorted(r["data"]["kbs"], "update_time", True))),
],
ids=["orderby_update_time"],
)
def test_orderby(self, WebApiAuth, params, assertions):
res = list_kbs(WebApiAuth, params)
assert res["code"] == 0, res
if callable(assertions):
assert assertions(res), res
@pytest.mark.p3
@pytest.mark.parametrize(
"params, assertions",
[
({"desc": "True"}, lambda r: (is_sorted(r["data"]["kbs"], "update_time", True))),
({"desc": "False"}, lambda r: (is_sorted(r["data"]["kbs"], "update_time", False))),
],
ids=["desc=True", "desc=False"],
)
def test_desc(self, WebApiAuth, params, assertions):
res = list_kbs(WebApiAuth, params)
assert res["code"] == 0, res
if callable(assertions):
assert assertions(res), res
@pytest.mark.p2
@pytest.mark.parametrize(
"params, expected_page_size",
[
({"parser_id": "naive"}, 5),
({"parser_id": "qa"}, 0),
],
ids=["naive", "dqa"],
)
def test_parser_id(self, WebApiAuth, params, expected_page_size):
res = list_kbs(WebApiAuth, params)
assert res["code"] == 0, res
assert len(res["data"]["kbs"]) == expected_page_size, res
@pytest.mark.p2
def test_owner_ids_payload_mode(self, WebApiAuth):
base_res = list_kbs(WebApiAuth, {"page_size": 10})
assert base_res["code"] == 0, base_res
assert base_res["data"]["kbs"], base_res
owner_id = base_res["data"]["kbs"][0]["tenant_id"]
res = list_kbs(
WebApiAuth,
params={"page": 1, "page_size": 2, "desc": "false"},
payload={"owner_ids": [owner_id]},
)
assert res["code"] == 0, res
assert res["data"]["total"] >= len(res["data"]["kbs"]), res
assert len(res["data"]["kbs"]) <= 2, res
assert all(kb["tenant_id"] == owner_id for kb in res["data"]["kbs"]), res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_kb_app/test_list_kbs.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_kb_app/test_rm_kb.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import (
list_kbs,
rm_kb,
)
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
)
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
res = rm_kb(invalid_auth)
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
class TestDatasetsDelete:
@pytest.mark.p1
def test_kb_id(self, WebApiAuth, add_datasets_func):
kb_ids = add_datasets_func
payload = {"kb_id": kb_ids[0]}
res = rm_kb(WebApiAuth, payload)
assert res["code"] == 0, res
res = list_kbs(WebApiAuth)
assert len(res["data"]["kbs"]) == 2, res
@pytest.mark.p2
@pytest.mark.usefixtures("add_dataset_func")
def test_id_wrong_uuid(self, WebApiAuth):
payload = {"kb_id": "d94a8dc02c9711f0930f7fbc369eab6d"}
res = rm_kb(WebApiAuth, payload)
assert res["code"] == 109, res
assert "No authorization." in res["message"], res
res = list_kbs(WebApiAuth)
assert len(res["data"]["kbs"]) == 1, res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_kb_app/test_rm_kb.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_kb_app/test_update_kb.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import update_kb
from configs import DATASET_NAME_LIMIT, INVALID_API_TOKEN
from hypothesis import HealthCheck, example, given, settings
from libs.auth import RAGFlowWebApiAuth
from utils import encode_avatar
from utils.file_utils import create_image_file
from utils.hypothesis_utils import valid_names
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
ids=["empty_auth", "invalid_api_token"],
)
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
res = update_kb(invalid_auth, "dataset_id")
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
class TestCapability:
@pytest.mark.p3
def test_update_dateset_concurrent(self, WebApiAuth, add_dataset_func):
dataset_id = add_dataset_func
count = 100
with ThreadPoolExecutor(max_workers=5) as executor:
futures = [
executor.submit(
update_kb,
WebApiAuth,
{
"kb_id": dataset_id,
"name": f"dataset_{i}",
"description": "",
"parser_id": "naive",
},
)
for i in range(count)
]
responses = list(as_completed(futures))
assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures)
class TestDatasetUpdate:
@pytest.mark.p3
def test_dataset_id_not_uuid(self, WebApiAuth):
payload = {"name": "not uuid", "description": "", "parser_id": "naive", "kb_id": "not_uuid"}
res = update_kb(WebApiAuth, payload)
assert res["code"] == 109, res
assert "No authorization." in res["message"], res
@pytest.mark.p1
@given(name=valid_names())
@example("a" * 128)
# Network-bound API call; disable Hypothesis deadline to avoid flaky timeouts.
@settings(max_examples=20, suppress_health_check=[HealthCheck.function_scoped_fixture], deadline=None)
def test_name(self, WebApiAuth, add_dataset_func, name):
dataset_id = add_dataset_func
payload = {"name": name, "description": "", "parser_id": "naive", "kb_id": dataset_id}
res = update_kb(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["name"] == name, res
@pytest.mark.p2
@pytest.mark.parametrize(
"name, expected_message",
[
("", "Dataset name can't be empty."),
(" ", "Dataset name can't be empty."),
("a" * (DATASET_NAME_LIMIT + 1), "Dataset name length is 129 which is large than 128"),
(0, "Dataset name must be string."),
(None, "Dataset name must be string."),
],
ids=["empty_name", "space_name", "too_long_name", "invalid_name", "None_name"],
)
def test_name_invalid(self, WebApiAuth, add_dataset_func, name, expected_message):
kb_id = add_dataset_func
payload = {"name": name, "description": "", "parser_id": "naive", "kb_id": kb_id}
res = update_kb(WebApiAuth, payload)
assert res["code"] == 102, res
assert expected_message in res["message"], res
@pytest.mark.p3
def test_name_duplicated(self, WebApiAuth, add_datasets_func):
kb_id = add_datasets_func[0]
name = "kb_1"
payload = {"name": name, "description": "", "parser_id": "naive", "kb_id": kb_id}
res = update_kb(WebApiAuth, payload)
assert res["code"] == 102, res
assert res["message"] == "Duplicated dataset name.", res
@pytest.mark.p3
def test_name_case_insensitive(self, WebApiAuth, add_datasets_func):
kb_id = add_datasets_func[0]
name = "KB_1"
payload = {"name": name, "description": "", "parser_id": "naive", "kb_id": kb_id}
res = update_kb(WebApiAuth, payload)
assert res["code"] == 102, res
assert res["message"] == "Duplicated dataset name.", res
@pytest.mark.p2
def test_avatar(self, WebApiAuth, add_dataset_func, tmp_path):
kb_id = add_dataset_func
fn = create_image_file(tmp_path / "ragflow_test.png")
payload = {
"name": "avatar",
"description": "",
"parser_id": "naive",
"kb_id": kb_id,
"avatar": f"data:image/png;base64,{encode_avatar(fn)}",
}
res = update_kb(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["avatar"] == f"data:image/png;base64,{encode_avatar(fn)}", res
@pytest.mark.p2
def test_description(self, WebApiAuth, add_dataset_func):
kb_id = add_dataset_func
payload = {"name": "description", "description": "description", "parser_id": "naive", "kb_id": kb_id}
res = update_kb(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["description"] == "description", res
@pytest.mark.p1
@pytest.mark.parametrize(
"embedding_model",
[
"BAAI/bge-small-en-v1.5@Builtin",
"embedding-3@ZHIPU-AI",
],
ids=["builtin_baai", "tenant_zhipu"],
)
def test_embedding_model(self, WebApiAuth, add_dataset_func, embedding_model):
kb_id = add_dataset_func
payload = {"name": "embedding_model", "description": "", "parser_id": "naive", "kb_id": kb_id, "embd_id": embedding_model}
res = update_kb(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["embd_id"] == embedding_model, res
@pytest.mark.p2
@pytest.mark.parametrize(
"permission",
[
"me",
"team",
],
ids=["me", "team"],
)
def test_permission(self, WebApiAuth, add_dataset_func, permission):
kb_id = add_dataset_func
payload = {"name": "permission", "description": "", "parser_id": "naive", "kb_id": kb_id, "permission": permission}
res = update_kb(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["permission"] == permission.lower().strip(), res
@pytest.mark.p1
@pytest.mark.parametrize(
"chunk_method",
[
"naive",
"book",
"email",
"laws",
"manual",
"one",
"paper",
"picture",
"presentation",
"qa",
"table",
pytest.param("tag", marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="Infinity does not support parser_id=tag")),
],
ids=["naive", "book", "email", "laws", "manual", "one", "paper", "picture", "presentation", "qa", "table", "tag"],
)
def test_chunk_method(self, WebApiAuth, add_dataset_func, chunk_method):
kb_id = add_dataset_func
payload = {"name": "chunk_method", "description": "", "parser_id": chunk_method, "kb_id": kb_id}
res = update_kb(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["parser_id"] == chunk_method, res
@pytest.mark.p1
@pytest.mark.skipif(os.getenv("DOC_ENGINE") != "infinity", reason="Infinity does not support parser_id=tag")
def test_chunk_method_tag_with_infinity(self, WebApiAuth, add_dataset_func):
kb_id = add_dataset_func
payload = {"name": "chunk_method", "description": "", "parser_id": "tag", "kb_id": kb_id}
res = update_kb(WebApiAuth, payload)
assert res["code"] == 103, res
assert res["message"] == "The chunking method Tag has not been supported by Infinity yet.", res
@pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="#8208")
@pytest.mark.p2
@pytest.mark.parametrize("pagerank", [0, 50, 100], ids=["min", "mid", "max"])
def test_pagerank(self, WebApiAuth, add_dataset_func, pagerank):
kb_id = add_dataset_func
payload = {"name": "pagerank", "description": "", "parser_id": "naive", "kb_id": kb_id, "pagerank": pagerank}
res = update_kb(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["pagerank"] == pagerank, res
@pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="#8208")
@pytest.mark.p2
def test_pagerank_set_to_0(self, WebApiAuth, add_dataset_func):
kb_id = add_dataset_func
payload = {"name": "pagerank", "description": "", "parser_id": "naive", "kb_id": kb_id, "pagerank": 50}
res = update_kb(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["pagerank"] == 50, res
payload = {"name": "pagerank", "description": "", "parser_id": "naive", "kb_id": kb_id, "pagerank": 0}
res = update_kb(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["pagerank"] == 0, res
@pytest.mark.skipif(os.getenv("DOC_ENGINE") != "infinity", reason="#8208")
@pytest.mark.p2
def test_pagerank_infinity(self, WebApiAuth, add_dataset_func):
kb_id = add_dataset_func
payload = {"name": "pagerank", "description": "", "parser_id": "naive", "kb_id": kb_id, "pagerank": 50}
res = update_kb(WebApiAuth, payload)
assert res["code"] == 102, res
assert res["message"] == "'pagerank' can only be set when doc_engine is elasticsearch", res
@pytest.mark.p1
@pytest.mark.parametrize(
"parser_config",
[
{"auto_keywords": 0},
{"auto_keywords": 16},
{"auto_keywords": 32},
{"auto_questions": 0},
{"auto_questions": 5},
{"auto_questions": 10},
{"chunk_token_num": 1},
{"chunk_token_num": 1024},
{"chunk_token_num": 2048},
{"delimiter": "\n"},
{"delimiter": " "},
{"html4excel": True},
{"html4excel": False},
{"layout_recognize": "DeepDOC"},
{"layout_recognize": "Plain Text"},
{"tag_kb_ids": ["1", "2"]},
{"topn_tags": 1},
{"topn_tags": 5},
{"topn_tags": 10},
{"filename_embd_weight": 0.1},
{"filename_embd_weight": 0.5},
{"filename_embd_weight": 1.0},
{"task_page_size": 1},
{"task_page_size": None},
{"pages": [[1, 100]]},
{"pages": None},
{"graphrag": {"use_graphrag": True}},
{"graphrag": {"use_graphrag": False}},
{"graphrag": {"entity_types": ["age", "sex", "height", "weight"]}},
{"graphrag": {"method": "general"}},
{"graphrag": {"method": "light"}},
{"graphrag": {"community": True}},
{"graphrag": {"community": False}},
{"graphrag": {"resolution": True}},
{"graphrag": {"resolution": False}},
{"raptor": {"use_raptor": True}},
{"raptor": {"use_raptor": False}},
{"raptor": {"prompt": "Who are you?"}},
{"raptor": {"max_token": 1}},
{"raptor": {"max_token": 1024}},
{"raptor": {"max_token": 2048}},
{"raptor": {"threshold": 0.0}},
{"raptor": {"threshold": 0.5}},
{"raptor": {"threshold": 1.0}},
{"raptor": {"max_cluster": 1}},
{"raptor": {"max_cluster": 512}},
{"raptor": {"max_cluster": 1024}},
{"raptor": {"random_seed": 0}},
],
ids=[
"auto_keywords_min",
"auto_keywords_mid",
"auto_keywords_max",
"auto_questions_min",
"auto_questions_mid",
"auto_questions_max",
"chunk_token_num_min",
"chunk_token_num_mid",
"chunk_token_num_max",
"delimiter",
"delimiter_space",
"html4excel_true",
"html4excel_false",
"layout_recognize_DeepDOC",
"layout_recognize_navie",
"tag_kb_ids",
"topn_tags_min",
"topn_tags_mid",
"topn_tags_max",
"filename_embd_weight_min",
"filename_embd_weight_mid",
"filename_embd_weight_max",
"task_page_size_min",
"task_page_size_None",
"pages",
"pages_none",
"graphrag_true",
"graphrag_false",
"graphrag_entity_types",
"graphrag_method_general",
"graphrag_method_light",
"graphrag_community_true",
"graphrag_community_false",
"graphrag_resolution_true",
"graphrag_resolution_false",
"raptor_true",
"raptor_false",
"raptor_prompt",
"raptor_max_token_min",
"raptor_max_token_mid",
"raptor_max_token_max",
"raptor_threshold_min",
"raptor_threshold_mid",
"raptor_threshold_max",
"raptor_max_cluster_min",
"raptor_max_cluster_mid",
"raptor_max_cluster_max",
"raptor_random_seed_min",
],
)
def test_parser_config(self, WebApiAuth, add_dataset_func, parser_config):
kb_id = add_dataset_func
payload = {"name": "parser_config", "description": "", "parser_id": "naive", "kb_id": kb_id, "parser_config": parser_config}
res = update_kb(WebApiAuth, payload)
assert res["code"] == 0, res
assert res["data"]["parser_config"] == parser_config, res
@pytest.mark.p2
@pytest.mark.parametrize(
"payload",
[
{"id": "id"},
{"tenant_id": "e57c1966f99211efb41e9e45646e0111"},
{"created_by": "created_by"},
{"create_date": "Tue, 11 Mar 2025 13:37:23 GMT"},
{"create_time": 1741671443322},
{"update_date": "Tue, 11 Mar 2025 13:37:23 GMT"},
{"update_time": 1741671443339},
],
)
def test_field_unsupported(self, WebApiAuth, add_dataset_func, payload):
kb_id = add_dataset_func
full_payload = {"name": "field_unsupported", "description": "", "parser_id": "naive", "kb_id": kb_id, **payload}
res = update_kb(WebApiAuth, full_payload)
assert res["code"] == 101, res
assert "isn't allowed" in res["message"], res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_kb_app/test_update_kb.py",
"license": "Apache License 2.0",
"lines": 355,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:rag/utils/opendal_conn.py | import opendal
import logging
import pymysql
import re
from urllib.parse import quote_plus
from common.config_utils import get_base_config
from common.decorator import singleton
CREATE_TABLE_SQL = """
CREATE TABLE IF NOT EXISTS `{}` (
`key` VARCHAR(255) PRIMARY KEY,
`value` LONGBLOB,
`created_at` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
`updated_at` TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);
"""
SET_MAX_ALLOWED_PACKET_SQL = """
SET GLOBAL max_allowed_packet={}
"""
def get_opendal_config():
try:
opendal_config = get_base_config('opendal', {})
if opendal_config.get("scheme", "mysql") == 'mysql':
mysql_config = get_base_config('mysql', {})
max_packet = mysql_config.get("max_allowed_packet", 134217728)
kwargs = {
"scheme": "mysql",
"host": mysql_config.get("host", "127.0.0.1"),
"port": str(mysql_config.get("port", 3306)),
"user": mysql_config.get("user", "root"),
"password": mysql_config.get("password", ""),
"database": mysql_config.get("name", "test_open_dal"),
"table": opendal_config.get("config", {}).get("oss_table", "opendal_storage"),
"max_allowed_packet": str(max_packet)
}
kwargs[
"connection_string"] = f"mysql://{kwargs['user']}:{quote_plus(kwargs['password'])}@{kwargs['host']}:{kwargs['port']}/{kwargs['database']}?max_allowed_packet={max_packet}"
else:
scheme = opendal_config.get("scheme")
config_data = opendal_config.get("config", {})
kwargs = {"scheme": scheme, **config_data}
# Only include non-sensitive keys in logs. Do NOT
# add 'password' or any key containing embedded credentials
# (like 'connection_string').
safe_log_info = {
"scheme": kwargs.get("scheme"),
"host": kwargs.get("host"),
"port": kwargs.get("port"),
"database": kwargs.get("database"),
"table": kwargs.get("table"),
# indicate presence of credentials without logging them
"has_credentials": any(k in kwargs for k in ("password", "connection_string")),
}
logging.info("Loaded OpenDAL configuration (non sensitive fields only): %s", safe_log_info)
return kwargs
except Exception as e:
logging.error("Failed to load OpenDAL configuration from yaml: %s", str(e))
raise
@singleton
class OpenDALStorage:
def __init__(self):
self._kwargs = get_opendal_config()
self._scheme = self._kwargs.get('scheme', 'mysql')
if self._scheme == 'mysql':
self.init_db_config()
self.init_opendal_mysql_table()
self._operator = opendal.Operator(**self._kwargs)
logging.info("OpenDALStorage initialized successfully")
def health(self):
bucket, fnm, binary = "txtxtxtxt1", "txtxtxtxt1", b"_t@@@1"
return self._operator.write(f"{bucket}/{fnm}", binary)
def put(self, bucket, fnm, binary, tenant_id=None):
self._operator.write(f"{bucket}/{fnm}", binary)
def get(self, bucket, fnm, tenant_id=None):
return self._operator.read(f"{bucket}/{fnm}")
def rm(self, bucket, fnm, tenant_id=None):
self._operator.delete(f"{bucket}/{fnm}")
self._operator.__init__()
def scan(self, bucket, fnm, tenant_id=None):
return self._operator.scan(f"{bucket}/{fnm}")
def obj_exist(self, bucket, fnm, tenant_id=None):
return self._operator.exists(f"{bucket}/{fnm}")
def init_db_config(self):
try:
conn = pymysql.connect(
host=self._kwargs['host'],
port=int(self._kwargs['port']),
user=self._kwargs['user'],
password=self._kwargs['password'],
database=self._kwargs['database']
)
cursor = conn.cursor()
max_packet = self._kwargs.get('max_allowed_packet', 4194304) # Default to 4MB if not specified
# Ensure max_packet is a valid integer to prevent SQL injection
cursor.execute(SET_MAX_ALLOWED_PACKET_SQL.format(int(max_packet)))
conn.commit()
cursor.close()
conn.close()
logging.info(f"Database configuration initialized with max_allowed_packet={max_packet}")
except Exception as e:
logging.error(f"Failed to initialize database configuration: {str(e)}")
raise
def init_opendal_mysql_table(self):
table_name = self._kwargs['table']
# Validate table name to prevent SQL injection
if not re.match(r'^[a-zA-Z0-9_]+$', table_name):
raise ValueError(f"Invalid table name: {table_name}")
conn = pymysql.connect(
host=self._kwargs['host'],
port=int(self._kwargs['port']),
user=self._kwargs['user'],
password=self._kwargs['password'],
database=self._kwargs['database']
)
cursor = conn.cursor()
cursor.execute(CREATE_TABLE_SQL.format(table_name))
conn.commit()
cursor.close()
conn.close()
logging.info(f"Table `{table_name}` initialized.")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/utils/opendal_conn.py",
"license": "Apache License 2.0",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:test/testcases/test_sdk_api/test_session_management/test_create_session_with_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from configs import SESSION_WITH_CHAT_NAME_LIMIT
from ragflow_sdk import RAGFlow
from ragflow_sdk.modules.session import Session
class _DummyStreamResponse:
def __init__(self, lines):
self._lines = lines
def iter_lines(self, decode_unicode=True):
del decode_unicode
for line in self._lines:
yield line
@pytest.fixture(scope="session")
def auth():
return "unit-auth"
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
return None
@pytest.mark.usefixtures("clear_session_with_chat_assistants")
class TestSessionWithChatAssistantCreate:
@pytest.mark.p1
@pytest.mark.parametrize(
"name, expected_message",
[
("valid_name", ""),
pytest.param("a" * (SESSION_WITH_CHAT_NAME_LIMIT + 1), "", marks=pytest.mark.skip(reason="issues/")),
pytest.param(1, "", marks=pytest.mark.skip(reason="issues/")),
("", "`name` can not be empty."),
("duplicated_name", ""),
("case insensitive", ""),
],
)
def test_name(self, add_chat_assistants, name, expected_message):
_, _, chat_assistants = add_chat_assistants
chat_assistant = chat_assistants[0]
if name == "duplicated_name":
chat_assistant.create_session(name=name)
elif name == "case insensitive":
chat_assistant.create_session(name=name.upper())
if expected_message:
with pytest.raises(Exception) as exception_info:
chat_assistant.create_session(name=name)
assert expected_message in str(exception_info.value)
else:
session = chat_assistant.create_session(name=name)
assert session.name == name, str(session)
assert session.chat_id == chat_assistant.id, str(session)
@pytest.mark.p3
def test_concurrent_create_session(self, add_chat_assistants):
count = 1000
_, _, chat_assistants = add_chat_assistants
chat_assistant = chat_assistants[0]
with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(chat_assistant.create_session, name=f"session with chat assistant test {i}") for i in range(count)]
responses = list(as_completed(futures))
assert len(responses) == count, responses
updated_sessions = chat_assistant.list_sessions(page_size=count * 2)
assert len(updated_sessions) == count
@pytest.mark.p3
def test_add_session_to_deleted_chat_assistant(self, client, add_chat_assistants):
_, _, chat_assistants = add_chat_assistants
chat_assistant = chat_assistants[0]
client.delete_chats(ids=[chat_assistant.id])
with pytest.raises(Exception) as exception_info:
chat_assistant.create_session(name="valid_name")
assert "You do not own the assistant" in str(exception_info.value)
@pytest.mark.p2
def test_session_module_streaming_and_helper_paths_unit(monkeypatch):
    """Unit test of ``Session.ask`` streaming parsing for chat and agent sessions.

    No server is involved: ``post`` is monkeypatched to return canned
    ``_DummyStreamResponse`` line iterators, and the recorded call arguments
    are checked afterwards.
    """
    client = RAGFlow("token", "http://localhost:9380")
    chat_session = Session(client, {"id": "session-chat", "chat_id": "chat-1"})
    chat_done_session = Session(client, {"id": "session-chat-done", "chat_id": "chat-1"})
    agent_session = Session(client, {"id": "session-agent", "agent_id": "agent-1"})
    calls = []  # records (kind, path, json, stream, files) per post() call
    # Chat stream mixes every tolerated oddity: an empty line, unparsable
    # JSON, a non-answer event, a line without the "data: " prefix, a boolean
    # ``data`` payload, and the [DONE] sentinel. Only the real answer line is
    # expected to produce a message (asserted below).
    chat_stream = _DummyStreamResponse(
        [
            "",
            "data: {bad json}",
            'data: {"event":"workflow_started","data":{"content":"skip"}}',
            '{"data":{"answer":"chat-answer","reference":{"chunks":[{"id":"chunk-1"}]}}}',
            'data: {"data": true}',
            "data: [DONE]",
        ]
    )
    # Agent stream: bad JSON is skipped, "message" carries the answer,
    # "message_end" closes the conversation.
    agent_stream = _DummyStreamResponse(
        [
            "data: {bad json}",
            'data: {"event":"message","data":{"content":"agent-answer"}}',
            'data: {"event":"message_end","data":{"content":"done"}}',
        ]
    )

    def _chat_post(path, json=None, stream=False, files=None):
        # Capture the outgoing request, then hand back the canned stream.
        calls.append(("chat", path, json, stream, files))
        return chat_stream

    def _agent_post(path, json=None, stream=False, files=None):
        calls.append(("agent", path, json, stream, files))
        return agent_stream

    monkeypatch.setattr(chat_session, "post", _chat_post)
    monkeypatch.setattr(
        chat_done_session,
        "post",
        lambda *_args, **_kwargs: _DummyStreamResponse(
            ['{"data":{"answer":"chat-done","reference":{"chunks":[]}}}', "data: [DONE]"]
        ),
    )
    monkeypatch.setattr(agent_session, "post", _agent_post)
    chat_messages = list(chat_session.ask("hello chat", stream=True, temperature=0.2))
    assert len(chat_messages) == 1
    assert chat_messages[0].content == "chat-answer"
    assert chat_messages[0].reference == [{"id": "chunk-1"}]
    chat_done_messages = list(chat_done_session.ask("hello done", stream=True))
    assert len(chat_done_messages) == 1
    assert chat_done_messages[0].content == "chat-done"
    agent_messages = list(agent_session.ask("hello agent", stream=True, top_p=0.8))
    assert len(agent_messages) == 1
    assert agent_messages[0].content == "agent-answer"
    # Extra kwargs (temperature / top_p) must be forwarded in the request
    # body, and chat vs agent sessions must hit their respective routes.
    assert calls[0][1] == "/chats/chat-1/completions"
    assert calls[0][2]["question"] == "hello chat"
    assert calls[0][2]["session_id"] == "session-chat"
    assert calls[0][2]["temperature"] == 0.2
    assert calls[0][3] is True
    assert calls[1][1] == "/agents/agent-1/completions"
    assert calls[1][2]["question"] == "hello agent"
    assert calls[1][2]["session_id"] == "session-agent"
    assert calls[1][2]["top_p"] == 0.8
    assert calls[1][3] is True
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_session_management/test_create_session_with_chat_assistant.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_session_management/test_delete_sessions_with_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_add_sessions_with_chat_assistant
class TestSessionWithChatAssistantDelete:
    """SDK tests for deleting sessions owned by a chat assistant.

    Fix over the original: the locals in ``test_delete_1k`` were typo'd
    (``ssessions`` / ``ssession``); renamed for readability. Behavior is
    unchanged.
    """

    @pytest.mark.parametrize(
        "payload",
        [
            pytest.param(lambda r: {"ids": ["invalid_id"] + r}, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r[:1] + ["invalid_id"] + r[1:5]}, marks=pytest.mark.p1),
            pytest.param(lambda r: {"ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
        ],
    )
    def test_delete_partial_invalid_id(self, add_sessions_with_chat_assistant_func, payload):
        """A request mixing valid and invalid ids still removes every valid session."""
        chat_assistant, sessions = add_sessions_with_chat_assistant_func
        if callable(payload):
            # Payload factories receive the real session ids from the fixture.
            payload = payload([session.id for session in sessions])
        chat_assistant.delete_sessions(**payload)
        sessions = chat_assistant.list_sessions()
        assert len(sessions) == 0

    @pytest.mark.p3
    def test_repeated_deletion(self, add_sessions_with_chat_assistant_func):
        """Deleting already-deleted sessions raises an ownership error."""
        chat_assistant, sessions = add_sessions_with_chat_assistant_func
        session_ids = {"ids": [session.id for session in sessions]}
        chat_assistant.delete_sessions(**session_ids)
        with pytest.raises(Exception) as exception_info:
            chat_assistant.delete_sessions(**session_ids)
        assert "The chat doesn't own the session" in str(exception_info.value)

    @pytest.mark.p3
    def test_duplicate_deletion(self, add_sessions_with_chat_assistant_func):
        """Duplicate ids in a single request are tolerated; all sessions end up gone."""
        chat_assistant, sessions = add_sessions_with_chat_assistant_func
        session_ids = {"ids": [session.id for session in sessions] * 2}
        chat_assistant.delete_sessions(**session_ids)
        sessions = chat_assistant.list_sessions()
        assert len(sessions) == 0

    @pytest.mark.p3
    def test_concurrent_deletion(self, add_chat_assistants):
        """100 single-session deletions issued concurrently all complete."""
        count = 100
        _, _, chat_assistants = add_chat_assistants
        chat_assistant = chat_assistants[0]
        sessions = batch_add_sessions_with_chat_assistant(chat_assistant, count)
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(chat_assistant.delete_sessions, ids=[sessions[i].id]) for i in range(count)]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses

    @pytest.mark.p3
    def test_delete_1k(self, add_chat_assistants):
        """Bulk-delete 1000 sessions in a single request."""
        count = 1_000
        _, _, chat_assistants = add_chat_assistants
        chat_assistant = chat_assistants[0]
        sessions = batch_add_sessions_with_chat_assistant(chat_assistant, count)
        chat_assistant.delete_sessions(ids=[session.id for session in sessions])
        remaining = chat_assistant.list_sessions()
        assert len(remaining) == 0

    @pytest.mark.parametrize(
        "payload, expected_message, remaining",
        [
            pytest.param(None, """TypeError("argument of type \'NoneType\' is not iterable")""", 0, marks=pytest.mark.skip),
            pytest.param({"ids": ["invalid_id"]}, "The chat doesn't own the session invalid_id", 5, marks=pytest.mark.p3),
            pytest.param("not json", """AttributeError("\'str\' object has no attribute \'get\'")""", 5, marks=pytest.mark.skip),
            pytest.param(lambda r: {"ids": r[:1]}, "", 4, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r}, "", 0, marks=pytest.mark.p1),
            pytest.param({"ids": []}, "", 0, marks=pytest.mark.p3),
        ],
    )
    def test_basic_scenarios(self, add_sessions_with_chat_assistant_func, payload, expected_message, remaining):
        """Delete with a range of payloads and verify the surviving session count."""
        chat_assistant, sessions = add_sessions_with_chat_assistant_func
        if callable(payload):
            payload = payload([session.id for session in sessions])
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.delete_sessions(**payload)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant.delete_sessions(**payload)
        # Checked in BOTH branches: a failed delete must leave the fixture's
        # original five sessions untouched.
        sessions = chat_assistant.list_sessions()
        assert len(sessions) == remaining
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_session_management/test_delete_sessions_with_chat_assistant.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_session_management/test_list_sessions_with_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from concurrent.futures import ThreadPoolExecutor, as_completed
from ragflow_sdk import RAGFlow
from ragflow_sdk.modules.session import Message, Session
@pytest.fixture(scope="session")
def auth():
    """Session-scoped dummy auth token; these unit tests talk to no server."""
    return "unit-auth"
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    # NOTE(review): autouse no-op — presumably overrides a same-named fixture
    # from a shared conftest so no tenant setup runs here; confirm against
    # the test suite's conftest.py.
    return None
class TestSessionsWithChatAssistantList:
    """SDK tests for listing an assistant's sessions (paging, ordering, filters).

    The ``add_sessions_with_chat_assistant`` fixture provides five sessions,
    which the expected counts below rely on.
    """

    @pytest.mark.p2
    def test_list_sessions_raises_on_nonzero_response(self, add_sessions_with_chat_assistant, monkeypatch):
        """A non-zero ``code`` in the HTTP payload must surface as an exception."""
        chat_assistant, _ = add_sessions_with_chat_assistant

        class _DummyResponse:
            # Minimal response stand-in: only json() is consumed by the SDK.
            def json(self):
                return {"code": 1, "message": "boom"}

        monkeypatch.setattr(chat_assistant, "get", lambda *_args, **_kwargs: _DummyResponse())
        with pytest.raises(Exception) as exception_info:
            chat_assistant.list_sessions()
        assert "boom" in str(exception_info.value)

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size, expected_message",
        [
            ({"page": None, "page_size": 2}, 0, "not instance of"),
            pytest.param({"page": 0, "page_size": 2}, 0, "ValueError('Search does not support negative slicing.')", marks=pytest.mark.skip),
            ({"page": 2, "page_size": 2}, 2, ""),
            ({"page": 3, "page_size": 2}, 1, ""),
            ({"page": "3", "page_size": 2}, 0, "not instance of"),
            pytest.param({"page": -1, "page_size": 2}, 0, "ValueError('Search does not support negative slicing.')", marks=pytest.mark.skip),
            pytest.param({"page": "a", "page_size": 2}, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_page(self, add_sessions_with_chat_assistant, params, expected_page_size, expected_message):
        """Page through the 5 fixture sessions; non-int pages are rejected."""
        chat_assistant, _ = add_sessions_with_chat_assistant
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.list_sessions(**params)
            assert expected_message in str(exception_info.value)
        else:
            sessions = chat_assistant.list_sessions(**params)
            assert len(sessions) == expected_page_size

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size, expected_message",
        [
            ({"page_size": None}, 0, "not instance of"),
            ({"page_size": 0}, 0, ""),
            ({"page_size": 1}, 1, ""),
            ({"page_size": 6}, 5, ""),
            ({"page_size": "1"}, 0, "not instance of"),
            pytest.param({"page_size": -1}, 5, "", marks=pytest.mark.skip),
            pytest.param({"page_size": "a"}, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_page_size(self, add_sessions_with_chat_assistant, params, expected_page_size, expected_message):
        """Listing is capped at the requested page size; non-int sizes raise."""
        chat_assistant, _ = add_sessions_with_chat_assistant
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.list_sessions(**params)
            assert expected_message in str(exception_info.value)
        else:
            sessions = chat_assistant.list_sessions(**params)
            assert len(sessions) == expected_page_size

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params, expected_message",
        [
            ({"orderby": None}, "not instance of"),
            ({"orderby": "create_time"}, ""),
            ({"orderby": "update_time"}, ""),
            ({"orderby": "name", "desc": "False"}, "not instance of"),
            pytest.param({"orderby": "unknown"}, "orderby should be create_time or update_time", marks=pytest.mark.skip(reason="issues/")),
        ],
    )
    def test_orderby(self, add_sessions_with_chat_assistant, params, expected_message):
        """``orderby`` accepts the documented string values; wrong types raise."""
        chat_assistant, _ = add_sessions_with_chat_assistant
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.list_sessions(**params)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant.list_sessions(**params)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params, expected_message",
        [
            ({"desc": None}, "not instance of"),
            ({"desc": "true"}, "not instance of"),
            ({"desc": "True"}, "not instance of"),
            ({"desc": True}, ""),
            ({"desc": "false"}, "not instance of"),
            ({"desc": "False"}, "not instance of"),
            ({"desc": False}, ""),
            ({"desc": "False", "orderby": "update_time"}, "not instance of"),
            pytest.param({"desc": "unknown"}, "desc should be true or false", marks=pytest.mark.skip(reason="issues/")),
        ],
    )
    def test_desc(self, add_sessions_with_chat_assistant, params, expected_message):
        """``desc`` must be a real bool; string look-alikes are rejected."""
        chat_assistant, _ = add_sessions_with_chat_assistant
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.list_sessions(**params)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant.list_sessions(**params)

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_num, expected_message",
        [
            ({"name": None}, 0, "not instance of"),
            ({"name": ""}, 5, ""),
            ({"name": "session_with_chat_assistant_1"}, 1, ""),
            ({"name": "unknown"}, 0, ""),
        ],
    )
    def test_name(self, add_sessions_with_chat_assistant, params, expected_num, expected_message):
        """Filter the listing by session name."""
        chat_assistant, _ = add_sessions_with_chat_assistant
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.list_sessions(**params)
            assert expected_message in str(exception_info.value)
        else:
            sessions = chat_assistant.list_sessions(**params)
            # For an exact-name match, check the returned name rather than
            # the count.
            if params["name"] == "session_with_chat_assistant_1":
                assert sessions[0].name == params["name"]
            else:
                assert len(sessions) == expected_num

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "session_id, expected_num, expected_message",
        [
            (None, 0, "not instance of"),
            ("", 5, ""),
            (lambda r: r[0], 1, ""),
            ("unknown", 0, ""),
        ],
    )
    def test_id(self, add_sessions_with_chat_assistant, session_id, expected_num, expected_message):
        """Filter the listing by session id (callable params pick a real id)."""
        chat_assistant, sessions = add_sessions_with_chat_assistant
        if callable(session_id):
            params = {"id": session_id([s.id for s in sessions])}
        else:
            params = {"id": session_id}
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.list_sessions(**params)
            assert expected_message in str(exception_info.value)
        else:
            list_sessions = chat_assistant.list_sessions(**params)
            if "id" in params and params["id"] == sessions[0].id:
                assert list_sessions[0].id == params["id"]
            else:
                assert len(list_sessions) == expected_num

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "session_id, name, expected_num, expected_message",
        [
            (lambda r: r[0], "session_with_chat_assistant_0", 1, ""),
            (lambda r: r[0], "session_with_chat_assistant_100", 0, ""),
            (lambda r: r[0], "unknown", 0, ""),
            ("id", "session_with_chat_assistant_0", 0, ""),
        ],
    )
    def test_name_and_id(self, add_sessions_with_chat_assistant, session_id, name, expected_num, expected_message):
        """Combined id+name filters match only when BOTH agree."""
        chat_assistant, sessions = add_sessions_with_chat_assistant
        if callable(session_id):
            params = {"id": session_id([s.id for s in sessions]), "name": name}
        else:
            params = {"id": session_id, "name": name}
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.list_sessions(**params)
            assert expected_message in str(exception_info.value)
        else:
            list_sessions = chat_assistant.list_sessions(**params)
            assert len(list_sessions) == expected_num

    @pytest.mark.p3
    def test_concurrent_list(self, add_sessions_with_chat_assistant):
        """100 concurrent listing calls all complete."""
        count = 100
        chat_assistant, _ = add_sessions_with_chat_assistant
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(chat_assistant.list_sessions) for _ in range(count)]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses

    @pytest.mark.p3
    def test_list_chats_after_deleting_associated_chat_assistant(self, client, add_sessions_with_chat_assistant):
        """Listing sessions of a deleted assistant raises an ownership error."""
        chat_assistant, _ = add_sessions_with_chat_assistant
        client.delete_chats(ids=[chat_assistant.id])
        with pytest.raises(Exception) as exception_info:
            chat_assistant.list_sessions()
        assert "You don't own the assistant" in str(exception_info.value)
@pytest.mark.p2
def test_session_module_error_paths_unit(monkeypatch):
    """Unit test of ``Session.ask`` error paths plus ``Message`` defaults (no server)."""
    client = RAGFlow("token", "http://localhost:9380")
    # Forcing the name-mangled __session_type to an unsupported value must raise.
    unknown_session = Session(client, {"id": "session-unknown", "chat_id": "chat-1"})
    unknown_session._Session__session_type = "unknown"  # noqa: SLF001
    with pytest.raises(Exception) as exception_info:
        list(unknown_session.ask("hello", stream=False))
    assert "Unknown session type" in str(exception_info.value)
    # A response body that cannot be JSON-decoded is reported as invalid.
    bad_json_session = Session(client, {"id": "session-bad-json", "chat_id": "chat-1"})

    class _BadJsonResponse:
        def json(self):
            raise ValueError("json decode failed")

    monkeypatch.setattr(bad_json_session, "post", lambda *_args, **_kwargs: _BadJsonResponse())
    with pytest.raises(Exception) as exception_info:
        list(bad_json_session.ask("hello", stream=False))
    assert "Invalid response" in str(exception_info.value)
    # Happy path: a non-streaming ask yields one Message carrying the answer
    # and the reference chunks.
    ok_json_session = Session(client, {"id": "session-ok-json", "chat_id": "chat-1"})

    class _OkJsonResponse:
        def json(self):
            return {"data": {"answer": "ok-answer", "reference": {"chunks": [{"id": "chunk-ok"}]}}}

    monkeypatch.setattr(ok_json_session, "post", lambda *_args, **_kwargs: _OkJsonResponse())
    ok_messages = list(ok_json_session.ask("hello", stream=False))
    assert len(ok_messages) == 1
    assert ok_messages[0].content == "ok-answer"
    assert ok_messages[0].reference == [{"id": "chunk-ok"}]
    # Transport-level errors must propagate unchanged to the caller.
    transport_session = Session(client, {"id": "session-transport", "chat_id": "chat-1"})
    monkeypatch.setattr(
        transport_session,
        "post",
        # Generator .throw() trick: a lambda body cannot contain ``raise``.
        lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("transport boom")),
    )
    with pytest.raises(RuntimeError) as exception_info:
        list(transport_session.ask("hello", stream=False))
    assert "transport boom" in str(exception_info.value)
    # A Message built from an empty dict falls back to its defaults.
    message = Message(client, {})
    assert message.content == "Hi! I am your assistant, can I help you?"
    assert message.reference is None
    assert message.role == "assistant"
    assert message.prompt is None
    assert message.id is None
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_session_management/test_list_sessions_with_chat_assistant.py",
"license": "Apache License 2.0",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_session_management/test_update_session_with_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
from random import randint
import pytest
from configs import SESSION_WITH_CHAT_NAME_LIMIT
class TestSessionWithChatAssistantUpdate:
    """SDK tests for renaming sessions that belong to a chat assistant."""

    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            pytest.param({"name": "valid_name"}, "", marks=pytest.mark.p1),
            pytest.param({"name": "a" * (SESSION_WITH_CHAT_NAME_LIMIT + 1)}, "", marks=pytest.mark.skip(reason="issues/")),
            pytest.param({"name": 1}, "", marks=pytest.mark.skip(reason="issues/")),
            pytest.param({"name": ""}, "`name` can not be empty.", marks=pytest.mark.p3),
            pytest.param({"name": "duplicated_name"}, "", marks=pytest.mark.p3),
            pytest.param({"name": "case insensitive"}, "", marks=pytest.mark.p3),
        ],
    )
    def test_name(self, add_sessions_with_chat_assistant_func, payload, expected_message):
        """Rename a session; verify validation and the persisted name."""
        assistant, sessions = add_sessions_with_chat_assistant_func
        target = sessions[0]
        requested = payload["name"]
        # Seed a colliding name first so the update under test exercises the
        # duplicate / case-insensitive path.
        if requested == "duplicated_name":
            target.update(payload)
        elif requested == "case insensitive":
            target.update({"name": requested.upper()})
        if not expected_message:
            target.update(payload)
            refreshed = assistant.list_sessions(id=target.id)[0]
            assert refreshed.name == requested
        else:
            with pytest.raises(Exception) as exception_info:
                target.update(payload)
            assert expected_message in str(exception_info.value)

    @pytest.mark.p3
    def test_repeated_update_session(self, add_sessions_with_chat_assistant_func):
        """Back-to-back renames of the same session both succeed."""
        _, sessions = add_sessions_with_chat_assistant_func
        target = sessions[0]
        for new_name in ("valid_name_1", "valid_name_2"):
            target.update({"name": new_name})

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            pytest.param({"unknown_key": "unknown_value"}, "ValueError", marks=pytest.mark.skip),
            ({}, ""),
            pytest.param(None, "TypeError", marks=pytest.mark.skip),
        ],
    )
    def test_invalid_params(self, add_sessions_with_chat_assistant_func, payload, expected_message):
        """An empty dict is a no-op; malformed payloads are expected to raise."""
        _, sessions = add_sessions_with_chat_assistant_func
        target = sessions[0]
        if not expected_message:
            target.update(payload)
            return
        with pytest.raises(Exception) as exception_info:
            target.update(payload)
        assert expected_message in str(exception_info.value)

    @pytest.mark.p3
    def test_concurrent_update_session(self, add_sessions_with_chat_assistant_func):
        """50 renames spread randomly over the five fixture sessions all complete."""
        count = 50
        _, sessions = add_sessions_with_chat_assistant_func
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [
                executor.submit(sessions[randint(0, 4)].update, {"name": f"update session test {i}"})
                for i in range(count)
            ]
            completed = list(as_completed(futures))
        assert len(completed) == count, completed

    @pytest.mark.p3
    def test_update_session_to_deleted_chat_assistant(self, client, add_sessions_with_chat_assistant_func):
        """Updating a session whose assistant was deleted raises an ownership error."""
        assistant, sessions = add_sessions_with_chat_assistant_func
        client.delete_chats(ids=[assistant.id])
        with pytest.raises(Exception) as exception_info:
            sessions[0].update({"name": "valid_name"})
        assert "You do not own the session" in str(exception_info.value)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_session_management/test_update_session_with_chat_assistant.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_chat_assistant_management/test_create_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from operator import attrgetter
import pytest
from configs import CHAT_ASSISTANT_NAME_LIMIT
from ragflow_sdk import Chat
from utils import encode_avatar
from utils.file_utils import create_image_file
@pytest.mark.usefixtures("clear_chat_assistants")
class TestChatAssistantCreate:
    """SDK tests for creating chat assistants (name, datasets, avatar, llm, prompt)."""

    @pytest.mark.p1
    @pytest.mark.usefixtures("add_chunks")
    @pytest.mark.parametrize(
        "name, expected_message",
        [
            ("valid_name", ""),
            pytest.param("a" * (CHAT_ASSISTANT_NAME_LIMIT + 1), "", marks=pytest.mark.skip(reason="issues/")),
            pytest.param(1, "", marks=pytest.mark.skip(reason="issues/")),
            ("", "`name` is required."),
            ("duplicated_name", "Duplicated chat name in creating chat."),
            ("case insensitive", "Duplicated chat name in creating chat."),
        ],
    )
    def test_name(self, client, name, expected_message):
        """Assistant names are required and unique (case-insensitively)."""
        # Seed a colliding assistant first so the second create triggers the
        # duplicate-name error.
        if name == "duplicated_name":
            client.create_chat(name=name)
        elif name == "case insensitive":
            client.create_chat(name=name.upper())
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.create_chat(name=name)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant = client.create_chat(name=name)
            assert chat_assistant.name == name

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "dataset_ids, expected_message",
        [
            ([], ""),
            (lambda r: [r], ""),
            (["invalid_dataset_id"], "You don't own the dataset invalid_dataset_id"),
            # A bare string is accepted where a list is expected; the error
            # naming dataset "i" shows it is processed per-character.
            ("invalid_dataset_id", "You don't own the dataset i"),
        ],
    )
    def test_dataset_ids(self, client, add_chunks, dataset_ids, expected_message):
        """Create an assistant bound to valid / invalid dataset id payloads."""
        dataset, _, _ = add_chunks
        if callable(dataset_ids):
            dataset_ids = dataset_ids(dataset.id)
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.create_chat(name="ragflow test", dataset_ids=dataset_ids)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant = client.create_chat(name="ragflow test", dataset_ids=dataset_ids)
            assert chat_assistant.name == "ragflow test"

    @pytest.mark.p3
    def test_avatar(self, client, tmp_path):
        """An assistant can be created with a base64-encoded avatar image."""
        fn = create_image_file(tmp_path / "ragflow_test.png")
        chat_assistant = client.create_chat(name="avatar_test", avatar=encode_avatar(fn), dataset_ids=[])
        assert chat_assistant.name == "avatar_test"

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "llm, expected_message",
        [
            ({}, ""),
            ({"model_name": "glm-4"}, ""),
            ({"model_name": "unknown"}, "`model_name` unknown doesn't exist"),
            ({"temperature": 0}, ""),
            ({"temperature": 1}, ""),
            pytest.param({"temperature": -1}, "", marks=pytest.mark.skip),
            pytest.param({"temperature": 10}, "", marks=pytest.mark.skip),
            pytest.param({"temperature": "a"}, "", marks=pytest.mark.skip),
            ({"top_p": 0}, ""),
            ({"top_p": 1}, ""),
            pytest.param({"top_p": -1}, "", marks=pytest.mark.skip),
            pytest.param({"top_p": 10}, "", marks=pytest.mark.skip),
            pytest.param({"top_p": "a"}, "", marks=pytest.mark.skip),
            ({"presence_penalty": 0}, ""),
            ({"presence_penalty": 1}, ""),
            pytest.param({"presence_penalty": -1}, "", marks=pytest.mark.skip),
            pytest.param({"presence_penalty": 10}, "", marks=pytest.mark.skip),
            pytest.param({"presence_penalty": "a"}, "", marks=pytest.mark.skip),
            ({"frequency_penalty": 0}, ""),
            ({"frequency_penalty": 1}, ""),
            pytest.param({"frequency_penalty": -1}, "", marks=pytest.mark.skip),
            pytest.param({"frequency_penalty": 10}, "", marks=pytest.mark.skip),
            pytest.param({"frequency_penalty": "a"}, "", marks=pytest.mark.skip),
            ({"max_token": 0}, ""),
            ({"max_token": 1024}, ""),
            pytest.param({"max_token": -1}, "", marks=pytest.mark.skip),
            pytest.param({"max_token": 10}, "", marks=pytest.mark.skip),
            pytest.param({"max_token": "a"}, "", marks=pytest.mark.skip),
            pytest.param({"unknown": "unknown"}, "", marks=pytest.mark.skip),
        ],
    )
    def test_llm(self, client, add_chunks, llm, expected_message):
        """Create an assistant with custom LLM settings; empty settings fall
        back to the documented defaults asserted below."""
        dataset, _, _ = add_chunks
        llm_o = Chat.LLM(client, llm)
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.create_chat(name="llm_test", dataset_ids=[dataset.id], llm=llm_o)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant = client.create_chat(name="llm_test", dataset_ids=[dataset.id], llm=llm_o)
            if llm:
                # Every explicitly-set field must round-trip unchanged.
                for k, v in llm.items():
                    assert attrgetter(k)(chat_assistant.llm) == v
            else:
                assert attrgetter("model_name")(chat_assistant.llm) == "glm-4-flash@ZHIPU-AI"
                assert attrgetter("temperature")(chat_assistant.llm) == 0.1
                assert attrgetter("top_p")(chat_assistant.llm) == 0.3
                assert attrgetter("presence_penalty")(chat_assistant.llm) == 0.4
                assert attrgetter("frequency_penalty")(chat_assistant.llm) == 0.7
                assert attrgetter("max_tokens")(chat_assistant.llm) == 512

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "prompt, expected_message",
        [
            ({"similarity_threshold": 0}, ""),
            ({"similarity_threshold": 1}, ""),
            pytest.param({"similarity_threshold": -1}, "", marks=pytest.mark.skip),
            pytest.param({"similarity_threshold": 10}, "", marks=pytest.mark.skip),
            pytest.param({"similarity_threshold": "a"}, "", marks=pytest.mark.skip),
            ({"keywords_similarity_weight": 0}, ""),
            ({"keywords_similarity_weight": 1}, ""),
            pytest.param({"keywords_similarity_weight": -1}, "", marks=pytest.mark.skip),
            pytest.param({"keywords_similarity_weight": 10}, "", marks=pytest.mark.skip),
            pytest.param({"keywords_similarity_weight": "a"}, "", marks=pytest.mark.skip),
            ({"variables": []}, ""),
            ({"top_n": 0}, ""),
            ({"top_n": 1}, ""),
            pytest.param({"top_n": -1}, "", marks=pytest.mark.skip),
            pytest.param({"top_n": 10}, "", marks=pytest.mark.skip),
            pytest.param({"top_n": "a"}, "", marks=pytest.mark.skip),
            ({"empty_response": "Hello World"}, ""),
            ({"empty_response": ""}, ""),
            ({"empty_response": "!@#$%^&*()"}, ""),
            ({"empty_response": "中文测试"}, ""),
            pytest.param({"empty_response": 123}, "", marks=pytest.mark.skip),
            pytest.param({"empty_response": True}, "", marks=pytest.mark.skip),
            pytest.param({"empty_response": " "}, "", marks=pytest.mark.skip),
            ({"opener": "Hello World"}, ""),
            ({"opener": ""}, ""),
            ({"opener": "!@#$%^&*()"}, ""),
            ({"opener": "中文测试"}, ""),
            pytest.param({"opener": 123}, "", marks=pytest.mark.skip),
            pytest.param({"opener": True}, "", marks=pytest.mark.skip),
            pytest.param({"opener": " "}, "", marks=pytest.mark.skip),
            ({"show_quote": True}, ""),
            ({"show_quote": False}, ""),
            ({"prompt": "Hello World {knowledge}"}, ""),
            ({"prompt": "{knowledge}"}, ""),
            ({"prompt": "!@#$%^&*() {knowledge}"}, ""),
            ({"prompt": "中文测试 {knowledge}"}, ""),
            ({"prompt": "Hello World"}, ""),
            ({"prompt": "Hello World", "variables": []}, ""),
            pytest.param({"prompt": 123}, """AttributeError("\'int\' object has no attribute \'find\'")""", marks=pytest.mark.skip),
            pytest.param({"prompt": True}, """AttributeError("\'int\' object has no attribute \'find\'")""", marks=pytest.mark.skip),
            pytest.param({"unknown": "unknown"}, "", marks=pytest.mark.skip),
        ],
    )
    def test_prompt(self, client, add_chunks, prompt, expected_message):
        """Create an assistant with custom prompt settings; empty settings fall
        back to the documented defaults asserted below."""
        dataset, _, _ = add_chunks
        prompt_o = Chat.Prompt(client, prompt)
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.create_chat(name="prompt_test", dataset_ids=[dataset.id], prompt=prompt_o)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant = client.create_chat(name="prompt_test", dataset_ids=[dataset.id], prompt=prompt_o)
            if prompt:
                for k, v in prompt.items():
                    if k == "keywords_similarity_weight":
                        # The server stores the complementary (vector) weight,
                        # as the 1 - v round-trip here shows.
                        assert attrgetter(k)(chat_assistant.prompt) == 1 - v
                    else:
                        assert attrgetter(k)(chat_assistant.prompt) == v
            else:
                assert attrgetter("similarity_threshold")(chat_assistant.prompt) == 0.2
                assert attrgetter("keywords_similarity_weight")(chat_assistant.prompt) == 0.7
                assert attrgetter("top_n")(chat_assistant.prompt) == 6
                assert attrgetter("variables")(chat_assistant.prompt) == [{"key": "knowledge", "optional": False}]
                assert attrgetter("rerank_model")(chat_assistant.prompt) == ""
                assert attrgetter("empty_response")(chat_assistant.prompt) == "Sorry! No relevant content was found in the knowledge base!"
                assert attrgetter("opener")(chat_assistant.prompt) == "Hi! I'm your assistant. What can I do for you?"
                assert attrgetter("show_quote")(chat_assistant.prompt) is True
                assert (
                    attrgetter("prompt")(chat_assistant.prompt)
                    == 'You are an intelligent assistant. Please summarize the content of the dataset to answer the question. Please list the data in the dataset and answer in detail. When all dataset content is irrelevant to the question, your answer must include the sentence "The answer you are looking for is not found in the dataset!" Answers need to consider chat history.\n      Here is the knowledge base:\n      {knowledge}\n      The above is the knowledge base.'
                )
class TestChatAssistantCreate2:
    """Chat-assistant creation over a dataset whose document is not yet parsed."""

    @pytest.mark.p2
    def test_unparsed_document(self, client, add_document):
        """Creating a chat bound to an unparsed dataset must be rejected."""
        dataset, _ = add_document
        with pytest.raises(Exception) as exc_info:
            client.create_chat(name="prompt_test", dataset_ids=[dataset.id])
        detail = str(exc_info.value)
        assert "doesn't own parsed file" in detail
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_chat_assistant_management/test_create_chat_assistant.py",
"license": "Apache License 2.0",
"lines": 210,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_chat_assistant_management/test_delete_chat_assistants.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_create_chat_assistants
class TestChatAssistantsDelete:
    """Tests for deleting chat assistants via the SDK `delete_chats` API.

    Fixtures (defined elsewhere in the test suite):
        client: authenticated RAGFlow SDK client.
        add_chat_assistants_func: function-scoped fixture yielding
            (dataset, document, chat_assistants); presumably creates 5
            assistants — TODO confirm against the fixture definition.
    """
    @pytest.mark.parametrize(
        "payload, expected_message, remaining",
        [
            pytest.param(None, "", 0, marks=pytest.mark.p3),
            pytest.param({"ids": []}, "", 0, marks=pytest.mark.p3),
            pytest.param({"ids": ["invalid_id"]}, "Assistant(invalid_id) not found.", 5, marks=pytest.mark.p3),
            pytest.param({"ids": ["\n!?ใ๏ผ๏ผ๏ผ\"'"]}, """Assistant(\n!?ใ๏ผ๏ผ๏ผ"\') not found.""", 5, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r[:1]}, "", 4, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r}, "", 0, marks=pytest.mark.p1),
        ],
    )
    def test_basic_scenarios(self, client, add_chat_assistants_func, payload, expected_message, remaining):
        """Delete with None/empty/invalid/real id payloads and verify the remaining count."""
        _, _, chat_assistants = add_chat_assistants_func
        # Callable payloads are factories that need the real assistant ids.
        if callable(payload):
            payload = payload([chat_assistant.id for chat_assistant in chat_assistants])
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.delete_chats(**payload)
            assert expected_message in str(exception_info.value)
        else:
            if payload is None:
                # Positional None exercises the SDK's default-argument path.
                client.delete_chats(payload)
            else:
                client.delete_chats(**payload)
            assistants = client.list_chats()
            assert len(assistants) == remaining
    @pytest.mark.p2
    def test_delete_chats_nonzero_response_raises(self, client, monkeypatch):
        """A non-zero `code` in the HTTP response body must surface as an exception."""
        class _DummyResponse:
            # Minimal stand-in for the SDK's HTTP response object.
            def json(self):
                return {"code": 1, "message": "boom"}
        monkeypatch.setattr(client, "delete", lambda *_args, **_kwargs: _DummyResponse())
        with pytest.raises(Exception) as exception_info:
            client.delete_chats(ids=["chat-1"])
        assert "boom" in str(exception_info.value), str(exception_info.value)
    @pytest.mark.parametrize(
        "payload",
        [
            pytest.param(lambda r: {"ids": ["invalid_id"] + r}, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r[:1] + ["invalid_id"] + r[1:5]}, marks=pytest.mark.p1),
            pytest.param(lambda r: {"ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
        ],
    )
    def test_delete_partial_invalid_id(self, client, add_chat_assistants_func, payload):
        """Valid ids in a mixed valid/invalid batch are still deleted."""
        _, _, chat_assistants = add_chat_assistants_func
        payload = payload([chat_assistant.id for chat_assistant in chat_assistants])
        client.delete_chats(**payload)
        assistants = client.list_chats()
        assert len(assistants) == 0
    @pytest.mark.p3
    def test_repeated_deletion(self, client, add_chat_assistants_func):
        """Deleting the same ids a second time raises a not-found error."""
        _, _, chat_assistants = add_chat_assistants_func
        chat_ids = [chat.id for chat in chat_assistants]
        client.delete_chats(ids=chat_ids)
        with pytest.raises(Exception) as exception_info:
            client.delete_chats(ids=chat_ids)
        assert "not found" in str(exception_info.value)
    @pytest.mark.p3
    def test_duplicate_deletion(self, client, add_chat_assistants_func):
        """Duplicate ids within a single request are tolerated."""
        _, _, chat_assistants = add_chat_assistants_func
        chat_ids = [chat.id for chat in chat_assistants]
        client.delete_chats(ids=chat_ids + chat_ids)
        assistants = client.list_chats()
        assert len(assistants) == 0
    @pytest.mark.p3
    def test_concurrent_deletion(self, client):
        """100 single-id deletions issued concurrently all complete without error."""
        count = 100
        chat_ids = [client.create_chat(name=f"test_{i}").id for i in range(count)]
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(client.delete_chats, ids=[chat_ids[i]]) for i in range(count)]
            responses = list(as_completed(futures))
        assert len(responses) == count
        assert all(future.exception() is None for future in futures)
    @pytest.mark.p3
    def test_delete_1k(self, client):
        """Bulk-delete 1000 assistants in a single request."""
        chat_assistants = batch_create_chat_assistants(client, 1_000)
        # NOTE(review): the comprehension variable shadows the outer list name; works but confusing.
        client.delete_chats(ids=[chat_assistants.id for chat_assistants in chat_assistants])
        assistants = client.list_chats()
        assert len(assistants) == 0
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_chat_assistant_management/test_delete_chat_assistants.py",
"license": "Apache License 2.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_chat_assistant_management/test_list_chat_assistants.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
@pytest.mark.usefixtures("add_chat_assistants")
class TestChatAssistantsList:
    """Tests for listing chat assistants via the SDK `list_chats` API.

    The class-level `add_chat_assistants` fixture pre-creates assistants;
    the default-listing test below shows 5 exist, with names following the
    pattern test_chat_assistant_<n>.
    """
    @pytest.mark.p1
    def test_default(self, client):
        """Listing with no parameters returns all 5 assistants."""
        assistants = client.list_chats()
        assert len(assistants) == 5
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size, expected_message",
        [
            ({"page": 0, "page_size": 2}, 2, ""),
            ({"page": 2, "page_size": 2}, 2, ""),
            ({"page": 3, "page_size": 2}, 1, ""),
            ({"page": "3", "page_size": 2}, 0, "not instance of"),
            pytest.param(
                {"page": -1, "page_size": 2},
                0,
                "1064",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
            pytest.param(
                {"page": "a", "page_size": 2},
                0,
                """ValueError("invalid literal for int() with base 10: \'a\'")""",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
        ],
    )
    def test_page(self, client, params, expected_page_size, expected_message):
        """Page navigation over 5 assistants; non-int pages are rejected by validation."""
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.list_chats(**params)
            assert expected_message in str(exception_info.value)
        else:
            assistants = client.list_chats(**params)
            assert len(assistants) == expected_page_size
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size, expected_message",
        [
            ({"page_size": 0}, 0, ""),
            ({"page_size": 1}, 1, ""),
            ({"page_size": 6}, 5, ""),
            ({"page_size": "1"}, 0, "not instance of"),
            pytest.param(
                {"page_size": -1},
                0,
                "1064",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
            pytest.param(
                {"page_size": "a"},
                0,
                """ValueError("invalid literal for int() with base 10: \'a\'")""",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
        ],
    )
    def test_page_size(self, client, params, expected_page_size, expected_message):
        """Page-size boundaries (0, 1, larger than total); non-int sizes are rejected."""
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.list_chats(**params)
            assert expected_message in str(exception_info.value)
        else:
            assistants = client.list_chats(**params)
            assert len(assistants) == expected_page_size
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params, expected_message",
        [
            ({"orderby": "create_time"}, ""),
            ({"orderby": "update_time"}, ""),
            pytest.param({"orderby": "name", "desc": "False"}, "", marks=pytest.mark.skip(reason="issues/5851")),
            pytest.param({"orderby": "unknown"}, "orderby should be create_time or update_time", marks=pytest.mark.skip(reason="issues/5851")),
        ],
    )
    def test_orderby(self, client, params, expected_message):
        """Accepted orderby fields are create_time and update_time."""
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.list_chats(**params)
            assert expected_message in str(exception_info.value)
        else:
            client.list_chats(**params)
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params, expected_message",
        [
            ({"desc": None}, "not instance of"),
            ({"desc": "true"}, "not instance of"),
            ({"desc": "True"}, "not instance of"),
            ({"desc": True}, ""),
            ({"desc": "false"}, "not instance of"),
            ({"desc": "False"}, "not instance of"),
            ({"desc": False}, ""),
            ({"desc": "False", "orderby": "update_time"}, "not instance of"),
            pytest.param(
                {"desc": "unknown"},
                "desc should be true or false",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
        ],
    )
    def test_desc(self, client, params, expected_message):
        """`desc` must be a real bool; string forms are rejected by validation."""
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.list_chats(**params)
            assert expected_message in str(exception_info.value)
        else:
            client.list_chats(**params)
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_num, expected_message",
        [
            ({"name": None}, 5, ""),
            ({"name": ""}, 5, ""),
            ({"name": "test_chat_assistant_1"}, 1, ""),
            ({"name": "unknown"}, 0, "The chat doesn't exist"),
        ],
    )
    def test_name(self, client, params, expected_num, expected_message):
        """Filter by name: None/empty means no filter; unknown names raise."""
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.list_chats(**params)
            assert expected_message in str(exception_info.value)
        else:
            assistants = client.list_chats(**params)
            if params["name"] in [None, ""]:
                assert len(assistants) == expected_num
            else:
                assert assistants[0].name == params["name"]
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "chat_assistant_id, expected_num, expected_message",
        [
            (None, 5, ""),
            ("", 5, ""),
            (lambda r: r[0], 1, ""),
            ("unknown", 0, "The chat doesn't exist"),
        ],
    )
    def test_id(self, client, add_chat_assistants, chat_assistant_id, expected_num, expected_message):
        """Filter by id: None/empty means no filter; unknown ids raise."""
        _, _, chat_assistants = add_chat_assistants
        # Callable cases resolve a real assistant id at run time.
        if callable(chat_assistant_id):
            params = {"id": chat_assistant_id([chat.id for chat in chat_assistants])}
        else:
            params = {"id": chat_assistant_id}
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.list_chats(**params)
            assert expected_message in str(exception_info.value)
        else:
            assistants = client.list_chats(**params)
            if params["id"] in [None, ""]:
                assert len(assistants) == expected_num
            else:
                assert assistants[0].id == params["id"]
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "chat_assistant_id, name, expected_num, expected_message",
        [
            (lambda r: r[0], "test_chat_assistant_0", 1, ""),
            (lambda r: r[0], "test_chat_assistant_1", 0, "The chat doesn't exist"),
            (lambda r: r[0], "unknown", 0, "The chat doesn't exist"),
            ("id", "chat_assistant_0", 0, "The chat doesn't exist"),
        ],
    )
    def test_name_and_id(self, client, add_chat_assistants, chat_assistant_id, name, expected_num, expected_message):
        """Combined id+name filter: both must match the same assistant."""
        _, _, chat_assistants = add_chat_assistants
        if callable(chat_assistant_id):
            params = {"id": chat_assistant_id([chat.id for chat in chat_assistants]), "name": name}
        else:
            params = {"id": chat_assistant_id, "name": name}
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.list_chats(**params)
            assert expected_message in str(exception_info.value)
        else:
            assistants = client.list_chats(**params)
            assert len(assistants) == expected_num
    @pytest.mark.p3
    def test_concurrent_list(self, client):
        """100 concurrent list requests all complete."""
        count = 100
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(client.list_chats) for _ in range(count)]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses
    @pytest.mark.p2
    def test_list_chats_after_deleting_associated_dataset(self, client, add_chat_assistants):
        """Assistants remain listable after their associated dataset is deleted."""
        dataset, _, _ = add_chat_assistants
        client.delete_datasets(ids=[dataset.id])
        assistants = client.list_chats()
        assert len(assistants) == 5
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_chat_assistant_management/test_list_chat_assistants.py",
"license": "Apache License 2.0",
"lines": 209,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_chat_assistant_management/test_update_chat_assistant.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from operator import attrgetter
import pytest
from configs import CHAT_ASSISTANT_NAME_LIMIT
from ragflow_sdk import Chat
from utils import encode_avatar
from utils.file_utils import create_image_file
class TestChatAssistantUpdate:
    """Tests for updating chat assistants (name, avatar, llm config, prompt config).

    Fixtures:
        client: authenticated RAGFlow SDK client.
        add_chat_assistants_func: function-scoped fixture yielding
            (dataset, document, chat_assistants).
    """
    @pytest.mark.p2
    def test_update_rejects_non_dict_and_empty_llm_prompt(self, add_chat_assistants_func):
        """Client-side validation: update payload must be a dict; llm/prompt must be non-empty."""
        _, _, chat_assistants = add_chat_assistants_func
        chat_assistant = chat_assistants[0]
        with pytest.raises(Exception) as exception_info:
            # __wrapped__ bypasses the SDK decorator to hit the raw validation path.
            chat_assistant.update.__wrapped__(chat_assistant, "bad")
        assert "`update_message` must be a dict" in str(exception_info.value)
        with pytest.raises(Exception) as exception_info:
            chat_assistant.update({"llm": {}})
        assert "`llm` cannot be empty" in str(exception_info.value)
        with pytest.raises(Exception) as exception_info:
            chat_assistant.update({"prompt": {}})
        assert "`prompt` cannot be empty" in str(exception_info.value)
    @pytest.mark.p2
    def test_update_raises_on_nonzero_response(self, add_chat_assistants_func, monkeypatch):
        """A non-zero `code` in the HTTP response body must surface as an exception."""
        _, _, chat_assistants = add_chat_assistants_func
        chat_assistant = chat_assistants[0]
        class _DummyResponse:
            # Minimal stand-in for the SDK's HTTP response object.
            def json(self):
                return {"code": 1, "message": "boom"}
        monkeypatch.setattr(chat_assistant, "put", lambda *_args, **_kwargs: _DummyResponse())
        with pytest.raises(Exception) as exception_info:
            chat_assistant.update({"name": "error-case"})
        assert "boom" in str(exception_info.value)
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            pytest.param({"name": "valid_name"}, "", marks=pytest.mark.p1),
            pytest.param({"name": "a" * (CHAT_ASSISTANT_NAME_LIMIT + 1)}, "", marks=pytest.mark.skip(reason="issues/")),
            pytest.param({"name": 1}, "", marks=pytest.mark.skip(reason="issues/")),
            pytest.param({"name": ""}, "`name` cannot be empty.", marks=pytest.mark.p3),
            pytest.param({"name": "test_chat_assistant_1"}, "Duplicated chat name in updating chat.", marks=pytest.mark.p3),
            pytest.param({"name": "TEST_CHAT_ASSISTANT_1"}, "Duplicated chat name in updating chat.", marks=pytest.mark.p3),
        ],
    )
    def test_name(self, client, add_chat_assistants_func, payload, expected_message):
        """Renaming: empty names rejected; duplicates rejected case-insensitively."""
        _, _, chat_assistants = add_chat_assistants_func
        chat_assistant = chat_assistants[0]
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.update(payload)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant.update(payload)
            updated_chat = client.list_chats(id=chat_assistant.id)[0]
            assert updated_chat.name == payload["name"], str(updated_chat)
    @pytest.mark.p3
    def test_avatar(self, client, add_chat_assistants_func, tmp_path):
        """Updating the avatar with a base64-encoded image persists a non-None avatar."""
        dataset, _, chat_assistants = add_chat_assistants_func
        chat_assistant = chat_assistants[0]
        fn = create_image_file(tmp_path / "ragflow_test.png")
        payload = {"name": "avatar_test", "avatar": encode_avatar(fn), "dataset_ids": [dataset.id]}
        chat_assistant.update(payload)
        updated_chat = client.list_chats(id=chat_assistant.id)[0]
        assert updated_chat.name == payload["name"], str(updated_chat)
        assert updated_chat.avatar is not None, str(updated_chat)
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "llm, expected_message",
        [
            ({}, "ValueError"),
            ({"model_name": "glm-4"}, ""),
            ({"model_name": "unknown"}, "`model_name` unknown doesn't exist"),
            ({"temperature": 0}, ""),
            ({"temperature": 1}, ""),
            pytest.param({"temperature": -1}, "", marks=pytest.mark.skip),
            pytest.param({"temperature": 10}, "", marks=pytest.mark.skip),
            pytest.param({"temperature": "a"}, "", marks=pytest.mark.skip),
            ({"top_p": 0}, ""),
            ({"top_p": 1}, ""),
            pytest.param({"top_p": -1}, "", marks=pytest.mark.skip),
            pytest.param({"top_p": 10}, "", marks=pytest.mark.skip),
            pytest.param({"top_p": "a"}, "", marks=pytest.mark.skip),
            ({"presence_penalty": 0}, ""),
            ({"presence_penalty": 1}, ""),
            pytest.param({"presence_penalty": -1}, "", marks=pytest.mark.skip),
            pytest.param({"presence_penalty": 10}, "", marks=pytest.mark.skip),
            pytest.param({"presence_penalty": "a"}, "", marks=pytest.mark.skip),
            ({"frequency_penalty": 0}, ""),
            ({"frequency_penalty": 1}, ""),
            pytest.param({"frequency_penalty": -1}, "", marks=pytest.mark.skip),
            pytest.param({"frequency_penalty": 10}, "", marks=pytest.mark.skip),
            pytest.param({"frequency_penalty": "a"}, "", marks=pytest.mark.skip),
            ({"max_token": 0}, ""),
            ({"max_token": 1024}, ""),
            pytest.param({"max_token": -1}, "", marks=pytest.mark.skip),
            pytest.param({"max_token": 10}, "", marks=pytest.mark.skip),
            pytest.param({"max_token": "a"}, "", marks=pytest.mark.skip),
            pytest.param({"unknown": "unknown"}, "", marks=pytest.mark.skip),
        ],
    )
    def test_llm(self, client, add_chat_assistants_func, llm, expected_message):
        """LLM config updates: each supplied key round-trips; empty dict is rejected client-side."""
        dataset, _, chat_assistants = add_chat_assistants_func
        chat_assistant = chat_assistants[0]
        payload = {"name": "llm_test", "llm": llm, "dataset_ids": [dataset.id]}
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.update(payload)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant.update(payload)
            updated_chat = client.list_chats(id=chat_assistant.id)[0]
            if llm:
                for k, v in llm.items():
                    assert attrgetter(k)(updated_chat.llm) == v, str(updated_chat)
            else:
                # Expected server-side defaults when no llm keys are supplied.
                # NOTE(review): updates above use key `max_token` but this default
                # uses `max_tokens` — presumably intentional API asymmetry; verify.
                excepted_value = Chat.LLM(
                    client,
                    {
                        "model_name": "glm-4-flash@ZHIPU-AI",
                        "temperature": 0.1,
                        "top_p": 0.3,
                        "presence_penalty": 0.4,
                        "frequency_penalty": 0.7,
                        "max_tokens": 512,
                    },
                )
                assert str(updated_chat.llm) == str(excepted_value), str(updated_chat)
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "prompt, expected_message",
        [
            ({}, "ValueError"),
            ({"similarity_threshold": 0}, ""),
            ({"similarity_threshold": 1}, ""),
            pytest.param({"similarity_threshold": -1}, "", marks=pytest.mark.skip),
            pytest.param({"similarity_threshold": 10}, "", marks=pytest.mark.skip),
            pytest.param({"similarity_threshold": "a"}, "", marks=pytest.mark.skip),
            ({"keywords_similarity_weight": 0}, ""),
            ({"keywords_similarity_weight": 1}, ""),
            pytest.param({"keywords_similarity_weight": -1}, "", marks=pytest.mark.skip),
            pytest.param({"keywords_similarity_weight": 10}, "", marks=pytest.mark.skip),
            pytest.param({"keywords_similarity_weight": "a"}, "", marks=pytest.mark.skip),
            ({"variables": []}, ""),
            ({"top_n": 0}, ""),
            ({"top_n": 1}, ""),
            pytest.param({"top_n": -1}, "", marks=pytest.mark.skip),
            pytest.param({"top_n": 10}, "", marks=pytest.mark.skip),
            pytest.param({"top_n": "a"}, "", marks=pytest.mark.skip),
            ({"empty_response": "Hello World"}, ""),
            ({"empty_response": ""}, ""),
            ({"empty_response": "!@#$%^&*()"}, ""),
            ({"empty_response": "ไธญๆๆต่ฏ"}, ""),
            pytest.param({"empty_response": 123}, "", marks=pytest.mark.skip),
            pytest.param({"empty_response": True}, "", marks=pytest.mark.skip),
            pytest.param({"empty_response": " "}, "", marks=pytest.mark.skip),
            ({"opener": "Hello World"}, ""),
            ({"opener": ""}, ""),
            ({"opener": "!@#$%^&*()"}, ""),
            ({"opener": "ไธญๆๆต่ฏ"}, ""),
            pytest.param({"opener": 123}, "", marks=pytest.mark.skip),
            pytest.param({"opener": True}, "", marks=pytest.mark.skip),
            pytest.param({"opener": " "}, "", marks=pytest.mark.skip),
            ({"show_quote": True}, ""),
            ({"show_quote": False}, ""),
            ({"prompt": "Hello World {knowledge}"}, ""),
            ({"prompt": "{knowledge}"}, ""),
            ({"prompt": "!@#$%^&*() {knowledge}"}, ""),
            ({"prompt": "ไธญๆๆต่ฏ {knowledge}"}, ""),
            ({"prompt": "Hello World"}, ""),
            ({"prompt": "Hello World", "variables": []}, ""),
            pytest.param({"prompt": 123}, """AttributeError("\'int\' object has no attribute \'find\'")""", marks=pytest.mark.skip),
            pytest.param({"prompt": True}, """AttributeError("\'int\' object has no attribute \'find\'")""", marks=pytest.mark.skip),
            pytest.param({"unknown": "unknown"}, "", marks=pytest.mark.skip),
        ],
    )
    def test_prompt(self, client, add_chat_assistants_func, prompt, expected_message):
        """Prompt config updates: supplied keys round-trip; empty dict is rejected client-side."""
        dataset, _, chat_assistants = add_chat_assistants_func
        chat_assistant = chat_assistants[0]
        payload = {"name": "prompt_test", "prompt": prompt, "dataset_ids": [dataset.id]}
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chat_assistant.update(payload)
            assert expected_message in str(exception_info.value)
        else:
            chat_assistant.update(payload)
            updated_chat = client.list_chats(id=chat_assistant.id)[0]
            if prompt:
                for k, v in prompt.items():
                    if k == "keywords_similarity_weight":
                        # Server appears to store the complementary (vector) weight
                        # for this key — TODO confirm against the API implementation.
                        assert attrgetter(k)(updated_chat.prompt) == 1 - v, str(updated_chat)
                    else:
                        assert attrgetter(k)(updated_chat.prompt) == v, str(updated_chat)
            else:
                # Expected server-side defaults when no prompt keys are supplied.
                # NOTE(review): wrapped in Chat.LLM (not a prompt type) — presumably
                # only its str() form matters for the comparison below; verify.
                excepted_value = Chat.LLM(
                    client,
                    {
                        "similarity_threshold": 0.2,
                        "keywords_similarity_weight": 0.7,
                        "top_n": 6,
                        "variables": [{"key": "knowledge", "optional": False}],
                        "rerank_model": "",
                        "empty_response": "Sorry! No relevant content was found in the knowledge base!",
                        "opener": "Hi! I'm your assistant. What can I do for you?",
                        "show_quote": True,
                        "prompt": 'You are an intelligent assistant. Please summarize the content of the dataset to answer the question. Please list the data in the dataset and answer in detail. When all dataset content is irrelevant to the question, your answer must include the sentence "The answer you are looking for is not found in the dataset!" Answers need to consider chat history.\n      Here is the knowledge base:\n      {knowledge}\n      The above is the knowledge base.',
                    },
                )
                assert str(updated_chat.prompt) == str(excepted_value), str(updated_chat)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_chat_assistant_management/test_update_chat_assistant.py",
"license": "Apache License 2.0",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_add_chunk.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
from time import sleep
import pytest
from ragflow_sdk import Chunk
def validate_chunk_details(dataset_id: str, document_id: str, payload: dict, chunk: Chunk):
    """Assert that *chunk* reflects the ids and fields supplied in *payload*.

    Optional fields (important_keywords, questions) are checked only when
    present in the payload; questions are compared after the server-side
    normalization (each entry stringified and stripped, blanks dropped).
    """
    assert dataset_id == chunk.dataset_id
    assert document_id == chunk.document_id
    assert payload["content"] == chunk.content
    if "important_keywords" in payload:
        assert payload["important_keywords"] == chunk.important_keywords
    if "questions" in payload:
        expected_questions = []
        for question in payload.get("questions", []):
            text = str(question).strip()
            if text:
                expected_questions.append(text)
        assert chunk.questions == expected_questions
class TestAddChunk:
    """Tests for adding chunks to a document via the SDK `add_chunk` API.

    Fixtures:
        add_document: yields (dataset, document) with a document uploaded.
    """
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            ({"content": None}, "not instance of"),
            ({"content": ""}, "`content` is required"),
            ({"content": 1}, "not instance of"),
            ({"content": "a"}, ""),
            ({"content": " "}, "`content` is required"),
            ({"content": "\n!?ใ๏ผ๏ผ๏ผ\"'"}, ""),
        ],
    )
    def test_content(self, add_document, payload, expected_message):
        """`content` must be a non-blank string; valid content increments the chunk count."""
        dataset, document = add_document
        chunks_count = len(document.list_chunks())
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.add_chunk(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunk = document.add_chunk(**payload)
            validate_chunk_details(dataset.id, document.id, payload, chunk)
            # Give the indexing engine time to register the new chunk.
            sleep(1)
            chunks = document.list_chunks()
            assert len(chunks) == chunks_count + 1, str(chunks)
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            ({"content": "chunk test important_keywords 1", "important_keywords": ["a", "b", "c"]}, ""),
            ({"content": "chunk test important_keywords 2", "important_keywords": [""]}, ""),
            ({"content": "chunk test important_keywords 3", "important_keywords": [1]}, "not instance of"),
            ({"content": "chunk test important_keywords 4", "important_keywords": ["a", "a"]}, ""),
            ({"content": "chunk test important_keywords 5", "important_keywords": "abc"}, "not instance of"),
            ({"content": "chunk test important_keywords 6", "important_keywords": 123}, "not instance of"),
        ],
    )
    def test_important_keywords(self, add_document, payload, expected_message):
        """`important_keywords` must be a list of strings; duplicates/empties accepted."""
        dataset, document = add_document
        chunks_count = len(document.list_chunks())
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.add_chunk(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunk = document.add_chunk(**payload)
            validate_chunk_details(dataset.id, document.id, payload, chunk)
            sleep(1)
            chunks = document.list_chunks()
            assert len(chunks) == chunks_count + 1, str(chunks)
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            ({"content": "chunk test test_questions 1", "questions": ["a", "b", "c"]}, ""),
            ({"content": "chunk test test_questions 2", "questions": [""]}, ""),
            ({"content": "chunk test test_questions 3", "questions": [1]}, "not instance of"),
            ({"content": "chunk test test_questions 4", "questions": ["a", "a"]}, ""),
            ({"content": "chunk test test_questions 5", "questions": "abc"}, "not instance of"),
            ({"content": "chunk test test_questions 6", "questions": 123}, "not instance of"),
        ],
    )
    def test_questions(self, add_document, payload, expected_message):
        """`questions` must be a list of strings; duplicates/empties accepted."""
        dataset, document = add_document
        chunks_count = len(document.list_chunks())
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.add_chunk(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunk = document.add_chunk(**payload)
            validate_chunk_details(dataset.id, document.id, payload, chunk)
            sleep(1)
            chunks = document.list_chunks()
            assert len(chunks) == chunks_count + 1, str(chunks)
    @pytest.mark.p3
    def test_repeated_add_chunk(self, add_document):
        """Adding the same content twice is deduplicated — the count rises only once."""
        payload = {"content": "chunk test repeated_add_chunk"}
        dataset, document = add_document
        chunks_count = len(document.list_chunks())
        chunk1 = document.add_chunk(**payload)
        validate_chunk_details(dataset.id, document.id, payload, chunk1)
        sleep(1)
        chunks = document.list_chunks()
        assert len(chunks) == chunks_count + 1, str(chunks)
        chunk2 = document.add_chunk(**payload)
        validate_chunk_details(dataset.id, document.id, payload, chunk2)
        sleep(1)
        chunks = document.list_chunks()
        assert len(chunks) == chunks_count + 1, str(chunks)
    @pytest.mark.p2
    def test_add_chunk_to_deleted_document(self, add_document):
        """Adding a chunk to a deleted document raises an ownership error."""
        dataset, document = add_document
        dataset.delete_documents(ids=[document.id])
        with pytest.raises(Exception) as exception_info:
            document.add_chunk(content="chunk test")
        assert f"You don't own the document {document.id}" in str(exception_info.value), str(exception_info.value)
    @pytest.mark.skip(reason="issues/6411")
    @pytest.mark.p3
    def test_concurrent_add_chunk(self, add_document):
        """50 concurrent chunk additions all land (skipped pending issue 6411)."""
        count = 50
        _, document = add_document
        initial_chunk_count = len(document.list_chunks())
        def add_chunk_task(i):
            return document.add_chunk(content=f"chunk test concurrent {i}")
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(add_chunk_task, i) for i in range(count)]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses
        sleep(5)
        assert len(document.list_chunks(page_size=100)) == initial_chunk_count + count
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_add_chunk.py",
"license": "Apache License 2.0",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_delete_chunks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_add_chunks
class TestChunksDeletion:
    """Tests for deleting chunks via the SDK `delete_chunks` API.

    Fixtures:
        add_document: yields (dataset, document).
        add_chunks_func: yields (dataset, document, chunks); the remaining-count
            assertions below imply one extra chunk exists beyond those returned.
    """
    @pytest.mark.parametrize(
        "payload",
        [
            pytest.param(lambda r: {"ids": ["invalid_id"] + r}, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r[:1] + ["invalid_id"] + r[1:4]}, marks=pytest.mark.p1),
            pytest.param(lambda r: {"ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
        ],
    )
    def test_delete_partial_invalid_id(self, add_chunks_func, payload):
        """Mixed valid/invalid batches raise, but the valid ids are still removed."""
        _, document, chunks = add_chunks_func
        chunk_ids = [chunk.id for chunk in chunks]
        payload = payload(chunk_ids)
        with pytest.raises(Exception) as exception_info:
            document.delete_chunks(**payload)
        assert "rm_chunk deleted chunks" in str(exception_info.value), str(exception_info.value)
        remaining_chunks = document.list_chunks()
        assert len(remaining_chunks) == 1, str(remaining_chunks)
    @pytest.mark.p3
    def test_repeated_deletion(self, add_chunks_func):
        """Deleting the same ids twice raises a count-mismatch error the second time."""
        _, document, chunks = add_chunks_func
        chunk_ids = [chunk.id for chunk in chunks]
        document.delete_chunks(ids=chunk_ids)
        with pytest.raises(Exception) as exception_info:
            document.delete_chunks(ids=chunk_ids)
        assert "rm_chunk deleted chunks 0, expect" in str(exception_info.value), str(exception_info.value)
    @pytest.mark.p3
    def test_duplicate_deletion(self, add_chunks_func):
        """Duplicate ids within a single request are tolerated."""
        _, document, chunks = add_chunks_func
        chunk_ids = [chunk.id for chunk in chunks]
        document.delete_chunks(ids=chunk_ids * 2)
        remaining_chunks = document.list_chunks()
        assert len(remaining_chunks) == 1, str(remaining_chunks)
    @pytest.mark.p3
    def test_concurrent_deletion(self, add_document):
        """100 single-id deletions issued concurrently all complete."""
        count = 100
        _, document = add_document
        chunks = batch_add_chunks(document, count)
        chunk_ids = [chunk.id for chunk in chunks]
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(document.delete_chunks, ids=[chunk_id]) for chunk_id in chunk_ids]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses
    @pytest.mark.p3
    def test_delete_1k(self, add_document):
        """Bulk-delete 1000 chunks in a single request."""
        count = 1_000
        _, document = add_document
        chunks = batch_add_chunks(document, count)
        chunk_ids = [chunk.id for chunk in chunks]
        from time import sleep
        # Give the indexing engine time to register the new chunks before deleting.
        sleep(1)
        document.delete_chunks(ids=chunk_ids)
        remaining_chunks = document.list_chunks()
        assert len(remaining_chunks) == 0, str(remaining_chunks)
    @pytest.mark.parametrize(
        "payload, expected_message, remaining",
        [
            pytest.param(None, "TypeError", 5, marks=pytest.mark.skip),
            pytest.param({"ids": ["invalid_id"]}, "rm_chunk deleted chunks 0, expect 1", 5, marks=pytest.mark.p3),
            pytest.param("not json", "UnboundLocalError", 5, marks=pytest.mark.skip(reason="pull/6376")),
            pytest.param(lambda r: {"ids": r[:1]}, "", 4, marks=pytest.mark.p3),
            pytest.param(lambda r: {"ids": r}, "", 1, marks=pytest.mark.p1),
            pytest.param({"ids": []}, "", 0, marks=pytest.mark.p3),
        ],
    )
    def test_basic_scenarios(self, add_chunks_func, payload, expected_message, remaining):
        """Delete with invalid/partial/full/empty id payloads and verify the remaining count."""
        _, document, chunks = add_chunks_func
        chunk_ids = [chunk.id for chunk in chunks]
        # Callable payloads are factories that need the real chunk ids.
        if callable(payload):
            payload = payload(chunk_ids)
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.delete_chunks(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            document.delete_chunks(**payload)
            remaining_chunks = document.list_chunks()
            assert len(remaining_chunks) == remaining, str(remaining_chunks)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_delete_chunks.py",
"license": "Apache License 2.0",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_list_chunks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import batch_add_chunks
from utils.engine_utils import get_doc_engine
class TestChunksList:
    """Tests for Document.list_chunks: pagination, keyword/id filtering, and concurrency.

    Fixtures (defined elsewhere in the suite):
      - add_chunks: yields (dataset, document, chunks); expectations below imply
        it seeds 5 chunks — confirm against the fixture definition.
      - add_document: yields (dataset, document) with no chunks seeded.
    """

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size, expected_message",
        [
            ({"page": None, "page_size": 2}, 2, ""),
            pytest.param({"page": 0, "page_size": 2}, 0, "ValueError('Search does not support negative slicing.')", marks=pytest.mark.skip),
            ({"page": 2, "page_size": 2}, 2, ""),
            ({"page": 3, "page_size": 2}, 1, ""),
            ({"page": "3", "page_size": 2}, 1, ""),
            pytest.param({"page": -1, "page_size": 2}, 0, "ValueError('Search does not support negative slicing.')", marks=pytest.mark.skip),
            pytest.param({"page": "a", "page_size": 2}, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_page(self, add_chunks, params, expected_page_size, expected_message):
        """Paging returns the expected slice of chunks, or raises with the expected message."""
        _, document, _ = add_chunks
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.list_chunks(**params)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = document.list_chunks(**params)
            assert len(chunks) == expected_page_size, str(chunks)

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size, expected_message",
        [
            ({"page_size": None}, 5, ""),
            pytest.param({"page_size": 0}, 5, ""),
            ({"page_size": 1}, 1, ""),
            ({"page_size": 6}, 5, ""),
            ({"page_size": "1"}, 1, ""),
            pytest.param({"page_size": -1}, 5, "", marks=pytest.mark.skip),
            pytest.param({"page_size": "a"}, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_page_size(self, add_chunks, params, expected_page_size, expected_message):
        """page_size caps the returned count; requesting more than exist returns all of them."""
        _, document, _ = add_chunks
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.list_chunks(**params)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = document.list_chunks(**params)
            assert len(chunks) == expected_page_size, str(chunks)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "params, expected_page_size",
        [
            ({"keywords": None}, 5),
            ({"keywords": ""}, 5),
            ({"keywords": "1"}, 1),
            ({"keywords": "chunk"}, 4),
            pytest.param({"keywords": "ragflow"}, 1, marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6509")),
            pytest.param({"keywords": "ragflow"}, 5, marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") != "infinity", reason="issues/6509")),
            ({"keywords": "unknown"}, 0),
        ],
    )
    def test_keywords(self, add_chunks, params, expected_page_size):
        """Keyword filtering; the "ragflow" expectations differ by doc engine (issues/6509)."""
        _, document, _ = add_chunks
        # Runtime re-check of the engine via the SDK, duplicating the skipif
        # marks above — presumably guards against the DOC_ENGINE env var not
        # matching the actual server; confirm with the fixture setup.
        if params.get("keywords") == "ragflow":
            doc_engine = get_doc_engine(document.rag)
            if doc_engine == "infinity" and expected_page_size == 1:
                pytest.skip("issues/6509")
            if doc_engine != "infinity" and expected_page_size == 5:
                pytest.skip("issues/6509")
        chunks = document.list_chunks(**params)
        assert len(chunks) == expected_page_size, str(chunks)

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "chunk_id, expected_page_size, expected_message",
        [
            (None, 5, ""),
            ("", 5, ""),
            pytest.param(lambda r: r[0], 1, "", marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6499")),
            pytest.param("unknown", 0, """AttributeError("\'NoneType\' object has no attribute \'keys\'")""", marks=pytest.mark.skip),
        ],
    )
    def test_id(self, add_chunks, chunk_id, expected_page_size, expected_message):
        """Filtering by chunk id; callable cases select a real id from the fixture's chunks."""
        _, document, chunks = add_chunks
        # Runtime duplicate of the skipif mark for the callable (real-id) case.
        if callable(chunk_id) and get_doc_engine(document.rag) == "infinity":
            pytest.skip("issues/6499")
        chunk_ids = [chunk.id for chunk in chunks]
        if callable(chunk_id):
            params = {"id": chunk_id(chunk_ids)}
        else:
            params = {"id": chunk_id}
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.list_chunks(**params)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = document.list_chunks(**params)
            if params["id"] in [None, ""]:
                # No id filter: all chunks come back.
                assert len(chunks) == expected_page_size, str(chunks)
            else:
                # Id filter: the matching chunk is returned first.
                assert chunks[0].id == params["id"], str(chunks)

    @pytest.mark.p3
    def test_concurrent_list(self, add_chunks):
        """100 parallel list_chunks calls all complete and each returns 5 chunks."""
        _, document, _ = add_chunks
        count = 100
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(document.list_chunks) for _ in range(count)]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses
        # result() also re-raises any exception thrown inside a worker.
        assert all(len(future.result()) == 5 for future in futures)

    @pytest.mark.p1
    def test_default(self, add_document):
        """Default listing is capped at 30 chunks even when 31 were added."""
        _, document = add_document
        batch_add_chunks(document, 31)
        from time import sleep

        # NOTE(review): fixed 3s wait — presumably for the doc engine to finish
        # indexing the new chunks before listing; confirm and consider polling.
        sleep(3)
        chunks = document.list_chunks()
        assert len(chunks) == 30, str(chunks)

    @pytest.mark.p2
    def test_list_chunks_invalid_document_id_raises(self, add_chunks):
        """Listing chunks through a non-existent document id raises."""
        _, document, _ = add_chunks
        # Construct a Document of the same SDK class but with a bogus id.
        invalid_document = document.__class__(
            document.rag,
            {"id": "missing-document-id-for-chunks", "dataset_id": document.dataset_id},
        )
        with pytest.raises(Exception) as exception_info:
            invalid_document.list_chunks()
        assert str(exception_info.value), exception_info
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_list_chunks.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_retrieval_chunks.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
DOC_ENGINE = (os.getenv("DOC_ENGINE") or "").lower()
class TestChunksRetrieval:
    """Tests for client.retrieve (cross-dataset chunk retrieval): required args,
    pagination, similarity weighting, top_k, rerank, keyword mode, concurrency.

    Fixture add_chunks yields (dataset, document, chunks); the expected counts
    below imply 4 chunks match the question "chunk" — confirm against fixture.
    """

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_page_size, expected_message",
        [
            ({"question": "chunk", "dataset_ids": None}, 4, ""),
            ({"question": "chunk", "document_ids": None}, 0, "missing 1 required positional argument"),
            ({"question": "chunk", "dataset_ids": None, "document_ids": None}, 4, ""),
            ({"question": "chunk"}, 0, "missing 1 required positional argument"),
        ],
    )
    def test_basic_scenarios(self, client, add_chunks, payload, expected_page_size, expected_message):
        """retrieve requires dataset_ids; document_ids alone is not sufficient."""
        dataset, document, _ = add_chunks
        # Placeholder None values in the table are replaced with real ids here.
        if "dataset_ids" in payload:
            payload["dataset_ids"] = [dataset.id]
        if "document_ids" in payload:
            payload["document_ids"] = [document.id]
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.retrieve(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = client.retrieve(**payload)
            assert len(chunks) == expected_page_size, str(chunks)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_page_size, expected_message",
        [
            pytest.param(
                {"page": None, "page_size": 2},
                2,
                """TypeError("int() argument must be a string, a bytes-like object or a real number, not \'NoneType\'")""",
                marks=pytest.mark.skip,
            ),
            pytest.param(
                {"page": 0, "page_size": 2},
                0,
                "ValueError('Search does not support negative slicing.')",
                marks=pytest.mark.skip,
            ),
            ({"page": 2, "page_size": 2}, 2, ""),
            ({"page": 3, "page_size": 2}, 0, ""),
            ({"page": "3", "page_size": 2}, 0, ""),
            pytest.param(
                {"page": -1, "page_size": 2},
                0,
                "ValueError('Search does not support negative slicing.')",
                marks=pytest.mark.skip,
            ),
            pytest.param(
                {"page": "a", "page_size": 2},
                0,
                """ValueError("invalid literal for int() with base 10: \'a\'")""",
                marks=pytest.mark.skip,
            ),
        ],
    )
    def test_page(self, client, add_chunks, payload, expected_page_size, expected_message):
        """Paging retrieval results; pages past the 4 matches come back empty."""
        dataset, _, _ = add_chunks
        payload.update({"question": "chunk", "dataset_ids": [dataset.id]})
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.retrieve(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = client.retrieve(**payload)
            assert len(chunks) == expected_page_size, str(chunks)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "payload, expected_page_size, expected_message",
        [
            pytest.param(
                {"page_size": None},
                0,
                """TypeError("int() argument must be a string, a bytes-like object or a real number, not \'NoneType\'")""",
                marks=pytest.mark.skip,
            ),
            pytest.param({"page_size": 1}, 1, "", marks=pytest.mark.skip(reason="issues/10692")),
            ({"page_size": 5}, 4, ""),
            pytest.param({"page_size": "1"}, 1, "", marks=pytest.mark.skip(reason="issues/10692")),
            pytest.param(
                {"page_size": "a"},
                0,
                """ValueError("invalid literal for int() with base 10: \'a\'")""",
                marks=pytest.mark.skip,
            ),
        ],
    )
    def test_page_size(self, client, add_chunks, payload, expected_page_size, expected_message):
        """page_size bounds retrieval; asking for 5 still yields only the 4 matches."""
        dataset, _, _ = add_chunks
        payload.update({"question": "chunk", "dataset_ids": [dataset.id]})
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.retrieve(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = client.retrieve(**payload)
            assert len(chunks) == expected_page_size, str(chunks)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "payload, expected_page_size, expected_message",
        [
            ({"vector_similarity_weight": 0}, 4, ""),
            ({"vector_similarity_weight": 0.5}, 4, ""),
            ({"vector_similarity_weight": 10}, 4, ""),
            pytest.param(
                {"vector_similarity_weight": "a"},
                0,
                """ValueError("could not convert string to float: 'a'")""",
                marks=pytest.mark.skip,
            ),
        ],
    )
    def test_vector_similarity_weight(self, client, add_chunks, payload, expected_page_size, expected_message):
        """Varying vector_similarity_weight does not change which chunks match."""
        dataset, _, _ = add_chunks
        payload.update({"question": "chunk", "dataset_ids": [dataset.id]})
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.retrieve(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = client.retrieve(**payload)
            assert len(chunks) == expected_page_size, str(chunks)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_page_size, expected_message",
        [
            ({"top_k": 10}, 4, ""),
            pytest.param(
                {"top_k": 1},
                4,
                "",
                marks=pytest.mark.skipif(DOC_ENGINE in ["infinity", "opensearch"], reason="Infinity"),
            ),
            pytest.param(
                {"top_k": 1},
                1,
                "",
                marks=pytest.mark.skipif(DOC_ENGINE in ["", "opensearch", "elasticsearch"], reason="elasticsearch"),
            ),
            pytest.param(
                {"top_k": -1},
                4,
                "must be greater than 0",
                marks=pytest.mark.skipif(DOC_ENGINE in ["infinity", "opensearch"], reason="Infinity"),
            ),
            pytest.param(
                {"top_k": -1},
                4,
                "3014",
                marks=pytest.mark.skipif(DOC_ENGINE in ["", "opensearch", "elasticsearch"], reason="elasticsearch"),
            ),
            pytest.param(
                {"top_k": "a"},
                0,
                """ValueError("invalid literal for int() with base 10: \'a\'")""",
                marks=pytest.mark.skip,
            ),
        ],
    )
    def test_top_k(self, client, add_chunks, payload, expected_page_size, expected_message):
        """top_k behaviour differs by doc engine, hence the complementary skipif pairs."""
        dataset, _, _ = add_chunks
        payload.update({"question": "chunk", "dataset_ids": [dataset.id]})
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.retrieve(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = client.retrieve(**payload)
            assert len(chunks) == expected_page_size, str(chunks)

    # Entire rerank scenario currently disabled (class-wide skip mark below).
    @pytest.mark.skip
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            ({"rerank_id": "BAAI/bge-reranker-v2-m3"}, ""),
            pytest.param({"rerank_id": "unknown"}, "LookupError('Model(unknown) not authorized')", marks=pytest.mark.skip),
        ],
    )
    def test_rerank_id(self, client, add_chunks, payload, expected_message):
        """Retrieval with a rerank model returns a non-empty result (skipped)."""
        dataset, _, _ = add_chunks
        payload.update({"question": "chunk", "dataset_ids": [dataset.id]})
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.retrieve(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = client.retrieve(**payload)
            assert len(chunks) > 0, str(chunks)

    @pytest.mark.skip
    @pytest.mark.parametrize(
        "payload, expected_page_size, expected_message",
        [
            ({"keyword": True}, 5, ""),
            ({"keyword": "True"}, 5, ""),
            ({"keyword": False}, 5, ""),
            ({"keyword": "False"}, 5, ""),
            ({"keyword": None}, 5, ""),
        ],
    )
    def test_keyword(self, client, add_chunks, payload, expected_page_size, expected_message):
        """Keyword-mode toggle variants (skipped)."""
        dataset, _, _ = add_chunks
        payload.update({"question": "chunk test", "dataset_ids": [dataset.id]})
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                client.retrieve(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunks = client.retrieve(**payload)
            assert len(chunks) == expected_page_size, str(chunks)

    @pytest.mark.p3
    def test_concurrent_retrieval(self, client, add_chunks):
        """100 parallel retrieve calls all complete.

        NOTE(review): future.result() is never called, so exceptions raised
        inside workers are swallowed — only completion is verified.
        """
        dataset, _, _ = add_chunks
        count = 100
        payload = {"question": "chunk", "dataset_ids": [dataset.id]}
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(client.retrieve, **payload) for _ in range(count)]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_retrieval_chunks.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_update_chunk.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from random import randint
import pytest
class TestUpdatedChunk:
    """Tests for Chunk.update: content, keywords, questions, availability flag,
    repeated/concurrent updates, and updates against a deleted document.

    Fixture add_chunks yields (dataset, document, chunks).
    """

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            ({"content": None}, ""),
            pytest.param(
                {"content": ""},
                """APIRequestFailedError(\'Error code: 400, with error text {"error":{"code":"1213","message":"ๆชๆญฃๅธธๆฅๆถๅฐpromptๅๆฐใ"}}\')""",
                marks=pytest.mark.skip(reason="issues/6541"),
            ),
            pytest.param(
                {"content": 1},
                "TypeError('expected string or bytes-like object')",
                marks=pytest.mark.skip,
            ),
            ({"content": "update chunk"}, ""),
            pytest.param(
                {"content": " "},
                """APIRequestFailedError(\'Error code: 400, with error text {"error":{"code":"1213","message":"ๆชๆญฃๅธธๆฅๆถๅฐpromptๅๆฐใ"}}\')""",
                marks=pytest.mark.skip(reason="issues/6541"),
            ),
            ({"content": "\n!?ใ๏ผ๏ผ๏ผ\"'"}, ""),
        ],
    )
    def test_content(self, add_chunks, payload, expected_message):
        """Updating chunk content accepts None/strings incl. punctuation; bad types raise."""
        _, _, chunks = add_chunks
        chunk = chunks[0]
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chunk.update(payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunk.update(payload)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            ({"important_keywords": ["a", "b", "c"]}, ""),
            ({"important_keywords": [""]}, ""),
            ({"important_keywords": [1]}, "TypeError('sequence item 0: expected str instance, int found')"),
            ({"important_keywords": ["a", "a"]}, ""),
            ({"important_keywords": "abc"}, "`important_keywords` should be a list"),
            ({"important_keywords": 123}, "`important_keywords` should be a list"),
        ],
    )
    def test_important_keywords(self, add_chunks, payload, expected_message):
        """important_keywords must be a list of strings; duplicates/empties are accepted."""
        _, _, chunks = add_chunks
        chunk = chunks[0]
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chunk.update(payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunk.update(payload)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            ({"questions": ["a", "b", "c"]}, ""),
            ({"questions": [""]}, ""),
            ({"questions": [1]}, "TypeError('sequence item 0: expected str instance, int found')"),
            ({"questions": ["a", "a"]}, ""),
            ({"questions": "abc"}, "`questions` should be a list"),
            ({"questions": 123}, "`questions` should be a list"),
        ],
    )
    def test_questions(self, add_chunks, payload, expected_message):
        """questions follows the same list-of-strings contract as important_keywords."""
        _, _, chunks = add_chunks
        chunk = chunks[0]
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chunk.update(payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunk.update(payload)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            ({"available": True}, ""),
            pytest.param({"available": "True"}, """ValueError("invalid literal for int() with base 10: \'True\'")""", marks=pytest.mark.skip),
            ({"available": 1}, ""),
            ({"available": False}, ""),
            pytest.param({"available": "False"}, """ValueError("invalid literal for int() with base 10: \'False\'")""", marks=pytest.mark.skip),
            ({"available": 0}, ""),
        ],
    )
    def test_available(self, add_chunks, payload, expected_message):
        """available accepts booleans and 0/1."""
        _, _, chunks = add_chunks
        chunk = chunks[0]
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                chunk.update(payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            chunk.update(payload)

    @pytest.mark.p3
    def test_repeated_update_chunk(self, add_chunks):
        """Two consecutive content updates on the same chunk both succeed."""
        _, _, chunks = add_chunks
        chunk = chunks[0]
        chunk.update({"content": "chunk test 1"})
        chunk.update({"content": "chunk test 2"})

    @pytest.mark.p3
    @pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6554")
    def test_concurrent_update_chunk(self, add_chunks):
        """50 concurrent updates spread across the first 4 chunks all complete."""
        count = 50
        _, _, chunks = add_chunks
        with ThreadPoolExecutor(max_workers=5) as executor:
            # randint(0, 3) targets one of the first four chunks per update.
            futures = [executor.submit(chunks[randint(0, 3)].update, {"content": f"update chunk test {i}"}) for i in range(count)]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses

    @pytest.mark.p3
    def test_update_chunk_to_deleted_document(self, add_chunks):
        """Updating a chunk after its parent document was deleted raises a not-found error."""
        dataset, document, chunks = add_chunks
        dataset.delete_documents(ids=[document.id])
        with pytest.raises(Exception) as exception_info:
            chunks[0].update({})
        # Either error wording is acceptable, depending on server version/engine.
        assert str(exception_info.value) in [f"You don't own the document {chunks[0].document_id}", f"Can't find this chunk {chunks[0].id}"], str(exception_info.value)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_update_chunk.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_file_management_within_dataset/test_delete_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import bulk_upload_documents
class TestDocumentsDeletion:
    """Tests for Dataset.delete_documents: id validation, partial-invalid batches,
    repeated and duplicate deletion.

    Fixture add_documents_func yields (dataset, documents); remaining-count
    expectations imply 3 documents are seeded — confirm against the fixture.
    """

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "payload, expected_message, remaining",
        [
            ({"ids": None}, "", 0),
            ({"ids": []}, "", 0),
            ({"ids": ["invalid_id"]}, "Documents not found: ['invalid_id']", 3),
            ({"ids": ["\n!?ใ๏ผ๏ผ๏ผ\"'"]}, "Documents not found: ['\\n!?ใ๏ผ๏ผ๏ผ\"\\'']", 3),
            ("not json", "must be a mapping", 3),
            (lambda r: {"ids": r[:1]}, "", 2),
            (lambda r: {"ids": r}, "", 0),
        ],
    )
    def test_basic_scenarios(
        self,
        add_documents_func,
        payload,
        expected_message,
        remaining,
    ):
        """ids=None/[] deletes everything; unknown ids raise; callables pick real ids."""
        dataset, documents = add_documents_func
        # Callable payloads are resolved against the freshly-uploaded doc ids.
        if callable(payload):
            payload = payload([document.id for document in documents])
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                dataset.delete_documents(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            dataset.delete_documents(**payload)
        documents = dataset.list_documents()
        assert len(documents) == remaining, str(documents)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "payload",
        [
            lambda r: {"ids": ["invalid_id"] + r},
            lambda r: {"ids": r[:1] + ["invalid_id"] + r[1:3]},
            lambda r: {"ids": r + ["invalid_id"]},
        ],
    )
    def test_delete_partial_invalid_id(self, add_documents_func, payload):
        """A batch mixing valid and invalid ids raises, yet the valid ids are still deleted."""
        dataset, documents = add_documents_func
        payload = payload([document.id for document in documents])
        with pytest.raises(Exception) as exception_info:
            dataset.delete_documents(**payload)
        assert "Documents not found: ['invalid_id']" in str(exception_info.value), str(exception_info.value)
        # All valid documents were removed despite the raised error.
        documents = dataset.list_documents()
        assert len(documents) == 0, str(documents)

    @pytest.mark.p2
    def test_repeated_deletion(self, add_documents_func):
        """Deleting already-deleted ids raises 'Documents not found'."""
        dataset, documents = add_documents_func
        document_ids = [document.id for document in documents]
        dataset.delete_documents(ids=document_ids)
        with pytest.raises(Exception) as exception_info:
            dataset.delete_documents(ids=document_ids)
        assert "Documents not found" in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p2
    def test_duplicate_deletion(self, add_documents_func):
        """Duplicate ids within one request are tolerated; everything is deleted once."""
        dataset, documents = add_documents_func
        document_ids = [document.id for document in documents]
        dataset.delete_documents(ids=document_ids + document_ids)
        assert len(dataset.list_documents()) == 0, str(dataset.list_documents())
@pytest.mark.p3
def test_concurrent_deletion(add_dataset, tmp_path):
    """Delete 100 uploaded documents concurrently; every deletion must succeed.

    NOTE(review): declared without ``self`` — if this is intended to be a method
    of TestDocumentsDeletion, pytest would bind the instance to ``add_dataset``
    instead of the fixture; confirm it is meant to be module-level.
    """
    count = 100
    dataset = add_dataset
    documents = bulk_upload_documents(dataset, count, tmp_path)

    def delete_doc(doc_id):
        # Worker: delete a single document by id.
        dataset.delete_documents(ids=[doc_id])

    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(delete_doc, doc.id) for doc in documents]
        responses = list(as_completed(futures))
    assert len(responses) == count, responses
    # Fix: as_completed() alone never re-raises worker exceptions, so a failed
    # deletion used to pass silently. Calling result() surfaces any failure.
    for future in futures:
        future.result()
@pytest.mark.p3
def test_delete_1k(add_dataset, tmp_path):
    """Upload 1,000 documents, then remove them all with one delete call."""
    total = 1_000
    dataset = add_dataset
    uploaded = bulk_upload_documents(dataset, total, tmp_path)
    # Listing with a generous page_size confirms every upload landed.
    listed = dataset.list_documents(page_size=total * 2)
    assert len(listed) == total
    doc_ids = [document.id for document in uploaded]
    dataset.delete_documents(ids=doc_ids)
    assert len(dataset.list_documents()) == 0
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_file_management_within_dataset/test_delete_documents.py",
"license": "Apache License 2.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_file_management_within_dataset/test_download_document.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import bulk_upload_documents
from utils import compare_by_hash
@pytest.mark.p1
@pytest.mark.parametrize(
    "generate_test_files",
    [
        "docx",
        "excel",
        "ppt",
        "image",
        "pdf",
        "txt",
        "md",
        "json",
        "eml",
        "html",
    ],
    indirect=True,
)
def test_file_type_validation(add_dataset, generate_test_files, request):
    """Upload one file of each supported type, download it back, and verify integrity by hash."""
    dataset = add_dataset
    # The indirect fixture maps each parametrized type name to a generated file path.
    file_type = request.node.callspec.params["generate_test_files"]
    source_path = generate_test_files[file_type]
    blob = source_path.read_bytes()
    uploaded = dataset.upload_documents([{"display_name": source_path.name, "blob": blob}])
    download_path = source_path.with_stem("ragflow_test_download")
    for document in uploaded:
        download_path.write_bytes(document.download())
        assert compare_by_hash(source_path, download_path)
class TestDocumentDownload:
    """Download behaviour for documents within a dataset."""

    @pytest.mark.p3
    def test_same_file_repeat(self, add_documents, tmp_path, ragflow_tmp_dir):
        """Downloading the same document 5 times yields the original content every time."""
        num = 5
        _, documents = add_documents
        for i in range(num):
            download_path = tmp_path / f"ragflow_test_download_{i}.txt"
            with download_path.open("wb") as f:
                f.write(documents[0].download())
            assert compare_by_hash(ragflow_tmp_dir / "ragflow_test_upload_0.txt", download_path), f"Downloaded file {i} does not match original"

    @pytest.mark.p2
    def test_download_error_json_raises(self, add_documents):
        """Downloading through a non-existent document id raises."""
        dataset, documents = add_documents
        document = documents[0]
        # Same SDK Document class, but pointing at an id that does not exist.
        invalid_document = document.__class__(
            document.rag,
            {"id": "missing-document-id-for-download", "dataset_id": dataset.id},
        )
        with pytest.raises(Exception) as exception_info:
            invalid_document.download()
        assert str(exception_info.value), exception_info
@pytest.mark.p3
def test_concurrent_download(add_dataset, tmp_path):
    """Download 20 documents concurrently and verify each against its upload by hash.

    NOTE(review): declared without ``self`` — if this is intended to be a method
    of TestDocumentDownload, pytest would bind the instance to ``add_dataset``
    instead of the fixture; confirm it is meant to be module-level.
    """
    count = 20
    dataset = add_dataset
    documents = bulk_upload_documents(dataset, count, tmp_path)

    def download_doc(document, i):
        # Worker: write document i's content next to its uploaded source file.
        download_path = tmp_path / f"ragflow_test_download_{i}.txt"
        with download_path.open("wb") as f:
            f.write(document.download())

    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(download_doc, documents[i], i) for i in range(count)]
        responses = list(as_completed(futures))
    assert len(responses) == count, responses
    # Fix: as_completed() alone never re-raises worker exceptions; without this,
    # a failed download only surfaced later as a confusing missing-file error
    # in the hash comparison. result() reports the real failure directly.
    for future in futures:
        future.result()
    for i in range(count):
        assert compare_by_hash(
            tmp_path / f"ragflow_test_upload_{i}.txt",
            tmp_path / f"ragflow_test_download_{i}.txt",
        )
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_file_management_within_dataset/test_download_document.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_file_management_within_dataset/test_list_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
class TestDocumentsList:
    @pytest.mark.p1
    def test_default(self, add_documents):
        """Default listing returns all 5 seeded documents."""
        dataset, _ = add_documents
        documents = dataset.list_documents()
        assert len(documents) == 5, str(documents)
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size, expected_message",
        [
            ({"page": None, "page_size": 2}, 2, "not instance of"),
            ({"page": 0, "page_size": 2}, 2, ""),
            ({"page": 2, "page_size": 2}, 2, ""),
            ({"page": 3, "page_size": 2}, 1, ""),
            ({"page": "3", "page_size": 2}, 1, "not instance of"),
            pytest.param(
                {"page": -1, "page_size": 2},
                0,
                "Invalid page number",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
            pytest.param(
                {"page": "a", "page_size": 2},
                0,
                "Invalid page value",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
        ],
    )
    def test_page(self, add_documents, params, expected_page_size, expected_message):
        """Paging document lists; non-int page values are rejected ("not instance of")."""
        dataset, _ = add_documents
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                dataset.list_documents(**params)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            documents = dataset.list_documents(**params)
            assert len(documents) == expected_page_size, str(documents)
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size, expected_message",
        [
            ({"page_size": None}, 5, "not instance of"),
            ({"page_size": 0}, 0, ""),
            ({"page_size": 1}, 1, ""),
            ({"page_size": 6}, 5, ""),
            ({"page_size": "1"}, 1, "not instance of"),
            pytest.param(
                {"page_size": -1},
                0,
                "Invalid page size",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
            pytest.param(
                {"page_size": "a"},
                0,
                "Invalid page size value",
                marks=pytest.mark.skip(reason="issues/5851"),
            ),
        ],
    )
    def test_page_size(self, add_documents, params, expected_page_size, expected_message):
        """page_size caps results; non-int values are rejected ("not instance of")."""
        dataset, _ = add_documents
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                dataset.list_documents(**params)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            documents = dataset.list_documents(**params)
            assert len(documents) == expected_page_size, str(documents)
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params, expected_message",
        [
            ({"orderby": None}, "not instance of"),
            ({"orderby": "create_time"}, ""),
            ({"orderby": "update_time"}, ""),
            pytest.param({"orderby": "name", "desc": "False"}, "", marks=pytest.mark.skip(reason="issues/5851")),
            pytest.param({"orderby": "unknown"}, "orderby should be create_time or update_time", marks=pytest.mark.skip(reason="issues/5851")),
        ],
    )
    def test_orderby(self, add_documents, params, expected_message):
        """orderby accepts create_time/update_time; None is rejected as wrong type."""
        dataset, _ = add_documents
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                dataset.list_documents(**params)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            dataset.list_documents(**params)
    @pytest.mark.p3
    @pytest.mark.parametrize(
        "params, expected_message",
        [
            ({"desc": None}, "not instance of"),
            ({"desc": "true"}, "not instance of"),
            ({"desc": "True"}, "not instance of"),
            ({"desc": True}, ""),
            pytest.param({"desc": "false"}, "", marks=pytest.mark.skip(reason="issues/5851")),
            ({"desc": "False"}, "not instance of"),
            ({"desc": False}, ""),
            ({"desc": "False", "orderby": "update_time"}, "not instance of"),
            pytest.param({"desc": "unknown"}, "desc should be true or false", marks=pytest.mark.skip(reason="issues/5851")),
        ],
    )
    def test_desc(self, add_documents, params, expected_message):
        """desc must be a real bool; any string form is rejected as wrong type."""
        dataset, _ = add_documents
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                dataset.list_documents(**params)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            dataset.list_documents(**params)
    @pytest.mark.p2
    @pytest.mark.parametrize(
        "params, expected_num",
        [
            ({"keywords": None}, 5),
            ({"keywords": ""}, 5),
            ({"keywords": "0"}, 1),
            ({"keywords": "ragflow_test_upload"}, 5),
            ({"keywords": "unknown"}, 0),
        ],
    )
    def test_keywords(self, add_documents, params, expected_num):
        """Keyword filtering matches against uploaded file names (ragflow_test_upload_N.txt)."""
        dataset, _ = add_documents
        documents = dataset.list_documents(**params)
        assert len(documents) == expected_num, str(documents)
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_num, expected_message",
        [
            ({"name": None}, 5, ""),
            ({"name": ""}, 5, ""),
            ({"name": "ragflow_test_upload_0.txt"}, 1, ""),
            ({"name": "unknown.txt"}, 0, "You don't own the document unknown.txt"),
        ],
    )
    def test_name(self, add_documents, params, expected_num, expected_message):
        """Exact-name filter returns one match; an unknown name raises an ownership error."""
        dataset, _ = add_documents
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                dataset.list_documents(**params)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            documents = dataset.list_documents(**params)
            assert len(documents) == expected_num, str(documents)
            # When a concrete name was given, verify it is the document returned.
            if params["name"] not in [None, ""]:
                assert documents[0].name == params["name"], str(documents)
@pytest.mark.p1
@pytest.mark.parametrize(
    "document_id, expected_num, expected_message",
    [
        (None, 5, ""),
        ("", 5, ""),
        (lambda docs: docs[0].id, 1, ""),
        ("unknown.txt", 0, "You don't own the document unknown.txt"),
    ],
)
def test_id(self, add_documents, document_id, expected_num, expected_message):
    """Filter the listing by document ``id``; unknown ids raise an ownership error."""
    dataset, documents = add_documents
    # A callable case resolves against a real uploaded document id.
    resolved_id = document_id(documents) if callable(document_id) else document_id
    params = {"id": resolved_id}
    if expected_message:
        with pytest.raises(Exception) as exception_info:
            dataset.list_documents(**params)
        assert expected_message in str(exception_info.value), str(exception_info.value)
        return
    listed = dataset.list_documents(**params)
    assert len(listed) == expected_num, str(listed)
    if resolved_id not in [None, ""]:
        assert listed[0].id == resolved_id, str(listed)
@pytest.mark.p3
@pytest.mark.parametrize(
    "document_id, name, expected_num, expected_message",
    [
        (lambda docs: docs[0].id, "ragflow_test_upload_0.txt", 1, ""),
        (lambda docs: docs[0].id, "ragflow_test_upload_1.txt", 0, ""),
        (lambda docs: docs[0].id, "unknown", 0, "You don't own the document unknown"),
        ("invalid_id", "ragflow_test_upload_0.txt", 0, "You don't own the document invalid_id"),
    ],
)
def test_name_and_id(self, add_documents, document_id, name, expected_num, expected_message):
    """When both ``id`` and ``name`` are given, both must match; bad values raise."""
    dataset, documents = add_documents
    resolved_id = document_id(documents) if callable(document_id) else document_id
    params = {"id": resolved_id, "name": name}
    if expected_message:
        with pytest.raises(Exception) as exception_info:
            dataset.list_documents(**params)
        assert expected_message in str(exception_info.value), str(exception_info.value)
        return
    listed = dataset.list_documents(**params)
    assert len(listed) == expected_num, str(listed)
@pytest.mark.p3
def test_concurrent_list(self, add_documents):
    """100 concurrent listings all complete and each sees the 5 uploaded documents."""
    dataset, _ = add_documents
    request_total = 100
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(dataset.list_documents) for _ in range(request_total)]
        completed = list(as_completed(futures))
    assert len(completed) == request_total, completed
    for future in futures:
        result = future.result()
        assert len(result) == 5, str(result)
@pytest.mark.p3
def test_invalid_params(self, add_documents):
    """An unknown keyword argument is rejected by the SDK method signature."""
    dataset, _ = add_documents
    with pytest.raises(TypeError) as exception_info:
        dataset.list_documents(**{"a": "b"})
    assert "got an unexpected keyword argument" in str(exception_info.value), str(exception_info.value)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_file_management_within_dataset/test_list_documents.py",
"license": "Apache License 2.0",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_file_management_within_dataset/test_parse_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from common import bulk_upload_documents
from ragflow_sdk import DataSet
from ragflow_sdk.modules.document import Document
from utils import wait_for
@wait_for(30, 1, "Document parsing timeout")
def condition(_dataset: DataSet, _document_ids: list[str] | None = None):
    """Poll predicate: return True once parsing has finished.

    With ``_document_ids`` omitted, every document in the dataset must have
    ``run == "DONE"``; otherwise only the listed ids are checked.  Wrapped by
    ``wait_for`` so it is retried every second for up to 30 seconds.
    """
    documents = _dataset.list_documents(page_size=1000)
    if _document_ids is None:
        # No explicit ids: all documents in the dataset must be parsed.
        for document in documents:
            if document.run != "DONE":
                return False
        return True
    target_ids = set(_document_ids)
    for document in documents:
        if document.id in target_ids:
            if document.run != "DONE":
                return False
    return True
def validate_document_details(dataset, document_ids):
    """Assert that every document in ``document_ids`` finished parsing successfully."""
    targets = set(document_ids)
    for document in dataset.list_documents(page_size=1000):
        if document.id not in targets:
            continue
        assert document.run == "DONE"
        assert len(document.process_begin_at) > 0
        assert document.process_duration > 0
        assert document.progress > 0
        assert "Task done" in document.progress_msg
class TestDocumentsParse:
    """SDK tests for asynchronous document parsing (``DataSet.async_parse_documents``)."""

    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            pytest.param(None, "AttributeError", marks=pytest.mark.skip),
            pytest.param({"document_ids": []}, "`document_ids` is required", marks=pytest.mark.p1),
            pytest.param({"document_ids": ["invalid_id"]}, "Documents not found: ['invalid_id']", marks=pytest.mark.p3),
            pytest.param({"document_ids": ["\n!?ใ๏ผ๏ผ๏ผ\"'"]}, "Documents not found: ['\\n!?ใ๏ผ๏ผ๏ผ\"\\'']", marks=pytest.mark.p3),
            pytest.param("not json", "AttributeError", marks=pytest.mark.skip),
            pytest.param(lambda r: {"document_ids": r[:1]}, "", marks=pytest.mark.p1),
            pytest.param(lambda r: {"document_ids": r}, "", marks=pytest.mark.p1),
        ],
    )
    def test_basic_scenarios(self, add_documents_func, payload, expected_message):
        """Valid and invalid parse payloads; an empty message means success expected."""
        dataset, documents = add_documents_func
        # Callable payloads are built from real uploaded document ids.
        if callable(payload):
            payload = payload([doc.id for doc in documents])
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                dataset.async_parse_documents(**payload)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            dataset.async_parse_documents(**payload)
            # Wait for parsing to complete, then verify per-document status fields.
            condition(dataset, payload["document_ids"])
            validate_document_details(dataset, payload["document_ids"])

    @pytest.mark.parametrize(
        "payload",
        [
            pytest.param(lambda r: {"document_ids": ["invalid_id"] + r}, marks=pytest.mark.p3),
            pytest.param(lambda r: {"document_ids": r[:1] + ["invalid_id"] + r[1:3]}, marks=pytest.mark.p1),
            pytest.param(lambda r: {"document_ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
        ],
    )
    def test_parse_partial_invalid_document_id(self, add_documents_func, payload):
        """One bad id in a batch raises, but the valid ids are still parsed."""
        dataset, documents = add_documents_func
        document_ids = [doc.id for doc in documents]
        payload = payload(document_ids)
        with pytest.raises(Exception) as exception_info:
            dataset.async_parse_documents(**payload)
        assert "Documents not found: ['invalid_id']" in str(exception_info.value), str(exception_info.value)
        # The valid documents should still have been queued and parsed.
        condition(dataset, document_ids)
        validate_document_details(dataset, document_ids)

    @pytest.mark.p3
    def test_repeated_parse(self, add_documents_func):
        """Re-parsing already-parsed documents must be accepted, not rejected."""
        dataset, documents = add_documents_func
        document_ids = [doc.id for doc in documents]
        dataset.async_parse_documents(document_ids=document_ids)
        condition(dataset, document_ids)
        dataset.async_parse_documents(document_ids=document_ids)

    @pytest.mark.p3
    def test_duplicate_parse(self, add_documents_func):
        """Duplicate ids in a single request are tolerated and parse once."""
        dataset, documents = add_documents_func
        document_ids = [doc.id for doc in documents]
        dataset.async_parse_documents(document_ids=document_ids + document_ids)
        condition(dataset, document_ids)
        validate_document_details(dataset, document_ids)
@pytest.mark.p2
def test_get_documents_status_handles_retry_terminal_and_progress_paths(add_dataset_func, monkeypatch):
    """``_get_documents_status`` must survive an empty first listing, an in-flight
    progress report, and a transient exception, eventually reporting DONE for all."""
    dataset = add_dataset_func
    attempts = {"doc-retry": 0, "doc-progress": 0, "doc-exception": 0}

    def _make_document(doc_id, run, chunk_count, token_count, progress):
        payload = {
            "id": doc_id,
            "dataset_id": dataset.id,
            "run": run,
            "chunk_count": chunk_count,
            "token_count": token_count,
            "progress": progress,
        }
        return Document(dataset.rag, payload)

    def _fake_list_documents(id=None, **_kwargs):
        if id == "doc-retry":
            attempts["doc-retry"] += 1
            # First call returns nothing so the retry path is exercised.
            if attempts["doc-retry"] == 1:
                return []
            return [_make_document("doc-retry", "DONE", 3, 5, 0.0)]
        if id == "doc-progress":
            attempts["doc-progress"] += 1
            return [_make_document("doc-progress", "RUNNING", 2, 4, 1.0)]
        if id == "doc-exception":
            attempts["doc-exception"] += 1
            # A transient listing failure on the first call must be tolerated.
            if attempts["doc-exception"] == 1:
                raise Exception("temporary list failure")
            return [_make_document("doc-exception", "DONE", 7, 11, 0.0)]
        return []

    monkeypatch.setattr(dataset, "list_documents", _fake_list_documents)
    monkeypatch.setattr("time.sleep", lambda *_args, **_kwargs: None)

    finished = dataset._get_documents_status(["doc-retry", "doc-progress", "doc-exception"])

    by_id = {entry[0]: entry for entry in finished}
    assert set(by_id) == {"doc-retry", "doc-progress", "doc-exception"}
    assert by_id["doc-retry"][1] == "DONE"
    assert by_id["doc-progress"][1] == "DONE"
    assert by_id["doc-exception"][1] == "DONE"
@pytest.mark.p2
def test_parse_documents_keyboard_interrupt_triggers_cancel_then_returns_status(add_dataset_func, monkeypatch):
    """A KeyboardInterrupt during parsing must cancel the jobs exactly once and
    still return the final status snapshot."""
    dataset = add_dataset_func
    state = {"cancel_calls": 0, "status_calls": 0}
    expected_status = [("doc-1", "DONE", 1, 2)]

    def _interrupt(_document_ids):
        raise KeyboardInterrupt

    def _record_cancel(document_ids):
        state["cancel_calls"] += 1
        assert document_ids == ["doc-1"]

    def _record_status(document_ids):
        state["status_calls"] += 1
        assert document_ids == ["doc-1"]
        return expected_status

    monkeypatch.setattr(dataset, "async_parse_documents", _interrupt)
    monkeypatch.setattr(dataset, "async_cancel_parse_documents", _record_cancel)
    monkeypatch.setattr(dataset, "_get_documents_status", _record_status)

    status = dataset.parse_documents(["doc-1"])

    assert status == expected_status
    assert state["cancel_calls"] == 1
    assert state["status_calls"] == 1
@pytest.mark.p2
def test_parse_documents_happy_path_runs_initial_wait_then_returns_status(add_dataset_func, monkeypatch):
    """On the happy path ``parse_documents`` polls status twice and returns
    the second (final) snapshot."""
    dataset = add_dataset_func
    state = {"status_calls": 0}

    def _status_snapshot(document_ids):
        state["status_calls"] += 1
        assert document_ids == ["doc-1"]
        return [("doc-1", f"DONE-{state['status_calls']}", 1, 2)]

    monkeypatch.setattr(dataset, "async_parse_documents", lambda _document_ids: None)
    monkeypatch.setattr(dataset, "_get_documents_status", _status_snapshot)

    status = dataset.parse_documents(["doc-1"])

    assert state["status_calls"] == 2
    assert status == [("doc-1", "DONE-2", 1, 2)]
@pytest.mark.p2
def test_async_cancel_parse_documents_raises_on_nonzero_code(add_dataset_func, monkeypatch):
    """A non-zero API code from the cancel endpoint must surface as an exception."""
    dataset = add_dataset_func

    class _FailureResponse:
        @staticmethod
        def json():
            return {"code": 102, "message": "cancel failed"}

    monkeypatch.setattr(dataset, "rm", lambda *_args, **_kwargs: _FailureResponse())
    with pytest.raises(Exception) as exc_info:
        dataset.async_cancel_parse_documents(["doc-1"])
    assert "cancel failed" in str(exc_info.value), str(exc_info.value)
@pytest.mark.p3
def test_parse_100_files(add_dataset_func, tmp_path):
    """A 100-file batch parse completes and every document reaches DONE."""
    @wait_for(200, 1, "Document parsing timeout")
    def _all_done(_dataset: DataSet, _count: int):
        for document in _dataset.list_documents(page_size=_count * 2):
            if document.run != "DONE":
                return False
        return True

    file_total = 100
    dataset = add_dataset_func
    uploaded = bulk_upload_documents(dataset, file_total, tmp_path)
    uploaded_ids = [doc.id for doc in uploaded]
    dataset.async_parse_documents(document_ids=uploaded_ids)
    _all_done(dataset, file_total)
    validate_document_details(dataset, uploaded_ids)
@pytest.mark.p3
def test_concurrent_parse(add_dataset_func, tmp_path):
    """100 documents parsed via 5 concurrent workers all reach DONE."""
    @wait_for(200, 1, "Document parsing timeout")
    def _all_done(_dataset: DataSet, _count: int):
        for document in _dataset.list_documents(page_size=_count * 2):
            if document.run != "DONE":
                return False
        return True

    file_total = 100
    dataset = add_dataset_func
    uploaded = bulk_upload_documents(dataset, file_total, tmp_path)
    uploaded_ids = [doc.id for doc in uploaded]

    def _parse_one(doc_id):
        dataset.async_parse_documents(document_ids=[doc_id])

    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(_parse_one, doc_id) for doc_id in uploaded_ids]
        completed = list(as_completed(futures))
    assert len(completed) == file_total, completed

    _all_done(dataset, file_total)
    validate_document_details(dataset, uploaded_ids)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_file_management_within_dataset/test_parse_documents.py",
"license": "Apache License 2.0",
"lines": 223,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_file_management_within_dataset/test_stop_parse_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
def validate_document_parse_done(dataset, document_ids):
    """Assert that every document in ``document_ids`` finished parsing successfully."""
    targets = set(document_ids)
    for document in dataset.list_documents(page_size=1000):
        if document.id not in targets:
            continue
        assert document.run == "DONE"
        assert len(document.process_begin_at) > 0
        assert document.process_duration > 0
        assert document.progress > 0
        assert "Task done" in document.progress_msg
def validate_document_parse_cancel(dataset, document_ids):
    """Assert that every document in ``document_ids`` was cancelled.

    A cancelled document has ``run == "CANCEL"``, a recorded start timestamp,
    and zero progress.

    Bug fix: the previous version ignored ``document_ids`` entirely and
    asserted the CANCEL state on *every* document in the dataset, which fails
    whenever the dataset also contains unrelated (e.g. parsed) documents.
    It now filters by id, mirroring ``validate_document_parse_done``.
    """
    targets = set(document_ids)
    for document in dataset.list_documents(page_size=1000):
        if document.id not in targets:
            continue
        assert document.run == "CANCEL"
        assert len(document.process_begin_at) > 0
        assert document.progress == 0.0
@pytest.mark.skip
class TestDocumentsParseStop:
    """Placeholder suite for stop/cancel-parsing tests; skipped until implemented."""
    pass
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_file_management_within_dataset/test_stop_parse_documents.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_file_management_within_dataset/test_update_document.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from configs import DOCUMENT_NAME_LIMIT
from ragflow_sdk import DataSet
from configs import DEFAULT_PARSER_CONFIG
class TestDocumentsUpdated:
    """Validation tests for ``Document.update``: rename rules, metadata typing,
    chunk-method selection, and rejection of read-only fields."""

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "name, expected_message",
        [
            ("new_name.txt", ""),
            (f"{'a' * (DOCUMENT_NAME_LIMIT - 4)}.txt", ""),
            (0, "AttributeError"),
            (None, "AttributeError"),
            ("", "The extension of file can't be changed"),
            ("ragflow_test_upload_0", "The extension of file can't be changed"),
            ("ragflow_test_upload_1.txt", "Duplicated document name in the same dataset"),
            ("RAGFLOW_TEST_UPLOAD_1.TXT", ""),
        ],
    )
    def test_name(self, add_documents, name, expected_message):
        """Renames must keep the extension, stay unique, and be strings."""
        dataset, documents = add_documents
        document = documents[0]
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.update({"name": name})
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            document.update({"name": name})
            # Re-fetch to confirm the rename was persisted server-side.
            updated_doc = dataset.list_documents(id=document.id)[0]
            assert updated_doc.name == name, str(updated_doc)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "meta_fields, expected_message",
        [
            ({"test": "test"}, ""),
            ("test", "meta_fields must be a dictionary"),
        ],
    )
    def test_meta_fields(self, add_documents, meta_fields, expected_message):
        """``meta_fields`` must be a dict; anything else is rejected."""
        _, documents = add_documents
        document = documents[0]
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.update({"meta_fields": meta_fields})
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            document.update({"meta_fields": meta_fields})

    @pytest.mark.p2
    def test_meta_fields_invalid_type_guard_p2(self, add_documents):
        """Non-dict ``meta_fields`` raises with a clear type message."""
        _, documents = add_documents
        document = documents[0]
        with pytest.raises(Exception) as exception_info:
            document.update({"meta_fields": "not-a-dict"})
        assert "meta_fields must be a dictionary" in str(exception_info.value), str(exception_info.value)

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "chunk_method, expected_message",
        [
            ("naive", ""),
            ("manual", ""),
            ("qa", ""),
            ("table", ""),
            ("paper", ""),
            ("book", ""),
            ("laws", ""),
            ("presentation", ""),
            ("picture", ""),
            ("one", ""),
            ("knowledge_graph", ""),
            ("email", ""),
            ("tag", ""),
            ("", "`chunk_method` doesn't exist"),
            ("other_chunk_method", "`chunk_method` other_chunk_method doesn't exist"),
        ],
    )
    def test_chunk_method(self, add_documents, chunk_method, expected_message):
        """Every supported chunk method is accepted; unknown ones raise."""
        dataset, documents = add_documents
        document = documents[0]
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.update({"chunk_method": chunk_method})
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            document.update({"chunk_method": chunk_method})
            updated_doc = dataset.list_documents(id=document.id)[0]
            assert updated_doc.chunk_method == chunk_method, str(updated_doc)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "payload, expected_message",
        [
            ({"chunk_count": 1}, "Can't change `chunk_count`"),
            pytest.param(
                {"create_date": "Fri, 14 Mar 2025 16:53:42 GMT"},
                "The input parameters are invalid",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"create_time": 1},
                "The input parameters are invalid",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"created_by": "ragflow_test"},
                "The input parameters are invalid",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"dataset_id": "ragflow_test"},
                "The input parameters are invalid",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"id": "ragflow_test"},
                "The input parameters are invalid",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"location": "ragflow_test.txt"},
                "The input parameters are invalid",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"process_begin_at": 1},
                "The input parameters are invalid",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"process_duration": 1.0},
                "The input parameters are invalid",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            ({"progress": 1.0}, "Can't change `progress`"),
            pytest.param(
                {"progress_msg": "ragflow_test"},
                "The input parameters are invalid",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"run": "ragflow_test"},
                "The input parameters are invalid",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"size": 1},
                "The input parameters are invalid",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"source_type": "ragflow_test"},
                "The input parameters are invalid",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"thumbnail": "ragflow_test"},
                "The input parameters are invalid",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            ({"token_count": 1}, "Can't change `token_count`"),
            pytest.param(
                {"type": "ragflow_test"},
                "The input parameters are invalid",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"update_date": "Fri, 14 Mar 2025 16:33:17 GMT"},
                "The input parameters are invalid",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
            pytest.param(
                {"update_time": 1},
                "The input parameters are invalid",
                marks=pytest.mark.skip(reason="issues/6104"),
            ),
        ],
    )
    def test_invalid_field(self, add_documents, payload, expected_message):
        """Server-managed / read-only fields must be rejected on update."""
        _, documents = add_documents
        document = documents[0]
        with pytest.raises(Exception) as exception_info:
            document.update(payload)
        assert expected_message in str(exception_info.value), str(exception_info.value)
class TestUpdateDocumentParserConfig:
    """Validation tests for the ``parser_config`` payload of ``Document.update``."""

    @pytest.mark.p2
    @pytest.mark.parametrize(
        "chunk_method, parser_config, expected_message",
        [
            ("naive", {}, ""),
            (
                "naive",
                DEFAULT_PARSER_CONFIG,
                "",
            ),
            pytest.param(
                "naive",
                {"chunk_token_num": -1},
                "chunk_token_num should be in range from 1 to 100000000",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"chunk_token_num": 0},
                "chunk_token_num should be in range from 1 to 100000000",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"chunk_token_num": 100000000},
                "chunk_token_num should be in range from 1 to 100000000",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"chunk_token_num": 3.14},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"chunk_token_num": "1024"},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            ("naive", {"layout_recognize": "DeepDOC"}, ""),
            ("naive", {"layout_recognize": "Naive"}, ""),
            ("naive", {"html4excel": True}, ""),
            ("naive", {"html4excel": False}, ""),
            pytest.param(
                "naive",
                {"html4excel": 1},
                "html4excel should be True or False",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            ("naive", {"delimiter": ""}, ""),
            ("naive", {"delimiter": "`##`"}, ""),
            pytest.param(
                "naive",
                {"delimiter": 1},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"task_page_size": -1},
                "task_page_size should be in range from 1 to 100000000",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"task_page_size": 0},
                "task_page_size should be in range from 1 to 100000000",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"task_page_size": 100000000},
                "task_page_size should be in range from 1 to 100000000",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"task_page_size": 3.14},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"task_page_size": "1024"},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            ("naive", {"raptor": {"use_raptor": True,
                                  "prompt": "Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize.",
                                  "max_token": 256,
                                  "threshold": 0.1,
                                  "max_cluster": 64,
                                  "random_seed": 0,}}, ""),
            ("naive", {"raptor": {"use_raptor": False}}, ""),
            pytest.param(
                "naive",
                {"invalid_key": "invalid_value"},
                "Abnormal 'parser_config'. Invalid key: invalid_key",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"auto_keywords": -1},
                "auto_keywords should be in range from 0 to 32",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"auto_keywords": 32},
                "auto_keywords should be in range from 0 to 32",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"auto_keywords": 3.14},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"auto_keywords": "1024"},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"auto_questions": -1},
                "auto_questions should be in range from 0 to 10",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"auto_questions": 10},
                "auto_questions should be in range from 0 to 10",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"auto_questions": 3.14},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"auto_questions": "1024"},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"topn_tags": -1},
                "topn_tags should be in range from 0 to 10",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"topn_tags": 10},
                "topn_tags should be in range from 0 to 10",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"topn_tags": 3.14},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            pytest.param(
                "naive",
                {"topn_tags": "1024"},
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
        ],
    )
    def test_parser_config(self, client, add_documents, chunk_method, parser_config, expected_message):
        """Apply a parser_config and verify it is either persisted or rejected."""
        dataset, documents = add_documents
        document = documents[0]
        from operator import attrgetter
        update_data = {"chunk_method": chunk_method, "parser_config": parser_config}
        if expected_message:
            with pytest.raises(Exception) as exception_info:
                document.update(update_data)
            assert expected_message in str(exception_info.value), str(exception_info.value)
        else:
            document.update(update_data)
            updated_doc = dataset.list_documents(id=document.id)[0]
            if parser_config:
                # Verify every submitted key (including nested dicts) round-trips.
                for k, v in parser_config.items():
                    if isinstance(v, dict):
                        for kk, vv in v.items():
                            assert attrgetter(f"{k}.{kk}")(updated_doc.parser_config) == vv, str(updated_doc)
                    else:
                        assert attrgetter(k)(updated_doc.parser_config) == v, str(updated_doc)
            else:
                # An empty config falls back to the documented defaults.
                expected_config = DataSet.ParserConfig(
                    client,
                    DEFAULT_PARSER_CONFIG,
                )
                assert str(updated_doc.parser_config) == str(expected_config), str(updated_doc)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_file_management_within_dataset/test_update_document.py",
"license": "Apache License 2.0",
"lines": 397,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_file_management_within_dataset/test_upload_documents.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import string
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from configs import DOCUMENT_NAME_LIMIT
from utils.file_utils import create_txt_file
class TestDocumentsUpload:
    """Upload behaviour of ``DataSet.upload_documents``."""

    @pytest.mark.p1
    def test_valid_single_upload(self, add_dataset_func, tmp_path):
        """A single text file uploads into the target dataset."""
        dataset = add_dataset_func
        fp = create_txt_file(tmp_path / "ragflow_test.txt")
        uploaded = dataset.upload_documents([{"display_name": fp.name, "blob": fp.read_bytes()}])
        for document in uploaded:
            assert document.dataset_id == dataset.id, str(document)
            assert document.name == fp.name, str(document)

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "generate_test_files",
        [
            "docx",
            "excel",
            "ppt",
            "image",
            "pdf",
            "txt",
            "md",
            "json",
            "eml",
            "html",
        ],
        indirect=True,
    )
    def test_file_type_validation(self, add_dataset_func, generate_test_files, request):
        """Every supported file type is accepted by the upload endpoint."""
        dataset = add_dataset_func
        file_key = request.node.callspec.params["generate_test_files"]
        fp = generate_test_files[file_key]
        uploaded = dataset.upload_documents([{"display_name": fp.name, "blob": fp.read_bytes()}])
        for document in uploaded:
            assert document.dataset_id == dataset.id, str(document)
            assert document.name == fp.name, str(document)

    @pytest.mark.p3
    @pytest.mark.parametrize(
        "file_type",
        ["exe", "unknown"],
    )
    def test_unsupported_file_type(self, add_dataset_func, tmp_path, file_type):
        """Unsupported extensions are rejected with a descriptive error."""
        dataset = add_dataset_func
        fp = tmp_path / f"ragflow_test.{file_type}"
        fp.touch()
        payload = [{"display_name": fp.name, "blob": fp.read_bytes()}]
        with pytest.raises(Exception) as exception_info:
            dataset.upload_documents(payload)
        assert str(exception_info.value) == f"ragflow_test.{file_type}: This type of file has not been supported yet!", str(exception_info.value)

    @pytest.mark.p2
    def test_missing_file(self, add_dataset_func):
        """An empty upload list is rejected."""
        dataset = add_dataset_func
        with pytest.raises(Exception) as exception_info:
            dataset.upload_documents([])
        assert str(exception_info.value) == "No file part!", str(exception_info.value)

    @pytest.mark.p3
    def test_empty_file(self, add_dataset_func, tmp_path):
        """A zero-byte file uploads successfully with size 0."""
        dataset = add_dataset_func
        fp = tmp_path / "empty.txt"
        fp.touch()
        uploaded = dataset.upload_documents([{"display_name": fp.name, "blob": fp.read_bytes()}])
        for document in uploaded:
            assert document.size == 0, str(document)

    @pytest.mark.p3
    def test_filename_empty(self, add_dataset_func, tmp_path):
        """An empty display name is rejected."""
        dataset = add_dataset_func
        fp = create_txt_file(tmp_path / "ragflow_test.txt")
        payload = [{"display_name": "", "blob": fp.read_bytes()}]
        with pytest.raises(Exception) as exception_info:
            dataset.upload_documents(payload)
        assert str(exception_info.value) == "No file selected!", str(exception_info.value)

    @pytest.mark.p2
    def test_filename_max_length(self, add_dataset_func, tmp_path):
        """A name exactly at the documented length limit is accepted."""
        dataset = add_dataset_func
        fp = create_txt_file(tmp_path / f"{'a' * (DOCUMENT_NAME_LIMIT - 4)}.txt")
        uploaded = dataset.upload_documents([{"display_name": fp.name, "blob": fp.read_bytes()}])
        for document in uploaded:
            assert document.dataset_id == dataset.id, str(document)
            assert document.name == fp.name, str(document)

    @pytest.mark.p2
    def test_duplicate_files(self, add_dataset_func, tmp_path):
        """Two identical names in one request get deduplicated suffixes."""
        dataset = add_dataset_func
        fp = create_txt_file(tmp_path / "ragflow_test.txt")
        blob = fp.read_bytes()
        uploaded = dataset.upload_documents([{"display_name": fp.name, "blob": blob}, {"display_name": fp.name, "blob": blob}])
        assert len(uploaded) == 2, str(uploaded)
        for i, document in enumerate(uploaded):
            assert document.dataset_id == dataset.id, str(document)
            # The second copy is renamed "name(1).txt".
            expected_name = fp.name if i == 0 else f"{fp.stem}({i}){fp.suffix}"
            assert document.name == expected_name, str(document)

    @pytest.mark.p2
    def test_same_file_repeat(self, add_dataset_func, tmp_path):
        """Re-uploading the same file yields incrementing "(n)" suffixes."""
        dataset = add_dataset_func
        fp = create_txt_file(tmp_path / "ragflow_test.txt")
        blob = fp.read_bytes()
        for i in range(3):
            uploaded = dataset.upload_documents([{"display_name": fp.name, "blob": blob}])
            assert len(uploaded) == 1, str(uploaded)
            document = uploaded[0]
            assert document.dataset_id == dataset.id, str(document)
            expected_name = fp.name if i == 0 else f"{fp.stem}({i}){fp.suffix}"
            assert document.name == expected_name, str(document)

    @pytest.mark.p3
    def test_filename_special_characters(self, add_dataset_func, tmp_path):
        """Punctuation (minus filesystem-illegal characters) is allowed in names."""
        dataset = add_dataset_func
        illegal_chars = '<>:"/\\|?*'
        translation_table = str.maketrans({char: "_" for char in illegal_chars})
        safe_filename = string.punctuation.translate(translation_table)
        fp = tmp_path / f"{safe_filename}.txt"
        fp.write_text("Sample text content")
        uploaded = dataset.upload_documents([{"display_name": fp.name, "blob": fp.read_bytes()}])
        assert len(uploaded) == 1, str(uploaded)
        document = uploaded[0]
        assert document.dataset_id == dataset.id, str(document)
        assert document.name == fp.name, str(document)

    @pytest.mark.p1
    def test_multiple_files(self, client, add_dataset_func, tmp_path):
        """20 files upload in one call and the dataset count reflects it."""
        dataset = add_dataset_func
        expected_document_count = 20
        document_infos = []
        for i in range(expected_document_count):
            fp = create_txt_file(tmp_path / f"ragflow_test_upload_{i}.txt")
            document_infos.append({"display_name": fp.name, "blob": fp.read_bytes()})
        uploaded = dataset.upload_documents(document_infos)
        assert len(uploaded) == expected_document_count, str(uploaded)
        retrieved_dataset = client.get_dataset(name=dataset.name)
        assert retrieved_dataset.document_count == expected_document_count, str(retrieved_dataset)

    @pytest.mark.p3
    def test_concurrent_upload(self, client, add_dataset_func, tmp_path):
        """Concurrent single-file uploads all succeed and are all counted."""
        dataset = add_dataset_func
        count = 20
        fps = [create_txt_file(tmp_path / f"ragflow_test_{i}.txt") for i in range(count)]

        def _upload_one(path):
            return dataset.upload_documents([{"display_name": path.name, "blob": path.read_bytes()}])

        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(_upload_one, fp) for fp in fps]
            responses = list(as_completed(futures))
        assert len(responses) == count, responses
        retrieved_dataset = client.get_dataset(name=dataset.name)
        assert retrieved_dataset.document_count == count, str(retrieved_dataset)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_file_management_within_dataset/test_upload_documents.py",
"license": "Apache License 2.0",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/common.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pathlib import Path
from ragflow_sdk import Chat, Chunk, DataSet, Document, RAGFlow, Session
from utils.file_utils import create_txt_file
# DATASET MANAGEMENT
def batch_create_datasets(client: RAGFlow, num: int) -> list[DataSet]:
return [client.create_dataset(name=f"dataset_{i}") for i in range(num)]
# FILE MANAGEMENT WITHIN DATASET
def bulk_upload_documents(dataset: DataSet, num: int, tmp_path: Path) -> list[Document]:
document_infos = []
for i in range(num):
fp = create_txt_file(tmp_path / f"ragflow_test_upload_{i}.txt")
with fp.open("rb") as f:
blob = f.read()
document_infos.append({"display_name": fp.name, "blob": blob})
return dataset.upload_documents(document_infos)
# CHUNK MANAGEMENT WITHIN DATASET
def batch_add_chunks(document: Document, num: int) -> list[Chunk]:
return [document.add_chunk(content=f"chunk test {i}") for i in range(num)]
# CHAT ASSISTANT MANAGEMENT
def batch_create_chat_assistants(client: RAGFlow, num: int) -> list[Chat]:
return [client.create_chat(name=f"test_chat_assistant_{i}") for i in range(num)]
# SESSION MANAGEMENT
def batch_add_sessions_with_chat_assistant(chat_assistant: Chat, num) -> list[Session]:
return [chat_assistant.create_session(name=f"session_with_chat_assistant_{i}") for i in range(num)]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/common.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_dataset_mangement/test_create_dataset.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
from operator import attrgetter
import pytest
from configs import DATASET_NAME_LIMIT, DEFAULT_PARSER_CONFIG, HOST_ADDRESS, INVALID_API_TOKEN
from hypothesis import example, given, settings
from ragflow_sdk import DataSet, RAGFlow
from utils import encode_avatar
from utils.file_utils import create_image_file
from utils.hypothesis_utils import valid_names
@pytest.mark.usefixtures("clear_datasets")
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_message",
[
(None, "Authentication error: API key is invalid!"),
(INVALID_API_TOKEN, "Authentication error: API key is invalid!"),
],
ids=["empty_auth", "invalid_api_token"],
)
def test_auth_invalid(self, invalid_auth, expected_message):
client = RAGFlow(invalid_auth, HOST_ADDRESS)
with pytest.raises(Exception) as exception_info:
client.create_dataset(**{"name": "auth_test"})
assert str(exception_info.value) == expected_message
@pytest.mark.usefixtures("clear_datasets")
class TestCapability:
@pytest.mark.p3
def test_create_dataset_1k(self, client):
count = 1_000
for i in range(count):
payload = {"name": f"dataset_{i}"}
client.create_dataset(**payload)
assert len(client.list_datasets(page_size=2000)) == count
@pytest.mark.p3
def test_create_dataset_concurrent(self, client):
count = 100
with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(client.create_dataset, **{"name": f"dataset_{i}"}) for i in range(100)]
responses = list(as_completed(futures))
assert len(responses) == count, responses
@pytest.mark.usefixtures("clear_datasets")
class TestDatasetCreate:
@pytest.mark.p1
@given(name=valid_names())
@example("a" * 128)
@settings(max_examples=20)
def test_name(self, client, name):
dataset = client.create_dataset(**{"name": name})
assert dataset.name == name, str(dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
"name, expected_message",
[
("", "String should have at least 1 character"),
(" ", "String should have at least 1 character"),
("a" * (DATASET_NAME_LIMIT + 1), "String should have at most 128 characters"),
(0, "not instance of"),
(None, "not instance of"),
],
ids=["empty_name", "space_name", "too_long_name", "invalid_name", "None_name"],
)
def test_name_invalid(self, client, name, expected_message):
with pytest.raises(Exception) as exception_info:
client.create_dataset(**{"name": name})
assert expected_message in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_name_duplicated(self, client):
name = "duplicated_name"
payload = {"name": name}
client.create_dataset(**payload)
dataset = client.create_dataset(**payload)
assert dataset.name == name + "(1)", str(dataset)
@pytest.mark.p3
def test_name_case_insensitive(self, client):
name = "CaseInsensitive"
payload = {"name": name.upper()}
client.create_dataset(**payload)
payload = {"name": name.lower()}
dataset = client.create_dataset(**payload)
assert dataset.name == name.lower() + "(1)", str(dataset)
@pytest.mark.p2
def test_avatar(self, client, tmp_path):
fn = create_image_file(tmp_path / "ragflow_test.png")
payload = {
"name": "avatar",
"avatar": f"data:image/png;base64,{encode_avatar(fn)}",
}
client.create_dataset(**payload)
@pytest.mark.p3
def test_avatar_exceeds_limit_length(self, client):
payload = {"name": "avatar_exceeds_limit_length", "avatar": "a" * 65536}
with pytest.raises(Exception) as exception_info:
client.create_dataset(**payload)
assert "String should have at most 65535 characters" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
@pytest.mark.parametrize(
"name, prefix, expected_message",
[
("empty_prefix", "", "Missing MIME prefix. Expected format: data:<mime>;base64,<data>"),
("missing_comma", "data:image/png;base64", "Missing MIME prefix. Expected format: data:<mime>;base64,<data>"),
("unsupported_mine_type", "invalid_mine_prefix:image/png;base64,", "Invalid MIME prefix format. Must start with 'data:'"),
("invalid_mine_type", "data:unsupported_mine_type;base64,", "Unsupported MIME type. Allowed: ['image/jpeg', 'image/png']"),
],
ids=["empty_prefix", "missing_comma", "unsupported_mine_type", "invalid_mine_type"],
)
def test_avatar_invalid_prefix(self, client, tmp_path, name, prefix, expected_message):
fn = create_image_file(tmp_path / "ragflow_test.png")
payload = {
"name": name,
"avatar": f"{prefix}{encode_avatar(fn)}",
}
with pytest.raises(Exception) as exception_info:
client.create_dataset(**payload)
assert expected_message in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_avatar_unset(self, client):
payload = {"name": "avatar_unset"}
dataset = client.create_dataset(**payload)
assert dataset.avatar is None, str(dataset)
@pytest.mark.p2
def test_description(self, client):
payload = {"name": "description", "description": "description"}
dataset = client.create_dataset(**payload)
assert dataset.description == "description", str(dataset)
@pytest.mark.p3
def test_description_exceeds_limit_length(self, client):
payload = {"name": "description_exceeds_limit_length", "description": "a" * 65536}
with pytest.raises(Exception) as exception_info:
client.create_dataset(**payload)
assert "String should have at most 65535 characters" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p3
def test_description_unset(self, client):
payload = {"name": "description_unset"}
dataset = client.create_dataset(**payload)
assert dataset.description is None, str(dataset)
@pytest.mark.p3
def test_description_none(self, client):
payload = {"name": "description_none", "description": None}
dataset = client.create_dataset(**payload)
assert dataset.description is None, str(dataset)
@pytest.mark.p1
@pytest.mark.parametrize(
"name, embedding_model",
[
("BAAI/bge-small-en-v1.5@Builtin", "BAAI/bge-small-en-v1.5@Builtin"),
("embedding-3@ZHIPU-AI", "embedding-3@ZHIPU-AI"),
],
ids=["builtin_baai", "tenant_zhipu"],
)
def test_embedding_model(self, client, name, embedding_model):
payload = {"name": name, "embedding_model": embedding_model}
dataset = client.create_dataset(**payload)
assert dataset.embedding_model == embedding_model, str(dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
"name, embedding_model",
[
("unknown_llm_name", "unknown@ZHIPU-AI"),
("unknown_llm_factory", "embedding-3@unknown"),
("tenant_no_auth_default_tenant_llm", "text-embedding-v3@Tongyi-Qianwen"),
("tenant_no_auth", "text-embedding-3-small@OpenAI"),
],
ids=["unknown_llm_name", "unknown_llm_factory", "tenant_no_auth_default_tenant_llm", "tenant_no_auth"],
)
def test_embedding_model_invalid(self, client, name, embedding_model):
payload = {"name": name, "embedding_model": embedding_model}
with pytest.raises(Exception) as exception_info:
client.create_dataset(**payload)
if "tenant_no_auth" in name:
assert str(exception_info.value) == f"Unauthorized model: <{embedding_model}>", str(exception_info.value)
else:
assert str(exception_info.value) == f"Unsupported model: <{embedding_model}>", str(exception_info.value)
@pytest.mark.p2
@pytest.mark.parametrize(
"name, embedding_model",
[
("empty", ""),
("space", " "),
("missing_at", "BAAI/bge-small-en-v1.5Builtin"),
("missing_model_name", "@Builtin"),
("missing_provider", "BAAI/bge-small-en-v1.5@"),
("whitespace_only_model_name", " @Builtin"),
("whitespace_only_provider", "BAAI/bge-small-en-v1.5@ "),
],
ids=["empty", "space", "missing_at", "empty_model_name", "empty_provider", "whitespace_only_model_name", "whitespace_only_provider"],
)
def test_embedding_model_format(self, client, name, embedding_model):
payload = {"name": name, "embedding_model": embedding_model}
with pytest.raises(Exception) as exception_info:
client.create_dataset(**payload)
if name in ["empty", "space", "missing_at"]:
assert "Embedding model identifier must follow <model_name>@<provider> format" in str(exception_info.value), str(exception_info.value)
else:
assert "Both model_name and provider must be non-empty strings" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
def test_embedding_model_unset(self, client):
payload = {"name": "embedding_model_unset"}
dataset = client.create_dataset(**payload)
assert dataset.embedding_model == "BAAI/bge-small-en-v1.5@Builtin", str(dataset)
@pytest.mark.p2
def test_embedding_model_none(self, client):
payload = {"name": "embedding_model_none", "embedding_model": None}
dataset = client.create_dataset(**payload)
assert dataset.embedding_model == "BAAI/bge-small-en-v1.5@Builtin", str(dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
"name, permission",
[
("me", "me"),
("team", "team"),
],
ids=["me", "team"],
)
def test_permission(self, client, name, permission):
payload = {"name": name, "permission": permission}
dataset = client.create_dataset(**payload)
assert dataset.permission == permission.lower().strip(), str(dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
"name, permission",
[
("empty", ""),
("unknown", "unknown"),
("me_upercase", "ME"),
("team_upercase", "TEAM"),
("whitespace", " ME "),
],
ids=["empty", "unknown", "me_upercase", "team_upercase", "whitespace"],
)
def test_permission_invalid(self, client, name, permission):
payload = {"name": name, "permission": permission}
with pytest.raises(Exception) as exception_info:
client.create_dataset(**payload)
assert "Input should be 'me' or 'team'" in str(exception_info.value)
@pytest.mark.p2
def test_permission_unset(self, client):
payload = {"name": "permission_unset"}
dataset = client.create_dataset(**payload)
assert dataset.permission == "me", str(dataset)
@pytest.mark.p3
def test_permission_none(self, client):
payload = {"name": "permission_none", "permission": None}
with pytest.raises(Exception) as exception_info:
client.create_dataset(**payload)
assert "not instance of" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p1
@pytest.mark.parametrize(
"name, chunk_method",
[
("naive", "naive"),
("book", "book"),
("email", "email"),
("laws", "laws"),
("manual", "manual"),
("one", "one"),
("paper", "paper"),
("picture", "picture"),
("presentation", "presentation"),
("qa", "qa"),
("table", "table"),
("tag", "tag"),
],
ids=["naive", "book", "email", "laws", "manual", "one", "paper", "picture", "presentation", "qa", "table", "tag"],
)
def test_chunk_method(self, client, name, chunk_method):
payload = {"name": name, "chunk_method": chunk_method}
dataset = client.create_dataset(**payload)
assert dataset.chunk_method == chunk_method, str(dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
"name, chunk_method",
[
("empty", ""),
("unknown", "unknown"),
],
ids=["empty", "unknown"],
)
def test_chunk_method_invalid(self, client, name, chunk_method):
payload = {"name": name, "chunk_method": chunk_method}
with pytest.raises(Exception) as exception_info:
client.create_dataset(**payload)
assert "Input should be 'naive', 'book', 'email', 'laws', 'manual', 'one', 'paper', 'picture', 'presentation', 'qa', 'table' or 'tag'" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
def test_chunk_method_unset(self, client):
payload = {"name": "chunk_method_unset"}
dataset = client.create_dataset(**payload)
assert dataset.chunk_method == "naive", str(dataset)
@pytest.mark.p3
def test_chunk_method_none(self, client):
payload = {"name": "chunk_method_none", "chunk_method": None}
with pytest.raises(Exception) as exception_info:
client.create_dataset(**payload)
assert "not instance of" in str(exception_info.value), str(exception_info.value)
@pytest.mark.p1
@pytest.mark.parametrize(
"name, parser_config",
[
("auto_keywords_min", {"auto_keywords": 0}),
("auto_keywords_mid", {"auto_keywords": 16}),
("auto_keywords_max", {"auto_keywords": 32}),
("auto_questions_min", {"auto_questions": 0}),
("auto_questions_mid", {"auto_questions": 5}),
("auto_questions_max", {"auto_questions": 10}),
("chunk_token_num_min", {"chunk_token_num": 1}),
("chunk_token_num_mid", {"chunk_token_num": 1024}),
("chunk_token_num_max", {"chunk_token_num": 2048}),
("delimiter", {"delimiter": "\n"}),
("delimiter_space", {"delimiter": " "}),
("html4excel_true", {"html4excel": True}),
("html4excel_false", {"html4excel": False}),
("layout_recognize_DeepDOC", {"layout_recognize": "DeepDOC"}),
("layout_recognize_navie", {"layout_recognize": "Plain Text"}),
("tag_kb_ids", {"tag_kb_ids": ["1", "2"]}),
("topn_tags_min", {"topn_tags": 1}),
("topn_tags_mid", {"topn_tags": 5}),
("topn_tags_max", {"topn_tags": 10}),
("filename_embd_weight_min", {"filename_embd_weight": 0.1}),
("filename_embd_weight_mid", {"filename_embd_weight": 0.5}),
("filename_embd_weight_max", {"filename_embd_weight": 1.0}),
("task_page_size_min", {"task_page_size": 1}),
("task_page_size_None", {"task_page_size": None}),
("pages", {"pages": [[1, 100]]}),
("pages_none", {"pages": None}),
("graphrag_true", {"graphrag": {"use_graphrag": True}}),
("graphrag_false", {"graphrag": {"use_graphrag": False}}),
("graphrag_entity_types", {"graphrag": {"entity_types": ["age", "sex", "height", "weight"]}}),
("graphrag_method_general", {"graphrag": {"method": "general"}}),
("graphrag_method_light", {"graphrag": {"method": "light"}}),
("graphrag_community_true", {"graphrag": {"community": True}}),
("graphrag_community_false", {"graphrag": {"community": False}}),
("graphrag_resolution_true", {"graphrag": {"resolution": True}}),
("graphrag_resolution_false", {"graphrag": {"resolution": False}}),
("raptor_true", {"raptor": {"use_raptor": True}}),
("raptor_false", {"raptor": {"use_raptor": False}}),
("raptor_prompt", {"raptor": {"prompt": "Who are you?"}}),
("raptor_max_token_min", {"raptor": {"max_token": 1}}),
("raptor_max_token_mid", {"raptor": {"max_token": 1024}}),
("raptor_max_token_max", {"raptor": {"max_token": 2048}}),
("raptor_threshold_min", {"raptor": {"threshold": 0.0}}),
("raptor_threshold_mid", {"raptor": {"threshold": 0.5}}),
("raptor_threshold_max", {"raptor": {"threshold": 1.0}}),
("raptor_max_cluster_min", {"raptor": {"max_cluster": 1}}),
("raptor_max_cluster_mid", {"raptor": {"max_cluster": 512}}),
("raptor_max_cluster_max", {"raptor": {"max_cluster": 1024}}),
("raptor_random_seed_min", {"raptor": {"random_seed": 0}}),
],
ids=[
"auto_keywords_min",
"auto_keywords_mid",
"auto_keywords_max",
"auto_questions_min",
"auto_questions_mid",
"auto_questions_max",
"chunk_token_num_min",
"chunk_token_num_mid",
"chunk_token_num_max",
"delimiter",
"delimiter_space",
"html4excel_true",
"html4excel_false",
"layout_recognize_DeepDOC",
"layout_recognize_navie",
"tag_kb_ids",
"topn_tags_min",
"topn_tags_mid",
"topn_tags_max",
"filename_embd_weight_min",
"filename_embd_weight_mid",
"filename_embd_weight_max",
"task_page_size_min",
"task_page_size_None",
"pages",
"pages_none",
"graphrag_true",
"graphrag_false",
"graphrag_entity_types",
"graphrag_method_general",
"graphrag_method_light",
"graphrag_community_true",
"graphrag_community_false",
"graphrag_resolution_true",
"graphrag_resolution_false",
"raptor_true",
"raptor_false",
"raptor_prompt",
"raptor_max_token_min",
"raptor_max_token_mid",
"raptor_max_token_max",
"raptor_threshold_min",
"raptor_threshold_mid",
"raptor_threshold_max",
"raptor_max_cluster_min",
"raptor_max_cluster_mid",
"raptor_max_cluster_max",
"raptor_random_seed_min",
],
)
def test_parser_config(self, client, name, parser_config):
parser_config_o = DataSet.ParserConfig(client, parser_config)
payload = {"name": name, "parser_config": parser_config_o}
dataset = client.create_dataset(**payload)
for k, v in parser_config.items():
if isinstance(v, dict):
for kk, vv in v.items():
assert attrgetter(f"{k}.{kk}")(dataset.parser_config) == vv, str(dataset)
else:
assert attrgetter(k)(dataset.parser_config) == v, str(dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
"name, parser_config, expected_message",
[
("auto_keywords_min_limit", {"auto_keywords": -1}, "Input should be greater than or equal to 0"),
("auto_keywords_max_limit", {"auto_keywords": 33}, "Input should be less than or equal to 32"),
("auto_keywords_float_not_allowed", {"auto_keywords": 3.14}, "Input should be a valid integer"),
("auto_keywords_type_invalid", {"auto_keywords": "string"}, "Input should be a valid integer"),
("auto_questions_min_limit", {"auto_questions": -1}, "Input should be greater than or equal to 0"),
("auto_questions_max_limit", {"auto_questions": 11}, "Input should be less than or equal to 10"),
("auto_questions_float_not_allowed", {"auto_questions": 3.14}, "Input should be a valid integer"),
("auto_questions_type_invalid", {"auto_questions": "string"}, "Input should be a valid integer"),
("chunk_token_num_min_limit", {"chunk_token_num": 0}, "Input should be greater than or equal to 1"),
("chunk_token_num_max_limit", {"chunk_token_num": 2049}, "Input should be less than or equal to 2048"),
("chunk_token_num_float_not_allowed", {"chunk_token_num": 3.14}, "Input should be a valid integer"),
("chunk_token_num_type_invalid", {"chunk_token_num": "string"}, "Input should be a valid integer"),
("delimiter_empty", {"delimiter": ""}, "String should have at least 1 character"),
("html4excel_type_invalid", {"html4excel": "string"}, "Input should be a valid boolean"),
("tag_kb_ids_not_list", {"tag_kb_ids": "1,2"}, "Input should be a valid list"),
("tag_kb_ids_int_in_list", {"tag_kb_ids": [1, 2]}, "Input should be a valid string"),
("topn_tags_min_limit", {"topn_tags": 0}, "Input should be greater than or equal to 1"),
("topn_tags_max_limit", {"topn_tags": 11}, "Input should be less than or equal to 10"),
("topn_tags_float_not_allowed", {"topn_tags": 3.14}, "Input should be a valid integer"),
("topn_tags_type_invalid", {"topn_tags": "string"}, "Input should be a valid integer"),
("filename_embd_weight_min_limit", {"filename_embd_weight": -1}, "Input should be greater than or equal to 0"),
("filename_embd_weight_max_limit", {"filename_embd_weight": 1.1}, "Input should be less than or equal to 1"),
("filename_embd_weight_type_invalid", {"filename_embd_weight": "string"}, "Input should be a valid number"),
("task_page_size_min_limit", {"task_page_size": 0}, "Input should be greater than or equal to 1"),
("task_page_size_float_not_allowed", {"task_page_size": 3.14}, "Input should be a valid integer"),
("task_page_size_type_invalid", {"task_page_size": "string"}, "Input should be a valid integer"),
("pages_not_list", {"pages": "1,2"}, "Input should be a valid list"),
("pages_not_list_in_list", {"pages": ["1,2"]}, "Input should be a valid list"),
("pages_not_int_list", {"pages": [["string1", "string2"]]}, "Input should be a valid integer"),
("graphrag_type_invalid", {"graphrag": {"use_graphrag": "string"}}, "Input should be a valid boolean"),
("graphrag_entity_types_not_list", {"graphrag": {"entity_types": "1,2"}}, "Input should be a valid list"),
("graphrag_entity_types_not_str_in_list", {"graphrag": {"entity_types": [1, 2]}}, "nput should be a valid string"),
("graphrag_method_unknown", {"graphrag": {"method": "unknown"}}, "Input should be 'light' or 'general'"),
("graphrag_method_none", {"graphrag": {"method": None}}, "Input should be 'light' or 'general'"),
("graphrag_community_type_invalid", {"graphrag": {"community": "string"}}, "Input should be a valid boolean"),
("graphrag_resolution_type_invalid", {"graphrag": {"resolution": "string"}}, "Input should be a valid boolean"),
("raptor_type_invalid", {"raptor": {"use_raptor": "string"}}, "Input should be a valid boolean"),
("raptor_prompt_empty", {"raptor": {"prompt": ""}}, "String should have at least 1 character"),
("raptor_prompt_space", {"raptor": {"prompt": " "}}, "String should have at least 1 character"),
("raptor_max_token_min_limit", {"raptor": {"max_token": 0}}, "Input should be greater than or equal to 1"),
("raptor_max_token_max_limit", {"raptor": {"max_token": 2049}}, "Input should be less than or equal to 2048"),
("raptor_max_token_float_not_allowed", {"raptor": {"max_token": 3.14}}, "Input should be a valid integer"),
("raptor_max_token_type_invalid", {"raptor": {"max_token": "string"}}, "Input should be a valid integer"),
("raptor_threshold_min_limit", {"raptor": {"threshold": -0.1}}, "Input should be greater than or equal to 0"),
("raptor_threshold_max_limit", {"raptor": {"threshold": 1.1}}, "Input should be less than or equal to 1"),
("raptor_threshold_type_invalid", {"raptor": {"threshold": "string"}}, "Input should be a valid number"),
("raptor_max_cluster_min_limit", {"raptor": {"max_cluster": 0}}, "Input should be greater than or equal to 1"),
("raptor_max_cluster_max_limit", {"raptor": {"max_cluster": 1025}}, "Input should be less than or equal to 1024"),
("raptor_max_cluster_float_not_allowed", {"raptor": {"max_cluster": 3.14}}, "Input should be a valid integer"),
("raptor_max_cluster_type_invalid", {"raptor": {"max_cluster": "string"}}, "Input should be a valid integer"),
("raptor_random_seed_min_limit", {"raptor": {"random_seed": -1}}, "Input should be greater than or equal to 0"),
("raptor_random_seed_float_not_allowed", {"raptor": {"random_seed": 3.14}}, "Input should be a valid integer"),
("raptor_random_seed_type_invalid", {"raptor": {"random_seed": "string"}}, "Input should be a valid integer"),
("parser_config_type_invalid", {"delimiter": "a" * 65536}, "Parser config exceeds size limit (max 65,535 characters)"),
],
ids=[
"auto_keywords_min_limit",
"auto_keywords_max_limit",
"auto_keywords_float_not_allowed",
"auto_keywords_type_invalid",
"auto_questions_min_limit",
"auto_questions_max_limit",
"auto_questions_float_not_allowed",
"auto_questions_type_invalid",
"chunk_token_num_min_limit",
"chunk_token_num_max_limit",
"chunk_token_num_float_not_allowed",
"chunk_token_num_type_invalid",
"delimiter_empty",
"html4excel_type_invalid",
"tag_kb_ids_not_list",
"tag_kb_ids_int_in_list",
"topn_tags_min_limit",
"topn_tags_max_limit",
"topn_tags_float_not_allowed",
"topn_tags_type_invalid",
"filename_embd_weight_min_limit",
"filename_embd_weight_max_limit",
"filename_embd_weight_type_invalid",
"task_page_size_min_limit",
"task_page_size_float_not_allowed",
"task_page_size_type_invalid",
"pages_not_list",
"pages_not_list_in_list",
"pages_not_int_list",
"graphrag_type_invalid",
"graphrag_entity_types_not_list",
"graphrag_entity_types_not_str_in_list",
"graphrag_method_unknown",
"graphrag_method_none",
"graphrag_community_type_invalid",
"graphrag_resolution_type_invalid",
"raptor_type_invalid",
"raptor_prompt_empty",
"raptor_prompt_space",
"raptor_max_token_min_limit",
"raptor_max_token_max_limit",
"raptor_max_token_float_not_allowed",
"raptor_max_token_type_invalid",
"raptor_threshold_min_limit",
"raptor_threshold_max_limit",
"raptor_threshold_type_invalid",
"raptor_max_cluster_min_limit",
"raptor_max_cluster_max_limit",
"raptor_max_cluster_float_not_allowed",
"raptor_max_cluster_type_invalid",
"raptor_random_seed_min_limit",
"raptor_random_seed_float_not_allowed",
"raptor_random_seed_type_invalid",
"parser_config_type_invalid",
],
)
def test_parser_config_invalid(self, client, name, parser_config, expected_message):
parser_config_o = DataSet.ParserConfig(client, parser_config)
payload = {"name": name, "parser_config": parser_config_o}
with pytest.raises(Exception) as exception_info:
client.create_dataset(**payload)
assert expected_message in str(exception_info.value), str(exception_info.value)
@pytest.mark.p2
def test_parser_config_empty(self, client):
excepted_value = DataSet.ParserConfig(
client,
DEFAULT_PARSER_CONFIG,
)
parser_config_o = DataSet.ParserConfig(client, {})
payload = {"name": "parser_config_empty", "parser_config": parser_config_o}
dataset = client.create_dataset(**payload)
assert str(dataset.parser_config) == str(excepted_value), str(dataset)
@pytest.mark.p2
def test_parser_config_unset(self, client):
excepted_value = DataSet.ParserConfig(
client,
DEFAULT_PARSER_CONFIG,
)
payload = {"name": "parser_config_unset"}
dataset = client.create_dataset(**payload)
assert str(dataset.parser_config) == str(excepted_value), str(dataset)
@pytest.mark.p3
def test_parser_config_none(self, client):
excepted_value = DataSet.ParserConfig(
client,
DEFAULT_PARSER_CONFIG,
)
payload = {"name": "parser_config_empty", "parser_config": None}
dataset = client.create_dataset(**payload)
assert str(dataset.parser_config) == str(excepted_value), str(dataset)
@pytest.mark.p2
@pytest.mark.parametrize(
"payload",
[
{"name": "id", "id": "id"},
{"name": "tenant_id", "tenant_id": "e57c1966f99211efb41e9e45646e0111"},
{"name": "created_by", "created_by": "created_by"},
{"name": "create_date", "create_date": "Tue, 11 Mar 2025 13:37:23 GMT"},
{"name": "create_time", "create_time": 1741671443322},
{"name": "update_date", "update_date": "Tue, 11 Mar 2025 13:37:23 GMT"},
{"name": "update_time", "update_time": 1741671443339},
{"name": "document_count", "document_count": 1},
{"name": "chunk_count", "chunk_count": 1},
{"name": "token_num", "token_num": 1},
{"name": "status", "status": "1"},
{"name": "pagerank", "pagerank": 50},
{"name": "unknown_field", "unknown_field": "unknown_field"},
],
)
def test_unsupported_field(self, client, payload):
with pytest.raises(Exception) as exception_info:
client.create_dataset(**payload)
assert "got an unexpected keyword argument" in str(exception_info.value), str(exception_info.value)
@pytest.mark.usefixtures("clear_datasets")
class TestParserConfigBugFix:
@pytest.mark.p1
def test_parser_config_missing_raptor_and_graphrag(self, client):
parser_config = DataSet.ParserConfig(client, {"chunk_token_num": 1024})
payload = {"name": "test_parser_config_missing_fields_sdk", "parser_config": parser_config}
dataset = client.create_dataset(**payload)
config = dataset.parser_config
assert hasattr(config, "raptor"), "raptor field should be present"
assert hasattr(config, "graphrag"), "graphrag field should be present"
assert config.raptor.use_raptor is False, "raptor.use_raptor should default to False"
assert config.graphrag.use_graphrag is False, "graphrag.use_graphrag should default to False"
assert config.chunk_token_num == 1024, "User-provided chunk_token_num should be preserved"
@pytest.mark.p1
def test_parser_config_with_only_raptor(self, client):
parser_config = DataSet.ParserConfig(client, {"chunk_token_num": 1024, "raptor": {"use_raptor": True}})
payload = {"name": "test_parser_config_only_raptor_sdk", "parser_config": parser_config}
dataset = client.create_dataset(**payload)
config = dataset.parser_config
assert config.raptor.use_raptor is True, "User-provided raptor.use_raptor should be preserved"
assert hasattr(config, "graphrag"), "graphrag field should be present"
assert config.graphrag.use_graphrag is False, "graphrag.use_graphrag should default to False"
@pytest.mark.p1
def test_parser_config_with_only_graphrag(self, client):
parser_config = DataSet.ParserConfig(client, {"chunk_token_num": 1024, "graphrag": {"use_graphrag": True}})
payload = {"name": "test_parser_config_only_graphrag_sdk", "parser_config": parser_config}
dataset = client.create_dataset(**payload)
config = dataset.parser_config
assert hasattr(config, "raptor"), "raptor field should be present"
assert config.raptor.use_raptor is False, "raptor.use_raptor should default to False"
assert config.graphrag.use_graphrag is True, "User-provided graphrag.use_graphrag should be preserved"
@pytest.mark.p1
def test_parser_config_with_both_fields(self, client):
parser_config = DataSet.ParserConfig(client, {"chunk_token_num": 1024, "raptor": {"use_raptor": True}, "graphrag": {"use_graphrag": True}})
payload = {"name": "test_parser_config_both_fields_sdk", "parser_config": parser_config}
dataset = client.create_dataset(**payload)
config = dataset.parser_config
assert config.raptor.use_raptor is True, "User-provided raptor.use_raptor should be preserved"
assert config.graphrag.use_graphrag is True, "User-provided graphrag.use_graphrag should be preserved"
@pytest.mark.p2
@pytest.mark.parametrize("chunk_method", ["qa", "manual", "paper", "book", "laws", "presentation"])
def test_parser_config_different_chunk_methods(self, client, chunk_method):
parser_config = DataSet.ParserConfig(client, {"chunk_token_num": 512})
payload = {"name": f"test_parser_config_{chunk_method}_sdk", "chunk_method": chunk_method, "parser_config": parser_config}
dataset = client.create_dataset(**payload)
config = dataset.parser_config
assert hasattr(config, "raptor"), f"raptor field should be present for {chunk_method}"
assert hasattr(config, "graphrag"), f"graphrag field should be present for {chunk_method}"
assert config.raptor.use_raptor is False, f"raptor.use_raptor should default to False for {chunk_method}"
assert config.graphrag.use_graphrag is False, f"graphrag.use_graphrag should default to False for {chunk_method}"
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_dataset_mangement/test_create_dataset.py",
"license": "Apache License 2.0",
"lines": 646,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.