| sample_id | text | metadata | category |
|---|---|---|---|
infiniflow/ragflow:test/testcases/test_sdk_api/test_message_management/test_update_message_status.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import pytest
from ragflow_sdk import RAGFlow, Memory
from configs import INVALID_API_TOKEN, HOST_ADDRESS
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_message",
[
(None, "<Unauthorized '401: Unauthorized'>"),
(INVALID_API_TOKEN, "<Unauthorized '401: Unauthorized'>"),
],
)
def test_auth_invalid(self, invalid_auth, expected_message):
client = RAGFlow(invalid_auth, HOST_ADDRESS)
with pytest.raises(Exception) as exception_info:
memory = Memory(client, {"id": "empty_memory_id"})
memory.update_message_status(0, False)
assert str(exception_info.value) == expected_message, str(exception_info.value)
@pytest.mark.usefixtures("add_memory_with_5_raw_message_func")
class TestUpdateMessageStatus:
@pytest.mark.p1
def test_update_to_false(self, client):
memory_id = self.memory_id
memory = Memory(client, {"id": memory_id})
list_res = memory.list_memory_messages()
assert len(list_res["messages"]["message_list"]) > 0, str(list_res)
message = random.choice(list_res["messages"]["message_list"])
res = memory.update_message_status(message["message_id"], False)
assert res, str(res)
updated_message_res = memory.get_message_content(message["message_id"])
assert not updated_message_res["status"], str(updated_message_res)
@pytest.mark.p1
def test_update_to_true(self, client):
memory_id = self.memory_id
memory = Memory(client, {"id": memory_id})
list_res = memory.list_memory_messages()
assert len(list_res["messages"]["message_list"]) > 0, str(list_res)
# set 1 random message to false first
message = random.choice(list_res["messages"]["message_list"])
set_to_false_res = memory.update_message_status(message["message_id"], False)
assert set_to_false_res, str(set_to_false_res)
updated_message_res = memory.get_message_content(message["message_id"])
assert not updated_message_res["status"], updated_message_res
# set to true
set_to_true_res = memory.update_message_status(message["message_id"], True)
assert set_to_true_res, str(set_to_true_res)
res = memory.get_message_content(message["message_id"])
assert res["status"], res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_message_management/test_update_message_status.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_message_app/test_add_message.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import uuid
import pytest
from test_web_api.common import list_memory_message, add_message
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
)
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
res = add_message(invalid_auth, {})
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
@pytest.mark.usefixtures("add_empty_raw_type_memory")
class TestAddRawMessage:
@pytest.mark.p1
def test_add_raw_message(self, WebApiAuth):
memory_id = self.memory_id
agent_id = uuid.uuid4().hex
session_id = uuid.uuid4().hex
message_payload = {
"memory_id": [memory_id],
"agent_id": agent_id,
"session_id": session_id,
"user_id": "",
"user_input": "what is pineapple?",
"agent_response": """
A pineapple is a tropical fruit known for its sweet, tangy flavor and distinctive, spiky appearance. Here are the key facts:
Scientific Name: Ananas comosus
Physical Description: It has a tough, spiky, diamond-patterned outer skin (rind) that is usually green, yellow, or brownish. Inside, the juicy yellow flesh surrounds a fibrous core.
Growth: Unlike most fruits, pineapples do not grow on trees. They grow from a central stem as a composite fruit, meaning they are formed from many individual berries that fuse together around the core. They grow on a short, leafy plant close to the ground.
Uses: Pineapples are eaten fresh, cooked, grilled, juiced, or canned. They are a popular ingredient in desserts, fruit salads, savory dishes (like pizzas or ham glazes), smoothies, and cocktails.
Nutrition: They are a good source of Vitamin C, manganese, and contain an enzyme called bromelain, which aids in digestion and can tenderize meat.
Symbolism: The pineapple is a traditional symbol of hospitality and welcome in many cultures.
Are you asking about the fruit itself, or its use in a specific context?
"""
}
add_res = add_message(WebApiAuth, message_payload)
assert add_res["code"] == 0, add_res
time.sleep(2)  # wait for the index to refresh before searching
message_res = list_memory_message(WebApiAuth, memory_id, params={"agent_id": agent_id, "keywords": session_id})
assert message_res["code"] == 0, message_res
assert message_res["data"]["messages"]["total_count"] > 0
for message in message_res["data"]["messages"]["message_list"]:
assert message["agent_id"] == agent_id, message
assert message["session_id"] == session_id, message
@pytest.mark.p2
def test_add_message_invalid_memory_id(self, WebApiAuth):
message_payload = {
"memory_id": ["missing_memory_id"],
"agent_id": uuid.uuid4().hex,
"session_id": uuid.uuid4().hex,
"user_id": "",
"user_input": "what is pineapple?",
"agent_response": "pineapple response",
}
res = add_message(WebApiAuth, message_payload)
assert res["code"] == 500, res
assert "Some messages failed to add" in res["message"], res
@pytest.mark.usefixtures("add_empty_multiple_type_memory")
class TestAddMultipleTypeMessage:
@pytest.mark.p1
def test_add_multiple_type_message(self, WebApiAuth):
memory_id = self.memory_id
agent_id = uuid.uuid4().hex
session_id = uuid.uuid4().hex
message_payload = {
"memory_id": [memory_id],
"agent_id": agent_id,
"session_id": session_id,
"user_id": "",
"user_input": "what is pineapple?",
"agent_response": """
A pineapple is a tropical fruit known for its sweet, tangy flavor and distinctive, spiky appearance. Here are the key facts:
Scientific Name: Ananas comosus
Physical Description: It has a tough, spiky, diamond-patterned outer skin (rind) that is usually green, yellow, or brownish. Inside, the juicy yellow flesh surrounds a fibrous core.
Growth: Unlike most fruits, pineapples do not grow on trees. They grow from a central stem as a composite fruit, meaning they are formed from many individual berries that fuse together around the core. They grow on a short, leafy plant close to the ground.
Uses: Pineapples are eaten fresh, cooked, grilled, juiced, or canned. They are a popular ingredient in desserts, fruit salads, savory dishes (like pizzas or ham glazes), smoothies, and cocktails.
Nutrition: They are a good source of Vitamin C, manganese, and contain an enzyme called bromelain, which aids in digestion and can tenderize meat.
Symbolism: The pineapple is a traditional symbol of hospitality and welcome in many cultures.
Are you asking about the fruit itself, or its use in a specific context?
"""
}
add_res = add_message(WebApiAuth, message_payload)
assert add_res["code"] == 0, add_res
time.sleep(2)  # wait for the index to refresh before searching
message_res = list_memory_message(WebApiAuth, memory_id, params={"agent_id": agent_id, "keywords": session_id})
assert message_res["code"] == 0, message_res
assert message_res["data"]["messages"]["total_count"] > 0
for message in message_res["data"]["messages"]["message_list"]:
assert message["agent_id"] == agent_id, message
assert message["session_id"] == session_id, message
@pytest.mark.usefixtures("add_2_multiple_type_memory")
class TestAddToMultipleMemory:
@pytest.mark.p1
def test_add_to_multiple_memory(self, WebApiAuth):
memory_ids = self.memory_ids
agent_id = uuid.uuid4().hex
session_id = uuid.uuid4().hex
message_payload = {
"memory_id": memory_ids,
"agent_id": agent_id,
"session_id": session_id,
"user_id": "",
"user_input": "what is pineapple?",
"agent_response": """
A pineapple is a tropical fruit known for its sweet, tangy flavor and distinctive, spiky appearance. Here are the key facts:
Scientific Name: Ananas comosus
Physical Description: It has a tough, spiky, diamond-patterned outer skin (rind) that is usually green, yellow, or brownish. Inside, the juicy yellow flesh surrounds a fibrous core.
Growth: Unlike most fruits, pineapples do not grow on trees. They grow from a central stem as a composite fruit, meaning they are formed from many individual berries that fuse together around the core. They grow on a short, leafy plant close to the ground.
Uses: Pineapples are eaten fresh, cooked, grilled, juiced, or canned. They are a popular ingredient in desserts, fruit salads, savory dishes (like pizzas or ham glazes), smoothies, and cocktails.
Nutrition: They are a good source of Vitamin C, manganese, and contain an enzyme called bromelain, which aids in digestion and can tenderize meat.
Symbolism: The pineapple is a traditional symbol of hospitality and welcome in many cultures.
Are you asking about the fruit itself, or its use in a specific context?
"""
}
add_res = add_message(WebApiAuth, message_payload)
assert add_res["code"] == 0, add_res
time.sleep(2)  # wait for the index to refresh before searching
for memory_id in memory_ids:
message_res = list_memory_message(WebApiAuth, memory_id, params={"agent_id": agent_id, "keywords": session_id})
assert message_res["code"] == 0, message_res
assert message_res["data"]["messages"]["total_count"] > 0
for message in message_res["data"]["messages"]["message_list"]:
assert message["agent_id"] == agent_id, message
assert message["session_id"] == session_id, message
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_message_app/test_add_message.py",
"license": "Apache License 2.0",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_message_app/test_forget_message.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import pytest
import requests
from test_web_api.common import forget_message, list_memory_message, get_message_content
from configs import HOST_ADDRESS, INVALID_API_TOKEN, VERSION
from libs.auth import RAGFlowWebApiAuth
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
)
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
res = forget_message(invalid_auth, "empty_memory_id", 0)
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
@pytest.mark.usefixtures("add_memory_with_5_raw_message_func")
class TestForgetMessage:
@pytest.mark.p1
def test_forget_message(self, WebApiAuth):
memory_id = self.memory_id
list_res = list_memory_message(WebApiAuth, memory_id)
assert list_res["code"] == 0, list_res
assert len(list_res["data"]["messages"]["message_list"]) > 0
message = random.choice(list_res["data"]["messages"]["message_list"])
res = forget_message(WebApiAuth, memory_id, message["message_id"])
assert res["code"] == 0, res
forgot_message_res = get_message_content(WebApiAuth, memory_id, message["message_id"])
assert forgot_message_res["code"] == 0, forgot_message_res
assert forgot_message_res["data"]["forget_at"] not in ["-", ""], forgot_message_res
@pytest.mark.p2
def test_forget_message_invalid_memory_id(self, WebApiAuth):
res = forget_message(WebApiAuth, "missing_memory_id", 1)
assert res["code"] == 404, res
assert "not found" in res["message"].lower(), res
@pytest.mark.p2
def test_forget_message_invalid_message_id(self, WebApiAuth):
memory_id = self.memory_id
url = f"{HOST_ADDRESS}/api/{VERSION}/messages/{memory_id}:invalid_message_id"
res = requests.delete(url=url, headers={"Content-Type": "application/json"}, auth=WebApiAuth).json()
assert res["code"] == 500, res
assert "Internal server error" in res["message"], res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_message_app/test_forget_message.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_message_app/test_search_message.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from test_web_api.common import search_message, list_memory_message
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
)
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
res = search_message(invalid_auth)
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
@pytest.mark.usefixtures("add_memory_with_multiple_type_message_func")
class TestSearchMessage:
@pytest.mark.p1
def test_query(self, WebApiAuth):
memory_id = self.memory_id
list_res = list_memory_message(WebApiAuth, memory_id)
assert list_res["code"] == 0, list_res
assert list_res["data"]["messages"]["total_count"] > 0
query = "Coriander is a versatile herb with two main edible parts. What's its name can refer to?"
res = search_message(WebApiAuth, {"memory_id": memory_id, "query": query})
assert res["code"] == 0, res
assert len(res["data"]) > 0
@pytest.mark.p2
def test_query_with_agent_filter(self, WebApiAuth):
memory_id = self.memory_id
list_res = list_memory_message(WebApiAuth, memory_id)
assert list_res["code"] == 0, list_res
assert list_res["data"]["messages"]["total_count"] > 0
agent_id = self.agent_id
query = "Coriander is a versatile herb with two main edible parts. What's its name can refer to?"
res = search_message(WebApiAuth, {"memory_id": memory_id, "query": query, "agent_id": agent_id})
assert res["code"] == 0, res
assert len(res["data"]) > 0
for message in res["data"]:
assert message["agent_id"] == agent_id, message
@pytest.mark.p2
def test_query_with_not_default_params(self, WebApiAuth):
memory_id = self.memory_id
list_res = list_memory_message(WebApiAuth, memory_id)
assert list_res["code"] == 0, list_res
assert list_res["data"]["messages"]["total_count"] > 0
query = "Coriander is a versatile herb with two main edible parts. What's its name can refer to?"
params = {
"similarity_threshold": 0.1,
"keywords_similarity_weight": 0.6,
"top_n": 4
}
res = search_message(WebApiAuth, {"memory_id": memory_id, "query": query, **params})
assert res["code"] == 0, res
assert len(res["data"]) > 0
assert len(res["data"]) <= params["top_n"]
@pytest.mark.p2
def test_query_missing_query(self, WebApiAuth):
memory_id = self.memory_id
res = search_message(WebApiAuth, {"memory_id": memory_id})
assert res["code"] in [100, 500], res
@pytest.mark.p2
def test_query_missing_memory_id(self, WebApiAuth):
res = search_message(WebApiAuth, {"query": "what is coriander"})
assert res["code"] == 0, res
assert isinstance(res["data"], list), res
@pytest.mark.p2
def test_query_with_csv_memory_ids(self, WebApiAuth):
memory_id = self.memory_id
query = "Coriander is a versatile herb."
res = search_message(WebApiAuth, {"memory_id": f"{memory_id},{memory_id}", "query": query})
assert res["code"] == 0, res
assert isinstance(res["data"], list), res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_message_app/test_search_message.py",
"license": "Apache License 2.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_message_app/test_update_message_status.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import pytest
import requests
from test_web_api.common import update_message_status, list_memory_message, get_message_content
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
from configs import HOST_ADDRESS, VERSION
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
)
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
res = update_message_status(invalid_auth, "empty_memory_id", 0, False)
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
@pytest.mark.usefixtures("add_memory_with_5_raw_message_func")
class TestUpdateMessageStatus:
@pytest.mark.p1
def test_update_to_false(self, WebApiAuth):
memory_id = self.memory_id
list_res = list_memory_message(WebApiAuth, memory_id)
assert list_res["code"] == 0, list_res
assert len(list_res["data"]["messages"]["message_list"]) > 0
message = random.choice(list_res["data"]["messages"]["message_list"])
res = update_message_status(WebApiAuth, memory_id, message["message_id"], False)
assert res["code"] == 0, res
updated_message_res = get_message_content(WebApiAuth, memory_id, message["message_id"])
assert updated_message_res["code"] == 0, updated_message_res
assert not updated_message_res["data"]["status"], updated_message_res
@pytest.mark.p1
def test_update_to_true(self, WebApiAuth):
memory_id = self.memory_id
list_res = list_memory_message(WebApiAuth, memory_id)
assert list_res["code"] == 0, list_res
assert len(list_res["data"]["messages"]["message_list"]) > 0
# set 1 random message to false first
message = random.choice(list_res["data"]["messages"]["message_list"])
set_to_false_res = update_message_status(WebApiAuth, memory_id, message["message_id"], False)
assert set_to_false_res["code"] == 0, set_to_false_res
updated_message_res = get_message_content(WebApiAuth, memory_id, message["message_id"])
assert updated_message_res["code"] == 0, updated_message_res
assert not updated_message_res["data"]["status"], updated_message_res
# set to true
set_to_true_res = update_message_status(WebApiAuth, memory_id, message["message_id"], True)
assert set_to_true_res["code"] == 0, set_to_true_res
res = get_message_content(WebApiAuth, memory_id, message["message_id"])
assert res["code"] == 0, res
assert res["data"]["status"], res
@pytest.mark.p2
def test_update_invalid_status_type(self, WebApiAuth):
memory_id = self.memory_id
list_res = list_memory_message(WebApiAuth, memory_id)
assert list_res["code"] == 0, list_res
message_id = list_res["data"]["messages"]["message_list"][0]["message_id"]
url = f"{HOST_ADDRESS}/api/{VERSION}/messages/{memory_id}:{message_id}"
res = requests.put(url=url, headers={"Content-Type": "application/json"}, auth=WebApiAuth, json={"status": "false"}).json()
assert res["code"] == 101, res
assert "Status must be a boolean." in res["message"], res
@pytest.mark.p2
def test_update_invalid_memory_id(self, WebApiAuth):
res = update_message_status(WebApiAuth, "missing_memory_id", 1, False)
assert res["code"] == 404, res
assert "not found" in res["message"].lower(), res
@pytest.mark.p2
def test_update_invalid_message_id(self, WebApiAuth):
memory_id = self.memory_id
url = f"{HOST_ADDRESS}/api/{VERSION}/messages/{memory_id}:invalid_message_id"
res = requests.put(
url=url,
headers={"Content-Type": "application/json"},
auth=WebApiAuth,
json={"status": True},
).json()
assert res["code"] == 500, res
assert "Internal server error" in res["message"], res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_message_app/test_update_message_status.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:api/db/services/system_settings_service.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
from common.time_utils import current_timestamp, datetime_format
from api.db.db_models import DB
from api.db.db_models import SystemSettings
from api.db.services.common_service import CommonService
class SystemSettingsService(CommonService):
model = SystemSettings
@classmethod
@DB.connection_context()
def get_by_name(cls, name):
objs = cls.model.select().where(cls.model.name == name)
return objs
@classmethod
@DB.connection_context()
def update_by_name(cls, name, obj):
obj["update_time"] = current_timestamp()
obj["update_date"] = datetime_format(datetime.now())
cls.model.update(obj).where(cls.model.name == name).execute()
return SystemSettings(**obj)
@classmethod
@DB.connection_context()
def get_record_count(cls):
count = cls.model.select().count()
return count
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/db/services/system_settings_service.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:test/testcases/test_web_api/test_message_app/test_get_message_content.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import pytest
from test_web_api.common import get_message_content, get_recent_message
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
)
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
res = get_message_content(invalid_auth, "empty_memory_id", 0)
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
@pytest.mark.usefixtures("add_memory_with_multiple_type_message_func")
class TestGetMessageContent:
@pytest.mark.p1
def test_get_message_content(self, WebApiAuth):
memory_id = self.memory_id
recent_messages = get_recent_message(WebApiAuth, {"memory_id": memory_id})
assert len(recent_messages["data"]) > 0, recent_messages
message = random.choice(recent_messages["data"])
message_id = message["message_id"]
content_res = get_message_content(WebApiAuth, memory_id, message_id)
for field in ["content", "content_embed"]:
assert field in content_res["data"]
assert content_res["data"][field] is not None, content_res
@pytest.mark.p2
def test_get_message_content_invalid_memory_id(self, WebApiAuth):
res = get_message_content(WebApiAuth, "missing_memory_id", 1)
assert res["code"] == 404, res
assert "not found" in res["message"].lower(), res
@pytest.mark.p2
def test_get_message_content_invalid_message_id(self, WebApiAuth):
memory_id = self.memory_id
res = get_message_content(WebApiAuth, memory_id, 999999999)
assert res["code"] == 404, res
assert "not found" in res["message"].lower(), res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_message_app/test_get_message_content.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_message_app/test_get_recent_message.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import pytest
from test_web_api.common import get_recent_message
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
)
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
res = get_recent_message(invalid_auth)
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
@pytest.mark.usefixtures("add_memory_with_5_raw_message_func")
class TestGetRecentMessage:
@pytest.mark.p1
def test_get_recent_messages(self, WebApiAuth):
memory_id = self.memory_id
res = get_recent_message(WebApiAuth, params={"memory_id": memory_id})
assert res["code"] == 0, res
assert len(res["data"]) == 5, res
@pytest.mark.p2
def test_filter_recent_messages_by_agent(self, WebApiAuth):
memory_id = self.memory_id
agent_ids = self.agent_ids
agent_id = random.choice(agent_ids)
res = get_recent_message(WebApiAuth, params={"agent_id": agent_id, "memory_id": memory_id})
assert res["code"] == 0, res
for message in res["data"]:
assert message["agent_id"] == agent_id, message
@pytest.mark.p2
def test_filter_recent_messages_by_session(self, WebApiAuth):
memory_id = self.memory_id
session_ids = self.session_ids
session_id = random.choice(session_ids)
res = get_recent_message(WebApiAuth, params={"session_id": session_id, "memory_id": memory_id})
assert res["code"] == 0, res
for message in res["data"]:
assert message["session_id"] == session_id, message
@pytest.mark.p2
def test_get_recent_messages_missing_memory_id(self, WebApiAuth):
res = get_recent_message(WebApiAuth, params={})
assert res["code"] == 101, res
assert "memory_ids is required" in res["message"], res
@pytest.mark.p2
def test_get_recent_messages_csv_memory_ids(self, WebApiAuth):
memory_id = self.memory_id
res = get_recent_message(WebApiAuth, params={"memory_id": f"{memory_id},{memory_id}"})
assert res["code"] == 0, res
assert isinstance(res["data"], list), res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_message_app/test_get_recent_message.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_message_app/test_list_message.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import random
import pytest
from test_web_api.common import list_memory_message
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
)
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
res = list_memory_message(invalid_auth, "")
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
@pytest.mark.usefixtures("add_memory_with_5_raw_message_func")
class TestMessageList:
@pytest.mark.p2
def test_params_unset(self, WebApiAuth):
memory_id = self.memory_id
res = list_memory_message(WebApiAuth, memory_id, params=None)
assert res["code"] == 0, res
assert len(res["data"]["messages"]["message_list"]) == 5, res
@pytest.mark.p2
def test_params_empty(self, WebApiAuth):
memory_id = self.memory_id
res = list_memory_message(WebApiAuth, memory_id, params={})
assert res["code"] == 0, res
assert len(res["data"]["messages"]["message_list"]) == 5, res
@pytest.mark.p1
@pytest.mark.parametrize(
"params, expected_page_size",
[
({"page": 1, "page_size": 10}, 5),
({"page": 2, "page_size": 10}, 0),
({"page": 1, "page_size": 2}, 2),
({"page": 3, "page_size": 2}, 1),
({"page": 5, "page_size": 10}, 0),
],
ids=["normal_first_page", "beyond_max_page", "normal_last_partial_page", "normal_middle_page",
"full_data_single_page"],
)
def test_page_size(self, WebApiAuth, params, expected_page_size):
# the fixture added 5 messages
memory_id = self.memory_id
res = list_memory_message(WebApiAuth, memory_id, params=params)
assert res["code"] == 0, res
assert len(res["data"]["messages"]["message_list"]) == expected_page_size, res
@pytest.mark.p2
def test_filter_agent_id(self, WebApiAuth):
memory_id = self.memory_id
agent_ids = self.agent_ids
agent_id = random.choice(agent_ids)
res = list_memory_message(WebApiAuth, memory_id, params={"agent_id": agent_id})
assert res["code"] == 0, res
for message in res["data"]["messages"]["message_list"]:
assert message["agent_id"] == agent_id, message
@pytest.mark.p2
@pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="Not support.")
def test_search_keyword(self, WebApiAuth):
memory_id = self.memory_id
session_ids = self.session_ids
session_id = random.choice(session_ids)
slice_start = random.randint(0, len(session_id) - 2)
slice_end = random.randint(slice_start + 1, len(session_id) - 1)
keyword = session_id[slice_start:slice_end]
res = list_memory_message(WebApiAuth, memory_id, params={"keywords": keyword})
assert res["code"] == 0, res
assert len(res["data"]["messages"]["message_list"]) > 0, res
for message in res["data"]["messages"]["message_list"]:
assert keyword in message["session_id"], message
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_message_app/test_list_message.py",
"license": "Apache License 2.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:common/data_source/bitbucket/connector.py | from __future__ import annotations
import copy
from collections.abc import Callable
from collections.abc import Iterator
from datetime import datetime
from datetime import timezone
from typing import Any
from typing import TYPE_CHECKING
from typing_extensions import override
from common.data_source.config import INDEX_BATCH_SIZE
from common.data_source.config import DocumentSource
from common.data_source.config import REQUEST_TIMEOUT_SECONDS
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
CredentialExpiredError,
InsufficientPermissionsError,
UnexpectedValidationError,
)
from common.data_source.interfaces import CheckpointedConnector
from common.data_source.interfaces import CheckpointOutput
from common.data_source.interfaces import IndexingHeartbeatInterface
from common.data_source.interfaces import SecondsSinceUnixEpoch
from common.data_source.interfaces import SlimConnectorWithPermSync
from common.data_source.models import ConnectorCheckpoint
from common.data_source.models import ConnectorFailure
from common.data_source.models import DocumentFailure
from common.data_source.models import SlimDocument
from common.data_source.bitbucket.utils import (
build_auth_client,
list_repositories,
map_pr_to_document,
paginate,
PR_LIST_RESPONSE_FIELDS,
SLIM_PR_LIST_RESPONSE_FIELDS,
)
if TYPE_CHECKING:
import httpx
class BitbucketConnectorCheckpoint(ConnectorCheckpoint):
"""Checkpoint state for resumable Bitbucket PR indexing.
Fields:
repos_queue: Materialized list of repository slugs to process.
current_repo_index: Index of the repository currently being processed.
next_url: Bitbucket "next" URL for continuing pagination within the current repo.
"""
repos_queue: list[str] = []
current_repo_index: int = 0
next_url: str | None = None
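# Illustrative sketch (not part of the original file) of a populated checkpoint;
# the repository slugs and next URL below are hypothetical:
#   BitbucketConnectorCheckpoint(
#       repos_queue=["repo-a", "repo-b"],
#       current_repo_index=1,
#       next_url="https://api.bitbucket.org/2.0/repositories/<workspace>/repo-b/pullrequests?page=2",
#       has_more=True,  # inherited from ConnectorCheckpoint
#   )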
class BitbucketConnector(
CheckpointedConnector[BitbucketConnectorCheckpoint],
SlimConnectorWithPermSync,
):
"""Connector for indexing Bitbucket Cloud pull requests.
Args:
workspace: Bitbucket workspace ID.
repositories: Comma-separated list of repository slugs to index.
projects: Comma-separated list of project keys to index all repositories within.
batch_size: Max number of documents to yield per batch.
"""
def __init__(
self,
workspace: str,
repositories: str | None = None,
projects: str | None = None,
batch_size: int = INDEX_BATCH_SIZE,
) -> None:
self.workspace = workspace
self._repositories = (
[s.strip() for s in repositories.split(",") if s.strip()]
if repositories
else None
)
self._projects: list[str] | None = (
[s.strip() for s in projects.split(",") if s.strip()] if projects else None
)
self.batch_size = batch_size
self.email: str | None = None
self.api_token: str | None = None
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Load API token-based credentials.
Expects a dict with keys: `bitbucket_email`, `bitbucket_api_token`.
"""
self.email = credentials.get("bitbucket_email")
self.api_token = credentials.get("bitbucket_api_token")
if not self.email or not self.api_token:
raise ConnectorMissingCredentialError("Bitbucket")
return None
def _client(self) -> httpx.Client:
"""Build an authenticated HTTP client or raise if credentials missing."""
if not self.email or not self.api_token:
raise ConnectorMissingCredentialError("Bitbucket")
return build_auth_client(self.email, self.api_token)
def _iter_pull_requests_for_repo(
self,
client: httpx.Client,
repo_slug: str,
params: dict[str, Any] | None = None,
start_url: str | None = None,
on_page: Callable[[str | None], None] | None = None,
) -> Iterator[dict[str, Any]]:
base = f"https://api.bitbucket.org/2.0/repositories/{self.workspace}/{repo_slug}/pullrequests"
yield from paginate(
client,
base,
params,
start_url=start_url,
on_page=on_page,
)
def _build_params(
self,
fields: str = PR_LIST_RESPONSE_FIELDS,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> dict[str, Any]:
"""Build Bitbucket fetch params.
Always include OPEN, MERGED, and DECLINED PRs. If both ``start`` and
``end`` are provided, apply a single updated_on time window.
"""
def _iso(ts: SecondsSinceUnixEpoch) -> str:
return datetime.fromtimestamp(ts, tz=timezone.utc).isoformat()
def _tc_epoch(
lower_epoch: SecondsSinceUnixEpoch | None,
upper_epoch: SecondsSinceUnixEpoch | None,
) -> str | None:
if lower_epoch is not None and upper_epoch is not None:
lower_iso = _iso(lower_epoch)
upper_iso = _iso(upper_epoch)
return f'(updated_on > "{lower_iso}" AND updated_on <= "{upper_iso}")'
return None
params: dict[str, Any] = {"fields": fields, "pagelen": 50}
time_clause = _tc_epoch(start, end)
q = '(state = "OPEN" OR state = "MERGED" OR state = "DECLINED")'
if time_clause:
q = f"{q} AND {time_clause}"
params["q"] = q
return params
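# Illustrative sketch (not in the original source): with both start and end epochs set,
# _build_params returns roughly the following; the time-window values are hypothetical:
#   {
#       "fields": PR_LIST_RESPONSE_FIELDS,
#       "pagelen": 50,
#       "q": '(state = "OPEN" OR state = "MERGED" OR state = "DECLINED") AND '
#            '(updated_on > "2024-01-01T00:00:00+00:00" AND updated_on <= "2024-01-31T00:00:00+00:00")',
#   }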
def _iter_target_repositories(self, client: httpx.Client) -> Iterator[str]:
"""Yield repository slugs based on configuration.
Priority:
- repositories list
- projects list (list repos by project key)
- workspace (all repos)
"""
if self._repositories:
for slug in self._repositories:
yield slug
return
if self._projects:
for project_key in self._projects:
for repo in list_repositories(client, self.workspace, project_key):
slug_val = repo.get("slug")
if isinstance(slug_val, str) and slug_val:
yield slug_val
return
for repo in list_repositories(client, self.workspace, None):
slug_val = repo.get("slug")
if isinstance(slug_val, str) and slug_val:
yield slug_val
@override
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: BitbucketConnectorCheckpoint,
) -> CheckpointOutput[BitbucketConnectorCheckpoint]:
"""Resumable PR ingestion across repos and pages within a time window.
Yields Documents (or ConnectorFailure for per-PR mapping failures) and returns
an updated checkpoint that records repo position and next page URL.
"""
new_checkpoint = copy.deepcopy(checkpoint)
with self._client() as client:
# Materialize target repositories once
if not new_checkpoint.repos_queue:
# Deduplicate and sort so the repository order is deterministic across runs
repos_list = list(self._iter_target_repositories(client))
new_checkpoint.repos_queue = sorted(set(repos_list))
new_checkpoint.current_repo_index = 0
new_checkpoint.next_url = None
repos = new_checkpoint.repos_queue
if not repos or new_checkpoint.current_repo_index >= len(repos):
new_checkpoint.has_more = False
return new_checkpoint
repo_slug = repos[new_checkpoint.current_repo_index]
first_page_params = self._build_params(
fields=PR_LIST_RESPONSE_FIELDS,
start=start,
end=end,
)
def _on_page(next_url: str | None) -> None:
new_checkpoint.next_url = next_url
for pr in self._iter_pull_requests_for_repo(
client,
repo_slug,
params=first_page_params,
start_url=new_checkpoint.next_url,
on_page=_on_page,
):
try:
document = map_pr_to_document(pr, self.workspace, repo_slug)
yield document
except Exception as e:
pr_id = pr.get("id")
pr_link = (
f"https://bitbucket.org/{self.workspace}/{repo_slug}/pull-requests/{pr_id}"
if pr_id is not None
else None
)
yield ConnectorFailure(
failed_document=DocumentFailure(
document_id=(
f"{DocumentSource.BITBUCKET.value}:{self.workspace}:{repo_slug}:pr:{pr_id}"
if pr_id is not None
else f"{DocumentSource.BITBUCKET.value}:{self.workspace}:{repo_slug}:pr:unknown"
),
document_link=pr_link,
),
failure_message=f"Failed to process Bitbucket PR: {e}",
exception=e,
)
# Advance to next repository (if any) and set has_more accordingly
new_checkpoint.current_repo_index += 1
new_checkpoint.next_url = None
new_checkpoint.has_more = new_checkpoint.current_repo_index < len(repos)
return new_checkpoint
@override
def build_dummy_checkpoint(self) -> BitbucketConnectorCheckpoint:
"""Create an initial checkpoint with work remaining."""
return BitbucketConnectorCheckpoint(has_more=True)
@override
def validate_checkpoint_json(
self, checkpoint_json: str
) -> BitbucketConnectorCheckpoint:
"""Validate and deserialize a checkpoint instance from JSON."""
return BitbucketConnectorCheckpoint.model_validate_json(checkpoint_json)
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: IndexingHeartbeatInterface | None = None,
) -> Iterator[list[SlimDocument]]:
"""Return only document IDs for all existing pull requests."""
batch: list[SlimDocument] = []
params = self._build_params(
fields=SLIM_PR_LIST_RESPONSE_FIELDS,
start=start,
end=end,
)
with self._client() as client:
for slug in self._iter_target_repositories(client):
for pr in self._iter_pull_requests_for_repo(
client, slug, params=params
):
pr_id = pr["id"]
doc_id = f"{DocumentSource.BITBUCKET.value}:{self.workspace}:{slug}:pr:{pr_id}"
batch.append(SlimDocument(id=doc_id))
if len(batch) >= self.batch_size:
yield batch
batch = []
if callback:
if callback.should_stop():
# Note: this is not actually used for permission sync yet, just pruning
raise RuntimeError(
"bitbucket_pr_sync: Stop signal detected"
)
callback.progress("bitbucket_pr_sync", len(batch))
if batch:
yield batch
def validate_connector_settings(self) -> None:
"""Validate Bitbucket credentials and workspace access by probing a lightweight endpoint.
Raises:
CredentialExpiredError: on HTTP 401
InsufficientPermissionsError: on HTTP 403
UnexpectedValidationError: on any other failure
"""
try:
with self._client() as client:
url = f"https://api.bitbucket.org/2.0/repositories/{self.workspace}"
resp = client.get(
url,
params={"pagelen": 1, "fields": "pagelen"},
timeout=REQUEST_TIMEOUT_SECONDS,
)
if resp.status_code == 401:
raise CredentialExpiredError(
"Invalid or expired Bitbucket credentials (HTTP 401)."
)
if resp.status_code == 403:
raise InsufficientPermissionsError(
"Insufficient permissions to access Bitbucket workspace (HTTP 403)."
)
if resp.status_code < 200 or resp.status_code >= 300:
raise UnexpectedValidationError(
f"Unexpected Bitbucket error (status={resp.status_code})."
)
except Exception as e:
# Network or other unexpected errors
if isinstance(
e,
(
CredentialExpiredError,
InsufficientPermissionsError,
UnexpectedValidationError,
ConnectorMissingCredentialError,
),
):
raise
raise UnexpectedValidationError(
f"Unexpected error while validating Bitbucket settings: {e}"
)
if __name__ == "__main__":
bitbucket = BitbucketConnector(
workspace="<YOUR_WORKSPACE>"
)
bitbucket.load_credentials({
"bitbucket_email": "<YOUR_EMAIL>",
"bitbucket_api_token": "<YOUR_API_TOKEN>",
})
bitbucket.validate_connector_settings()
print("Credentials validated successfully.")
start_time = datetime.fromtimestamp(0, tz=timezone.utc)
end_time = datetime.now(timezone.utc)
for doc_batch in bitbucket.retrieve_all_slim_docs_perm_sync(
start=start_time.timestamp(),
end=end_time.timestamp(),
):
for doc in doc_batch:
print(doc)
bitbucket_checkpoint = bitbucket.build_dummy_checkpoint()
while bitbucket_checkpoint.has_more:
gen = bitbucket.load_from_checkpoint(
start=start_time.timestamp(),
end=end_time.timestamp(),
checkpoint=bitbucket_checkpoint,
)
while True:
try:
doc = next(gen)
print(doc)
except StopIteration as e:
bitbucket_checkpoint = e.value
break
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/bitbucket/connector.py",
"license": "Apache License 2.0",
"lines": 340,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/bitbucket/utils.py | from __future__ import annotations
import time
from collections.abc import Callable
from collections.abc import Iterator
from datetime import datetime
from datetime import timezone
from typing import Any
import httpx
from common.data_source.config import REQUEST_TIMEOUT_SECONDS, DocumentSource
from common.data_source.cross_connector_utils.rate_limit_wrapper import (
rate_limit_builder,
)
from common.data_source.utils import sanitize_filename
from common.data_source.models import BasicExpertInfo, Document
from common.data_source.cross_connector_utils.retry_wrapper import retry_builder
# Fields requested from Bitbucket PR list endpoint to ensure rich PR data
PR_LIST_RESPONSE_FIELDS: str = ",".join(
[
"next",
"page",
"pagelen",
"values.author",
"values.close_source_branch",
"values.closed_by",
"values.comment_count",
"values.created_on",
"values.description",
"values.destination",
"values.draft",
"values.id",
"values.links",
"values.merge_commit",
"values.participants",
"values.reason",
"values.rendered",
"values.reviewers",
"values.source",
"values.state",
"values.summary",
"values.task_count",
"values.title",
"values.type",
"values.updated_on",
]
)
# Minimal fields for slim retrieval (IDs only)
SLIM_PR_LIST_RESPONSE_FIELDS: str = ",".join(
[
"next",
"page",
"pagelen",
"values.id",
]
)
# Minimal fields for repository list calls
REPO_LIST_RESPONSE_FIELDS: str = ",".join(
[
"next",
"page",
"pagelen",
"values.slug",
"values.full_name",
"values.project.key",
]
)
class BitbucketRetriableError(Exception):
"""Raised for retriable Bitbucket conditions (429, 5xx)."""
class BitbucketNonRetriableError(Exception):
"""Raised for non-retriable Bitbucket client errors (4xx except 429)."""
@retry_builder(
tries=6,
delay=1,
backoff=2,
max_delay=30,
exceptions=(BitbucketRetriableError, httpx.RequestError),
)
@rate_limit_builder(max_calls=60, period=60)
def bitbucket_get(
client: httpx.Client, url: str, params: dict[str, Any] | None = None
) -> httpx.Response:
"""Perform a GET against Bitbucket with retry and rate limiting.
Retries on 429 and 5xx responses, and on transport errors. Honors
`Retry-After` header for 429 when present by sleeping before retrying.
"""
try:
response = client.get(url, params=params, timeout=REQUEST_TIMEOUT_SECONDS)
except httpx.RequestError:
# Allow retry_builder to handle retries of transport errors
raise
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
status = e.response.status_code if e.response is not None else None
if status == 429:
retry_after = e.response.headers.get("Retry-After") if e.response else None
if retry_after is not None:
try:
time.sleep(int(retry_after))
except (TypeError, ValueError):
pass
raise BitbucketRetriableError("Bitbucket rate limit exceeded (429)") from e
if status is not None and 500 <= status < 600:
raise BitbucketRetriableError(f"Bitbucket server error: {status}") from e
if status is not None and 400 <= status < 500:
raise BitbucketNonRetriableError(f"Bitbucket client error: {status}") from e
# Unknown status, propagate
raise
return response
def build_auth_client(email: str, api_token: str) -> httpx.Client:
"""Create an authenticated httpx client for Bitbucket Cloud API."""
return httpx.Client(auth=(email, api_token), http2=True)
def paginate(
client: httpx.Client,
url: str,
params: dict[str, Any] | None = None,
start_url: str | None = None,
on_page: Callable[[str | None], None] | None = None,
) -> Iterator[dict[str, Any]]:
"""Iterate over paginated Bitbucket API responses yielding individual values.
Args:
client: Authenticated HTTP client.
url: Base collection URL (first page when start_url is None).
params: Query params for the first page.
start_url: If provided, start from this absolute URL (ignores params).
on_page: Optional callback invoked after each page with the next page URL.
"""
next_url = start_url or url
# If resuming from a next URL, do not pass params again
query = params.copy() if params else None
query = None if start_url else query
while next_url:
resp = bitbucket_get(client, next_url, params=query)
data = resp.json()
values = data.get("values", [])
for item in values:
yield item
next_url = data.get("next")
if on_page is not None:
on_page(next_url)
# only include params on first call, next_url will contain all necessary params
query = None
def list_repositories(
client: httpx.Client, workspace: str, project_key: str | None = None
) -> Iterator[dict[str, Any]]:
"""List repositories in a workspace, optionally filtered by project key."""
base_url = f"https://api.bitbucket.org/2.0/repositories/{workspace}"
params: dict[str, Any] = {
"fields": REPO_LIST_RESPONSE_FIELDS,
"pagelen": 100,
# Ensure deterministic ordering
"sort": "full_name",
}
if project_key:
params["q"] = f'project.key="{project_key}"'
yield from paginate(client, base_url, params)
def map_pr_to_document(pr: dict[str, Any], workspace: str, repo_slug: str) -> Document:
"""Map a Bitbucket pull request JSON to Onyx Document."""
pr_id = pr["id"]
title = pr.get("title") or f"PR {pr_id}"
description = pr.get("description") or ""
state = pr.get("state")
draft = pr.get("draft", False)
author = pr.get("author", {})
reviewers = pr.get("reviewers", [])
participants = pr.get("participants", [])
link = pr.get("links", {}).get("html", {}).get("href") or (
f"https://bitbucket.org/{workspace}/{repo_slug}/pull-requests/{pr_id}"
)
created_on = pr.get("created_on")
updated_on = pr.get("updated_on")
updated_dt = (
datetime.fromisoformat(updated_on.replace("Z", "+00:00")).astimezone(
timezone.utc
)
if isinstance(updated_on, str)
else None
)
source_branch = pr.get("source", {}).get("branch", {}).get("name", "")
destination_branch = pr.get("destination", {}).get("branch", {}).get("name", "")
approved_by = [
_get_user_name(p.get("user", {})) for p in participants if p.get("approved")
]
primary_owner = None
if author:
primary_owner = BasicExpertInfo(
display_name=_get_user_name(author),
)
# secondary_owners = [
# BasicExpertInfo(display_name=_get_user_name(r)) for r in reviewers
# ] or None
reviewer_names = [_get_user_name(r) for r in reviewers]
# Create a concise summary of key PR info
created_date = created_on.split("T")[0] if created_on else "N/A"
updated_date = updated_on.split("T")[0] if updated_on else "N/A"
content_text = (
"Pull Request Information:\n"
f"- Pull Request ID: {pr_id}\n"
f"- Title: {title}\n"
f"- State: {state or 'N/A'} {'(Draft)' if draft else ''}\n"
)
if state == "DECLINED":
content_text += f"- Reason: {pr.get('reason', 'N/A')}\n"
content_text += (
f"- Author: {_get_user_name(author) if author else 'N/A'}\n"
f"- Reviewers: {', '.join(reviewer_names) if reviewer_names else 'N/A'}\n"
f"- Branch: {source_branch} -> {destination_branch}\n"
f"- Created: {created_date}\n"
f"- Updated: {updated_date}"
)
if description:
content_text += f"\n\nDescription:\n{description}"
metadata: dict[str, str | list[str]] = {
"object_type": "PullRequest",
"workspace": workspace,
"repository": repo_slug,
"pr_key": f"{workspace}/{repo_slug}#{pr_id}",
"id": str(pr_id),
"title": title,
"state": state or "",
"draft": str(bool(draft)),
"link": link,
"author": _get_user_name(author) if author else "",
"reviewers": reviewer_names,
"approved_by": approved_by,
"comment_count": str(pr.get("comment_count", "")),
"task_count": str(pr.get("task_count", "")),
"created_on": created_on or "",
"updated_on": updated_on or "",
"source_branch": source_branch,
"destination_branch": destination_branch,
"closed_by": (
_get_user_name(pr.get("closed_by", {})) if pr.get("closed_by") else ""
),
"close_source_branch": str(bool(pr.get("close_source_branch", False))),
}
name = sanitize_filename(title, "md")
return Document(
id=f"{DocumentSource.BITBUCKET.value}:{workspace}:{repo_slug}:pr:{pr_id}",
blob=content_text.encode("utf-8"),
source=DocumentSource.BITBUCKET,
extension=".md",
semantic_identifier=f"#{pr_id}: {name}",
size_bytes=len(content_text.encode("utf-8")),
doc_updated_at=updated_dt,
primary_owners=[primary_owner] if primary_owner else None,
# secondary_owners=secondary_owners,
metadata=metadata,
)
def _get_user_name(user: dict[str, Any]) -> str:
return user.get("display_name") or user.get("nickname") or "unknown" | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/bitbucket/utils.py",
"license": "Apache License 2.0",
"lines": 250,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/cross_connector_utils/rate_limit_wrapper.py | import time
import logging
from collections.abc import Callable
from functools import wraps
from typing import Any
from typing import cast
from typing import TypeVar
import requests
F = TypeVar("F", bound=Callable[..., Any])
class RateLimitTriedTooManyTimesError(Exception):
pass
class _RateLimitDecorator:
"""Builds a generic wrapper/decorator for calls to external APIs that
prevents making more than `max_calls` requests per `period`
Implementation inspired by the `ratelimit` library:
https://github.com/tomasbasham/ratelimit.
NOTE: is not thread safe.
"""
def __init__(
self,
max_calls: int,
period: float, # in seconds
sleep_time: float = 2, # in seconds
sleep_backoff: float = 2, # applies exponential backoff
max_num_sleep: int = 0,
):
self.max_calls = max_calls
self.period = period
self.sleep_time = sleep_time
self.sleep_backoff = sleep_backoff
self.max_num_sleep = max_num_sleep
self.call_history: list[float] = []
self.curr_calls = 0
def __call__(self, func: F) -> F:
@wraps(func)
def wrapped_func(*args: list, **kwargs: dict[str, Any]) -> Any:
# cleanup calls which are no longer relevant
self._cleanup()
# check if we've exceeded the rate limit
sleep_cnt = 0
while len(self.call_history) == self.max_calls:
sleep_time = self.sleep_time * (self.sleep_backoff**sleep_cnt)
logging.warning(
f"Rate limit exceeded for function {func.__name__}. "
f"Waiting {sleep_time} seconds before retrying."
)
time.sleep(sleep_time)
sleep_cnt += 1
if self.max_num_sleep != 0 and sleep_cnt >= self.max_num_sleep:
raise RateLimitTriedTooManyTimesError(
f"Exceeded '{self.max_num_sleep}' retries for function '{func.__name__}'"
)
self._cleanup()
# add the current call to the call history
self.call_history.append(time.monotonic())
return func(*args, **kwargs)
return cast(F, wrapped_func)
def _cleanup(self) -> None:
curr_time = time.monotonic()
time_to_expire_before = curr_time - self.period
self.call_history = [
call_time
for call_time in self.call_history
if call_time > time_to_expire_before
]
rate_limit_builder = _RateLimitDecorator
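# Hedged usage sketch (added for illustration; `call_external_api` is a hypothetical function):
#   @rate_limit_builder(max_calls=60, period=60)
#   def call_external_api() -> None:
#       ...
# At most 60 invocations are allowed per 60-second window; further calls sleep with
# exponential backoff until a slot in the window frees up.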
"""If you want to allow the external service to tell you when you've hit the rate limit,
use the following instead"""
R = TypeVar("R", bound=Callable[..., requests.Response])
def wrap_request_to_handle_ratelimiting(
request_fn: R, default_wait_time_sec: int = 30, max_waits: int = 30
) -> R:
def wrapped_request(*args: list, **kwargs: dict[str, Any]) -> requests.Response:
for _ in range(max_waits):
response = request_fn(*args, **kwargs)
if response.status_code == 429:
try:
wait_time = int(
response.headers.get("Retry-After", default_wait_time_sec)
)
except ValueError:
wait_time = default_wait_time_sec
time.sleep(wait_time)
continue
return response
raise RateLimitTriedTooManyTimesError(f"Exceeded '{max_waits}' retries")
return cast(R, wrapped_request)
_rate_limited_get = wrap_request_to_handle_ratelimiting(requests.get)
_rate_limited_post = wrap_request_to_handle_ratelimiting(requests.post)
class _RateLimitedRequest:
get = _rate_limited_get
post = _rate_limited_post
rl_requests = _RateLimitedRequest | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/cross_connector_utils/rate_limit_wrapper.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/cross_connector_utils/retry_wrapper.py | from collections.abc import Callable
import logging
from logging import Logger
from typing import Any
from typing import cast
from typing import TypeVar
import requests
from retry import retry
from common.data_source.config import REQUEST_TIMEOUT_SECONDS
F = TypeVar("F", bound=Callable[..., Any])
logger = logging.getLogger(__name__)
def retry_builder(
tries: int = 20,
delay: float = 0.1,
max_delay: float | None = 60,
backoff: float = 2,
jitter: tuple[float, float] | float = 1,
exceptions: type[Exception] | tuple[type[Exception], ...] = (Exception,),
) -> Callable[[F], F]:
"""Builds a generic wrapper/decorator for calls to external APIs that
may fail due to rate limiting, flakes, or other reasons. Applies exponential
backoff with jitter to retry the call."""
def retry_with_default(func: F) -> F:
@retry(
tries=tries,
delay=delay,
max_delay=max_delay,
backoff=backoff,
jitter=jitter,
logger=cast(Logger, logger),
exceptions=exceptions,
)
def wrapped_func(*args: list, **kwargs: dict[str, Any]) -> Any:
return func(*args, **kwargs)
return cast(F, wrapped_func)
return retry_with_default
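# A minimal usage sketch (illustrative only, not used elsewhere in this module; the URL
# and function name below are placeholders):
#
#     @retry_builder(tries=5, delay=0.5, exceptions=(requests.RequestException,))
#     def fetch_health() -> requests.Response:
#         response = requests.get("https://example.com/health", timeout=REQUEST_TIMEOUT_SECONDS)
#         response.raise_for_status()
#         return response
#
# Each requests.RequestException triggers another attempt (up to 5 total) with exponential
# backoff (0.5s, 1s, 2s, ... capped at 60s) plus jitter.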
def request_with_retries(
method: str,
url: str,
*,
data: dict[str, Any] | None = None,
headers: dict[str, Any] | None = None,
params: dict[str, Any] | None = None,
timeout: int = REQUEST_TIMEOUT_SECONDS,
stream: bool = False,
tries: int = 8,
delay: float = 1,
backoff: float = 2,
) -> requests.Response:
@retry(tries=tries, delay=delay, backoff=backoff, logger=cast(Logger, logger))
def _make_request() -> requests.Response:
response = requests.request(
method=method,
url=url,
data=data,
headers=headers,
params=params,
timeout=timeout,
stream=stream,
)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
logging.exception(
"Request failed:\n%s",
{
"method": method,
"url": url,
"data": data,
"headers": headers,
"params": params,
"timeout": timeout,
"stream": stream,
},
)
raise
return response
return _make_request() | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/cross_connector_utils/retry_wrapper.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:common/data_source/zendesk_connector.py | import copy
import logging
import time
from collections.abc import Callable
from collections.abc import Iterator
from typing import Any
import requests
from pydantic import BaseModel
from requests.exceptions import HTTPError
from typing_extensions import override
from common.data_source.config import ZENDESK_CONNECTOR_SKIP_ARTICLE_LABELS, DocumentSource
from common.data_source.exceptions import ConnectorValidationError, CredentialExpiredError, InsufficientPermissionsError
from common.data_source.html_utils import parse_html_page_basic
from common.data_source.interfaces import CheckpointOutput, CheckpointOutputWrapper, CheckpointedConnector, IndexingHeartbeatInterface, SlimConnectorWithPermSync
from common.data_source.models import BasicExpertInfo, ConnectorCheckpoint, ConnectorFailure, Document, DocumentFailure, GenerateSlimDocumentOutput, SecondsSinceUnixEpoch, SlimDocument
from common.data_source.utils import retry_builder, time_str_to_utc, rate_limit_builder
MAX_PAGE_SIZE = 30 # Zendesk API maximum
MAX_AUTHOR_MAP_SIZE = 50_000 # Reset author map cache if it gets too large
_SLIM_BATCH_SIZE = 1000
class ZendeskCredentialsNotSetUpError(PermissionError):
def __init__(self) -> None:
super().__init__(
"Zendesk Credentials are not set up, was load_credentials called?"
)
class ZendeskClient:
def __init__(
self,
subdomain: str,
email: str,
token: str,
calls_per_minute: int | None = None,
):
self.base_url = f"https://{subdomain}.zendesk.com/api/v2"
self.auth = (f"{email}/token", token)
self.make_request = request_with_rate_limit(self, calls_per_minute)
def request_with_rate_limit(
client: ZendeskClient, max_calls_per_minute: int | None = None
) -> Callable[[str, dict[str, Any]], dict[str, Any]]:
@retry_builder()
@(
rate_limit_builder(max_calls=max_calls_per_minute, period=60)
if max_calls_per_minute
else lambda x: x
)
def make_request(endpoint: str, params: dict[str, Any]) -> dict[str, Any]:
response = requests.get(
f"{client.base_url}/{endpoint}", auth=client.auth, params=params
)
if response.status_code == 429:
retry_after = response.headers.get("Retry-After")
if retry_after is not None:
# Sleep for the duration indicated by the Retry-After header
time.sleep(int(retry_after))
elif (
response.status_code == 403
and response.json().get("error") == "SupportProductInactive"
):
return response.json()
response.raise_for_status()
return response.json()
return make_request
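# A minimal usage sketch of the client (illustrative only; the credentials below are
# placeholders):
#
#     client = ZendeskClient("mycompany", "agent@example.com", "api-token", calls_per_minute=60)
#     first_page = client.make_request("help_center/articles", {"page[size]": MAX_PAGE_SIZE})
#
# Every call made through `make_request` is wrapped with retries and, when
# `calls_per_minute` is set, client-side rate limiting.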
class ZendeskPageResponse(BaseModel):
data: list[dict[str, Any]]
meta: dict[str, Any]
has_more: bool
def _get_content_tag_mapping(client: ZendeskClient) -> dict[str, str]:
content_tags: dict[str, str] = {}
params = {"page[size]": MAX_PAGE_SIZE}
try:
while True:
data = client.make_request("guide/content_tags", params)
for tag in data.get("records", []):
content_tags[tag["id"]] = tag["name"]
# Check if there are more pages
if data.get("meta", {}).get("has_more", False):
params["page[after]"] = data["meta"]["after_cursor"]
else:
break
return content_tags
except Exception as e:
raise Exception(f"Error fetching content tags: {str(e)}")
def _get_articles(
client: ZendeskClient, start_time: int | None = None, page_size: int = MAX_PAGE_SIZE
) -> Iterator[dict[str, Any]]:
params = {"page[size]": page_size, "sort_by": "updated_at", "sort_order": "asc"}
if start_time is not None:
params["start_time"] = start_time
while True:
data = client.make_request("help_center/articles", params)
for article in data["articles"]:
yield article
if not data.get("meta", {}).get("has_more"):
break
params["page[after]"] = data["meta"]["after_cursor"]
def _get_article_page(
client: ZendeskClient,
start_time: int | None = None,
after_cursor: str | None = None,
page_size: int = MAX_PAGE_SIZE,
) -> ZendeskPageResponse:
params = {"page[size]": page_size, "sort_by": "updated_at", "sort_order": "asc"}
if start_time is not None:
params["start_time"] = start_time
if after_cursor is not None:
params["page[after]"] = after_cursor
data = client.make_request("help_center/articles", params)
return ZendeskPageResponse(
data=data["articles"],
meta=data["meta"],
has_more=bool(data["meta"].get("has_more", False)),
)
def _get_tickets(
client: ZendeskClient, start_time: int | None = None
) -> Iterator[dict[str, Any]]:
params = {"start_time": start_time or 0}
while True:
data = client.make_request("incremental/tickets.json", params)
for ticket in data["tickets"]:
yield ticket
if not data.get("end_of_stream", False):
params["start_time"] = data["end_time"]
else:
break
# TODO: maybe these don't need to be their own functions?
def _get_tickets_page(
client: ZendeskClient, start_time: int | None = None
) -> ZendeskPageResponse:
params = {"start_time": start_time or 0}
# NOTE: for some reason zendesk doesn't seem to be respecting the start_time param
# in my local testing with very few tickets. We'll look into it if this becomes an
# issue in larger deployments
data = client.make_request("incremental/tickets.json", params)
if data.get("error") == "SupportProductInactive":
raise ValueError(
"Zendesk Support Product is not active for this account, No tickets to index"
)
return ZendeskPageResponse(
data=data["tickets"],
meta={"end_time": data["end_time"]},
has_more=not bool(data.get("end_of_stream", False)),
)
def _fetch_author(
client: ZendeskClient, author_id: str | int
) -> BasicExpertInfo | None:
# Skip fetching if author_id is invalid
# cast to str to avoid issues with zendesk changing their types
if not author_id or str(author_id) == "-1":
return None
try:
author_data = client.make_request(f"users/{author_id}", {})
user = author_data.get("user")
return (
BasicExpertInfo(display_name=user.get("name"), email=user.get("email"))
if user and user.get("name") and user.get("email")
else None
)
except requests.exceptions.HTTPError:
# Handle any API errors gracefully
return None
def _article_to_document(
article: dict[str, Any],
content_tags: dict[str, str],
author_map: dict[str, BasicExpertInfo],
client: ZendeskClient,
) -> tuple[dict[str, BasicExpertInfo] | None, Document]:
author_id = article.get("author_id")
if not author_id:
author = None
else:
author = (
author_map.get(author_id)
if author_id in author_map
else _fetch_author(client, author_id)
)
new_author_mapping = {author_id: author} if author_id and author else None
updated_at = article.get("updated_at")
update_time = time_str_to_utc(updated_at) if updated_at else None
text = parse_html_page_basic(article.get("body") or "")
blob = text.encode("utf-8", errors="replace")
# Build metadata
metadata: dict[str, str | list[str]] = {
"labels": [str(label) for label in article.get("label_names", []) if label],
"content_tags": [
content_tags[tag_id]
for tag_id in article.get("content_tag_ids", [])
if tag_id in content_tags
],
}
# Remove empty values
metadata = {k: v for k, v in metadata.items() if v}
return new_author_mapping, Document(
id=f"article:{article['id']}",
source=DocumentSource.ZENDESK,
semantic_identifier=article["title"],
extension=".txt",
blob=blob,
size_bytes=len(blob),
doc_updated_at=update_time,
primary_owners=[author] if author else None,
metadata=metadata,
)
def _get_comment_text(
comment: dict[str, Any],
author_map: dict[str, BasicExpertInfo],
client: ZendeskClient,
) -> tuple[dict[str, BasicExpertInfo] | None, str]:
author_id = comment.get("author_id")
if not author_id:
author = None
else:
author = (
author_map.get(author_id)
if author_id in author_map
else _fetch_author(client, author_id)
)
new_author_mapping = {author_id: author} if author_id and author else None
comment_text = f"Comment{' by ' + author.display_name if author and author.display_name else ''}"
comment_text += f"{' at ' + comment['created_at'] if comment.get('created_at') else ''}:\n{comment['body']}"
return new_author_mapping, comment_text
def _ticket_to_document(
ticket: dict[str, Any],
author_map: dict[str, BasicExpertInfo],
client: ZendeskClient,
) -> tuple[dict[str, BasicExpertInfo] | None, Document]:
submitter_id = ticket.get("submitter")
if not submitter_id:
submitter = None
else:
submitter = (
author_map.get(submitter_id)
if submitter_id in author_map
else _fetch_author(client, submitter_id)
)
new_author_mapping = (
{submitter_id: submitter} if submitter_id and submitter else None
)
updated_at = ticket.get("updated_at")
update_time = time_str_to_utc(updated_at) if updated_at else None
metadata: dict[str, str | list[str]] = {}
if status := ticket.get("status"):
metadata["status"] = status
if priority := ticket.get("priority"):
metadata["priority"] = priority
if tags := ticket.get("tags"):
metadata["tags"] = tags
if ticket_type := ticket.get("type"):
metadata["ticket_type"] = ticket_type
# Fetch comments for the ticket
comments_data = client.make_request(f"tickets/{ticket.get('id')}/comments", {})
comments = comments_data.get("comments", [])
comment_texts = []
for comment in comments:
new_author_mapping, comment_text = _get_comment_text(
comment, author_map, client
)
if new_author_mapping:
author_map.update(new_author_mapping)
comment_texts.append(comment_text)
comments_text = "\n\n".join(comment_texts)
subject = ticket.get("subject")
full_text = f"Ticket Subject:\n{subject}\n\nComments:\n{comments_text}"
blob = full_text.encode("utf-8", errors="replace")
return new_author_mapping, Document(
id=f"zendesk_ticket_{ticket['id']}",
blob=blob,
extension=".txt",
size_bytes=len(blob),
source=DocumentSource.ZENDESK,
semantic_identifier=f"Ticket #{ticket['id']}: {subject or 'No Subject'}",
doc_updated_at=update_time,
primary_owners=[submitter] if submitter else None,
metadata=metadata,
)
class ZendeskConnectorCheckpoint(ConnectorCheckpoint):
# We use cursor-based paginated retrieval for articles
after_cursor_articles: str | None
# We use timestamp-based paginated retrieval for tickets
next_start_time_tickets: int | None
cached_author_map: dict[str, BasicExpertInfo] | None
cached_content_tags: dict[str, str] | None
class ZendeskConnector(
SlimConnectorWithPermSync, CheckpointedConnector[ZendeskConnectorCheckpoint]
):
def __init__(
self,
content_type: str = "articles",
calls_per_minute: int | None = None,
) -> None:
self.content_type = content_type
self.subdomain = ""
self.client: ZendeskClient | None = None
# Content tags are fetched on the first checkpoint load and cached
self.content_tags: dict[str, str] = {}
self.calls_per_minute = calls_per_minute
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
# Subdomain is actually the whole URL
subdomain = (
credentials["zendesk_subdomain"]
.replace("https://", "")
.split(".zendesk.com")[0]
)
self.subdomain = subdomain
self.client = ZendeskClient(
subdomain,
credentials["zendesk_email"],
credentials["zendesk_token"],
calls_per_minute=self.calls_per_minute,
)
return None
@override
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ZendeskConnectorCheckpoint,
) -> CheckpointOutput[ZendeskConnectorCheckpoint]:
if self.client is None:
raise ZendeskCredentialsNotSetUpError()
if checkpoint.cached_content_tags is None:
checkpoint.cached_content_tags = _get_content_tag_mapping(self.client)
return checkpoint # save the content tags to the checkpoint
self.content_tags = checkpoint.cached_content_tags
if self.content_type == "articles":
checkpoint = yield from self._retrieve_articles(start, end, checkpoint)
return checkpoint
elif self.content_type == "tickets":
checkpoint = yield from self._retrieve_tickets(start, end, checkpoint)
return checkpoint
else:
raise ValueError(f"Unsupported content_type: {self.content_type}")
def _retrieve_articles(
self,
start: SecondsSinceUnixEpoch | None,
end: SecondsSinceUnixEpoch | None,
checkpoint: ZendeskConnectorCheckpoint,
) -> CheckpointOutput[ZendeskConnectorCheckpoint]:
checkpoint = copy.deepcopy(checkpoint)
# This one is built on the fly as there may be many more authors than tags
author_map: dict[str, BasicExpertInfo] = checkpoint.cached_author_map or {}
after_cursor = checkpoint.after_cursor_articles
doc_batch: list[Document] = []
response = _get_article_page(
self.client,
start_time=int(start) if start else None,
after_cursor=after_cursor,
)
articles = response.data
has_more = response.has_more
after_cursor = response.meta.get("after_cursor")
for article in articles:
if (
article.get("body") is None
or article.get("draft")
or any(
label in ZENDESK_CONNECTOR_SKIP_ARTICLE_LABELS
for label in article.get("label_names", [])
)
):
continue
try:
new_author_map, document = _article_to_document(
article, self.content_tags, author_map, self.client
)
except Exception as e:
logging.error(f"Error processing article {article['id']}: {e}")
yield ConnectorFailure(
failed_document=DocumentFailure(
document_id=f"{article.get('id')}",
document_link=article.get("html_url", ""),
),
failure_message=str(e),
exception=e,
)
continue
if new_author_map:
author_map.update(new_author_map)
updated_at = document.doc_updated_at
updated_ts = updated_at.timestamp() if updated_at else None
if updated_ts is not None:
if start is not None and updated_ts <= start:
continue
if end is not None and updated_ts > end:
continue
doc_batch.append(document)
if not has_more:
yield from doc_batch
checkpoint.has_more = False
return checkpoint
# Sometimes no documents are retrieved, but the cursor
# is still updated so the connector makes progress.
yield from doc_batch
checkpoint.after_cursor_articles = after_cursor
last_doc_updated_at = doc_batch[-1].doc_updated_at if doc_batch else None
checkpoint.has_more = bool(
end is None
or last_doc_updated_at is None
or last_doc_updated_at.timestamp() <= end
)
checkpoint.cached_author_map = (
author_map if len(author_map) <= MAX_AUTHOR_MAP_SIZE else None
)
return checkpoint
def _retrieve_tickets(
self,
start: SecondsSinceUnixEpoch | None,
end: SecondsSinceUnixEpoch | None,
checkpoint: ZendeskConnectorCheckpoint,
) -> CheckpointOutput[ZendeskConnectorCheckpoint]:
checkpoint = copy.deepcopy(checkpoint)
if self.client is None:
raise ZendeskCredentialsNotSetUpError()
author_map: dict[str, BasicExpertInfo] = checkpoint.cached_author_map or {}
doc_batch: list[Document] = []
next_start_time = int(checkpoint.next_start_time_tickets or start or 0)
ticket_response = _get_tickets_page(self.client, start_time=next_start_time)
tickets = ticket_response.data
has_more = ticket_response.has_more
next_start_time = ticket_response.meta["end_time"]
for ticket in tickets:
if ticket.get("status") == "deleted":
continue
try:
new_author_map, document = _ticket_to_document(
ticket=ticket,
author_map=author_map,
client=self.client,
)
except Exception as e:
logging.error(f"Error processing ticket {ticket['id']}: {e}")
yield ConnectorFailure(
failed_document=DocumentFailure(
document_id=f"{ticket.get('id')}",
document_link=ticket.get("url", ""),
),
failure_message=str(e),
exception=e,
)
continue
if new_author_map:
author_map.update(new_author_map)
updated_at = document.doc_updated_at
updated_ts = updated_at.timestamp() if updated_at else None
if updated_ts is not None:
if start is not None and updated_ts <= start:
continue
if end is not None and updated_ts > end:
continue
doc_batch.append(document)
if not has_more:
yield from doc_batch
checkpoint.has_more = False
return checkpoint
yield from doc_batch
checkpoint.next_start_time_tickets = next_start_time
last_doc_updated_at = doc_batch[-1].doc_updated_at if doc_batch else None
checkpoint.has_more = bool(
end is None
or last_doc_updated_at is None
or last_doc_updated_at.timestamp() <= end
)
checkpoint.cached_author_map = (
author_map if len(author_map) <= MAX_AUTHOR_MAP_SIZE else None
)
return checkpoint
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: IndexingHeartbeatInterface | None = None,
) -> GenerateSlimDocumentOutput:
slim_doc_batch: list[SlimDocument] = []
if self.content_type == "articles":
articles = _get_articles(
self.client, start_time=int(start) if start else None
)
for article in articles:
slim_doc_batch.append(
SlimDocument(
id=f"article:{article['id']}",
)
)
if len(slim_doc_batch) >= _SLIM_BATCH_SIZE:
yield slim_doc_batch
slim_doc_batch = []
elif self.content_type == "tickets":
tickets = _get_tickets(
self.client, start_time=int(start) if start else None
)
for ticket in tickets:
slim_doc_batch.append(
SlimDocument(
id=f"zendesk_ticket_{ticket['id']}",
)
)
if len(slim_doc_batch) >= _SLIM_BATCH_SIZE:
yield slim_doc_batch
slim_doc_batch = []
else:
raise ValueError(f"Unsupported content_type: {self.content_type}")
if slim_doc_batch:
yield slim_doc_batch
@override
def validate_connector_settings(self) -> None:
if self.client is None:
raise ZendeskCredentialsNotSetUpError()
try:
_get_article_page(self.client, start_time=0)
except HTTPError as e:
# Check for HTTP status codes
if e.response.status_code == 401:
raise CredentialExpiredError(
"Your Zendesk credentials appear to be invalid or expired (HTTP 401)."
) from e
elif e.response.status_code == 403:
raise InsufficientPermissionsError(
"Your Zendesk token does not have sufficient permissions (HTTP 403)."
) from e
elif e.response.status_code == 404:
raise ConnectorValidationError(
"Zendesk resource not found (HTTP 404)."
) from e
else:
raise ConnectorValidationError(
f"Unexpected Zendesk error (status={e.response.status_code}): {e}"
) from e
@override
def validate_checkpoint_json(
self, checkpoint_json: str
) -> ZendeskConnectorCheckpoint:
return ZendeskConnectorCheckpoint.model_validate_json(checkpoint_json)
@override
def build_dummy_checkpoint(self) -> ZendeskConnectorCheckpoint:
return ZendeskConnectorCheckpoint(
after_cursor_articles=None,
next_start_time_tickets=None,
cached_author_map=None,
cached_content_tags=None,
has_more=True,
)
if __name__ == "__main__":
import os
connector = ZendeskConnector(content_type="articles")
connector.load_credentials(
{
"zendesk_subdomain": os.environ["ZENDESK_SUBDOMAIN"],
"zendesk_email": os.environ["ZENDESK_EMAIL"],
"zendesk_token": os.environ["ZENDESK_TOKEN"],
}
)
current = time.time()
one_day_ago = current - 24 * 60 * 60 # 1 day
checkpoint = connector.build_dummy_checkpoint()
while checkpoint.has_more:
gen = connector.load_from_checkpoint(
one_day_ago, current, checkpoint
)
wrapper = CheckpointOutputWrapper()
any_doc = False
for document, failure, next_checkpoint in wrapper(gen):
if document:
print("got document:", document.id)
any_doc = True
checkpoint = next_checkpoint
if any_doc:
break | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/zendesk_connector.py",
"license": "Apache License 2.0",
"lines": 564,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/imap_connector.py | import copy
import email
from email.header import decode_header
import imaplib
import logging
import os
import re
from datetime import datetime, timedelta
from datetime import timezone
from email.message import Message
from email.utils import collapse_rfc2231_value, getaddresses
from enum import Enum
from typing import Any
from typing import cast
import uuid
import bs4
from pydantic import BaseModel
from common.data_source.config import IMAP_CONNECTOR_SIZE_THRESHOLD, DocumentSource
from common.data_source.interfaces import CheckpointOutput, CheckpointedConnectorWithPermSync, CredentialsConnector, CredentialsProviderInterface
from common.data_source.models import BasicExpertInfo, ConnectorCheckpoint, Document, ExternalAccess, SecondsSinceUnixEpoch
_DEFAULT_IMAP_PORT_NUMBER = int(os.environ.get("IMAP_PORT", 993))
_IMAP_OKAY_STATUS = "OK"
_PAGE_SIZE = 100
_USERNAME_KEY = "imap_username"
_PASSWORD_KEY = "imap_password"
class Header(str, Enum):
SUBJECT_HEADER = "subject"
FROM_HEADER = "from"
TO_HEADER = "to"
CC_HEADER = "cc"
DELIVERED_TO_HEADER = (
"Delivered-To" # Used in mailing lists instead of the "to" header.
)
DATE_HEADER = "date"
MESSAGE_ID_HEADER = "Message-ID"
class EmailHeaders(BaseModel):
"""
Model for email headers extracted from IMAP messages.
"""
id: str
subject: str
sender: str
recipients: str | None
cc: str | None
date: datetime
@classmethod
def from_email_msg(cls, email_msg: Message) -> "EmailHeaders":
def _decode(header: str, default: str | None = None) -> str | None:
value = email_msg.get(header, default)
if not value:
return None
decoded_fragments = decode_header(value)
decoded_strings: list[str] = []
for decoded_value, encoding in decoded_fragments:
if isinstance(decoded_value, bytes):
try:
decoded_strings.append(
decoded_value.decode(encoding or "utf-8", errors="replace")
)
except LookupError:
decoded_strings.append(
decoded_value.decode("utf-8", errors="replace")
)
elif isinstance(decoded_value, str):
decoded_strings.append(decoded_value)
else:
decoded_strings.append(str(decoded_value))
return "".join(decoded_strings)
def _parse_date(date_str: str | None) -> datetime | None:
if not date_str:
return None
try:
return email.utils.parsedate_to_datetime(date_str)
except (TypeError, ValueError):
return None
message_id = _decode(header=Header.MESSAGE_ID_HEADER)
if not message_id:
message_id = f"<generated-{uuid.uuid4()}@imap.local>"
# It's possible for the subject line to not exist or be an empty string.
subject = _decode(header=Header.SUBJECT_HEADER) or "Unknown Subject"
from_ = _decode(header=Header.FROM_HEADER)
to = _decode(header=Header.TO_HEADER)
if not to:
to = _decode(header=Header.DELIVERED_TO_HEADER)
cc = _decode(header=Header.CC_HEADER)
date_str = _decode(header=Header.DATE_HEADER)
date = _parse_date(date_str=date_str)
if not date:
date = datetime.now(tz=timezone.utc)
# If any of the above are `None`, model validation will fail.
# Therefore, no guards (i.e.: `if <header> is None: raise RuntimeError(..)`) were written.
return cls.model_validate(
{
"id": message_id,
"subject": subject,
"sender": from_,
"recipients": to,
"cc": cc,
"date": date,
}
)
class CurrentMailbox(BaseModel):
mailbox: str
todo_email_ids: list[str]
# An email has a list of mailboxes.
# Each mailbox has a list of email-ids inside of it.
#
# Usage:
# To use this checkpointer, first fetch all the mailboxes.
# Then, pop a mailbox and fetch all of its email-ids.
# Then, pop each email-id and fetch its content (and parse it, etc..).
# When you have popped all email-ids for this mailbox, pop the next mailbox and repeat the above process until you're done.
#
# For initial checkpointing, set both fields to `None`.
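#
# A minimal sketch of that loop (assuming a configured `ImapConnector`; `start_ts`,
# `end_ts`, and `handle_document` are placeholders, and the real pipeline drives this
# through its indexing runner rather than a bare while-loop):
#
#     checkpoint = connector.build_dummy_checkpoint()
#     while checkpoint.has_more:
#         gen = connector.load_from_checkpoint(start=start_ts, end=end_ts, checkpoint=checkpoint)
#         try:
#             while True:
#                 handle_document(next(gen))   # yields Document objects
#         except StopIteration as stop:
#             checkpoint = stop.value          # the generator returns the next checkpoint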
class ImapCheckpoint(ConnectorCheckpoint):
todo_mailboxes: list[str] | None = None
current_mailbox: CurrentMailbox | None = None
class LoginState(str, Enum):
LoggedIn = "logged_in"
LoggedOut = "logged_out"
class ImapConnector(
CredentialsConnector,
CheckpointedConnectorWithPermSync,
):
def __init__(
self,
host: str,
port: int = _DEFAULT_IMAP_PORT_NUMBER,
mailboxes: list[str] | None = None,
) -> None:
self._host = host
self._port = port
self._mailboxes = mailboxes
self._credentials: dict[str, Any] | None = None
@property
def credentials(self) -> dict[str, Any]:
if not self._credentials:
raise RuntimeError(
"Credentials have not been initialized; call `set_credentials_provider` first"
)
return self._credentials
def _get_mail_client(self) -> imaplib.IMAP4_SSL:
"""
Returns a new `imaplib.IMAP4_SSL` instance.
The `imaplib.IMAP4_SSL` object is supposed to be an "ephemeral" object; you cannot log in,
log out, and then log back in with the same instance. I.e., the following will fail:
```py
mail_client.login(..)
mail_client.logout();
mail_client.login(..)
```
Therefore, you need a fresh, new instance in order to operate with IMAP. This function gives one to you.
# Notes
This function will throw an error if the credentials have not yet been set.
"""
def get_or_raise(name: str) -> str:
value = self.credentials.get(name)
if not value:
raise RuntimeError(f"Credential item {name=} was not found")
if not isinstance(value, str):
raise RuntimeError(
f"Credential item {name=} must be of type str, instead received {type(name)=}"
)
return value
username = get_or_raise(_USERNAME_KEY)
password = get_or_raise(_PASSWORD_KEY)
mail_client = imaplib.IMAP4_SSL(host=self._host, port=self._port)
status, _data = mail_client.login(user=username, password=password)
if status != _IMAP_OKAY_STATUS:
raise RuntimeError(f"Failed to log into imap server; {status=}")
return mail_client
def _load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ImapCheckpoint,
include_perm_sync: bool,
) -> CheckpointOutput[ImapCheckpoint]:
checkpoint = cast(ImapCheckpoint, copy.deepcopy(checkpoint))
checkpoint.has_more = True
mail_client = self._get_mail_client()
if checkpoint.todo_mailboxes is None:
# This is the dummy checkpoint.
# Fill it with mailboxes first.
if self._mailboxes:
checkpoint.todo_mailboxes = _sanitize_mailbox_names(self._mailboxes)
else:
fetched_mailboxes = _fetch_all_mailboxes_for_email_account(
mail_client=mail_client
)
if not fetched_mailboxes:
raise RuntimeError(
"Failed to find any mailboxes for this email account"
)
checkpoint.todo_mailboxes = _sanitize_mailbox_names(fetched_mailboxes)
return checkpoint
if (
not checkpoint.current_mailbox
or not checkpoint.current_mailbox.todo_email_ids
):
if not checkpoint.todo_mailboxes:
checkpoint.has_more = False
return checkpoint
mailbox = checkpoint.todo_mailboxes.pop()
email_ids = _fetch_email_ids_in_mailbox(
mail_client=mail_client,
mailbox=mailbox,
start=start,
end=end,
)
checkpoint.current_mailbox = CurrentMailbox(
mailbox=mailbox,
todo_email_ids=email_ids,
)
_select_mailbox(
mail_client=mail_client, mailbox=checkpoint.current_mailbox.mailbox
)
current_todos = cast(
list, copy.deepcopy(checkpoint.current_mailbox.todo_email_ids[:_PAGE_SIZE])
)
checkpoint.current_mailbox.todo_email_ids = (
checkpoint.current_mailbox.todo_email_ids[_PAGE_SIZE:]
)
for email_id in current_todos:
email_msg = _fetch_email(mail_client=mail_client, email_id=email_id)
if not email_msg:
logging.warning(f"Failed to fetch message {email_id=}; skipping")
continue
email_headers = EmailHeaders.from_email_msg(email_msg=email_msg)
msg_dt = email_headers.date
if msg_dt.tzinfo is None:
msg_dt = msg_dt.replace(tzinfo=timezone.utc)
else:
msg_dt = msg_dt.astimezone(timezone.utc)
start_dt = datetime.fromtimestamp(start, tz=timezone.utc)
end_dt = datetime.fromtimestamp(end, tz=timezone.utc)
if not (start_dt < msg_dt <= end_dt):
continue
email_doc = _convert_email_headers_and_body_into_document(
email_msg=email_msg,
email_headers=email_headers,
include_perm_sync=include_perm_sync,
)
yield email_doc
attachments = extract_attachments(email_msg)
for att in attachments:
yield attachment_to_document(email_doc, att, email_headers)
return checkpoint
# impls for BaseConnector
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
self._credentials = credentials
return None
def validate_connector_settings(self) -> None:
self._get_mail_client()
# impls for CredentialsConnector
def set_credentials_provider(
self, credentials_provider: CredentialsProviderInterface
) -> None:
self._credentials = credentials_provider.get_credentials()
# impls for CheckpointedConnector
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ImapCheckpoint,
) -> CheckpointOutput[ImapCheckpoint]:
return self._load_from_checkpoint(
start=start, end=end, checkpoint=checkpoint, include_perm_sync=False
)
def build_dummy_checkpoint(self) -> ImapCheckpoint:
return ImapCheckpoint(has_more=True)
def validate_checkpoint_json(self, checkpoint_json: str) -> ImapCheckpoint:
return ImapCheckpoint.model_validate_json(json_data=checkpoint_json)
# impls for CheckpointedConnectorWithPermSync
def load_from_checkpoint_with_perm_sync(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ImapCheckpoint,
) -> CheckpointOutput[ImapCheckpoint]:
return self._load_from_checkpoint(
start=start, end=end, checkpoint=checkpoint, include_perm_sync=True
)
def _fetch_all_mailboxes_for_email_account(mail_client: imaplib.IMAP4_SSL) -> list[str]:
status, mailboxes_data = mail_client.list('""', "*")
if status != _IMAP_OKAY_STATUS:
raise RuntimeError(f"Failed to fetch mailboxes; {status=}")
mailboxes = []
for mailboxes_raw in mailboxes_data:
if isinstance(mailboxes_raw, bytes):
mailboxes_str = mailboxes_raw.decode()
elif isinstance(mailboxes_raw, str):
mailboxes_str = mailboxes_raw
else:
logging.warning(
f"Expected the mailbox data to be of type str, instead got {type(mailboxes_raw)=} {mailboxes_raw}; skipping"
)
continue
# The mailbox LIST response output can be found here:
# https://www.rfc-editor.org/rfc/rfc3501.html#section-7.2.2
#
# The general format is:
# `(<name-attributes>) <hierarchy-delimiter> <mailbox-name>`
#
# The regex below matches on that pattern; from there, we select capture group 2, which is the mailbox-name.
match = re.match(r'\([^)]*\)\s+"([^"]+)"\s+"?(.+?)"?$', mailboxes_str)
if not match:
logging.warning(
f"Invalid mailbox-data formatting structure: {mailboxes_str=}; skipping"
)
continue
mailbox = match.group(2)
mailboxes.append(mailbox)
if not mailboxes:
logging.warning(
"No mailboxes parsed from LIST response; falling back to INBOX"
)
return ["INBOX"]
return mailboxes
def _select_mailbox(mail_client: imaplib.IMAP4_SSL, mailbox: str) -> bool:
try:
status, _ = mail_client.select(mailbox=mailbox, readonly=True)
if status != _IMAP_OKAY_STATUS:
return False
return True
except Exception:
return False
def _fetch_email_ids_in_mailbox(
mail_client: imaplib.IMAP4_SSL,
mailbox: str,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
) -> list[str]:
if not _select_mailbox(mail_client, mailbox):
logging.warning(f"Skip mailbox: {mailbox}")
return []
start_dt = datetime.fromtimestamp(start, tz=timezone.utc)
end_dt = datetime.fromtimestamp(end, tz=timezone.utc) + timedelta(days=1)
start_str = start_dt.strftime("%d-%b-%Y")
end_str = end_dt.strftime("%d-%b-%Y")
search_criteria = f'(SINCE "{start_str}" BEFORE "{end_str}")'
status, email_ids_byte_array = mail_client.search(None, search_criteria)
if status != _IMAP_OKAY_STATUS or not email_ids_byte_array:
raise RuntimeError(f"Failed to fetch email ids; {status=}")
email_ids: bytes = email_ids_byte_array[0]
return [email_id.decode() for email_id in email_ids.split()]
def _fetch_email(mail_client: imaplib.IMAP4_SSL, email_id: str) -> Message | None:
status, msg_data = mail_client.fetch(message_set=email_id, message_parts="(RFC822)")
if status != _IMAP_OKAY_STATUS or not msg_data:
return None
data = msg_data[0]
if not isinstance(data, tuple):
raise RuntimeError(
f"Message data should be a tuple; instead got a {type(data)=} {data=}"
)
_, raw_email = data
return email.message_from_bytes(raw_email)
def _convert_email_headers_and_body_into_document(
email_msg: Message,
email_headers: EmailHeaders,
include_perm_sync: bool,
) -> Document:
sender_name, sender_addr = _parse_singular_addr(raw_header=email_headers.sender)
to_addrs = (
_parse_addrs(email_headers.recipients)
if email_headers.recipients
else []
)
cc_addrs = (
_parse_addrs(email_headers.cc)
if email_headers.cc
else []
)
all_participants = to_addrs + cc_addrs
expert_info_map = {
recipient_addr: BasicExpertInfo(
display_name=recipient_name, email=recipient_addr
)
for recipient_name, recipient_addr in all_participants
}
if sender_addr not in expert_info_map:
expert_info_map[sender_addr] = BasicExpertInfo(
display_name=sender_name, email=sender_addr
)
email_body = _parse_email_body(email_msg=email_msg, email_headers=email_headers)
primary_owners = list(expert_info_map.values())
external_access = (
ExternalAccess(
external_user_emails=set(expert_info_map.keys()),
external_user_group_ids=set(),
is_public=False,
)
if include_perm_sync
else None
)
return Document(
id=email_headers.id,
title=email_headers.subject,
blob=email_body,
size_bytes=len(email_body),
semantic_identifier=email_headers.subject,
metadata={},
extension='.txt',
doc_updated_at=email_headers.date,
source=DocumentSource.IMAP,
primary_owners=primary_owners,
external_access=external_access,
)
def extract_attachments(email_msg: Message, max_bytes: int = IMAP_CONNECTOR_SIZE_THRESHOLD):
attachments = []
if not email_msg.is_multipart():
return attachments
for part in email_msg.walk():
if part.get_content_maintype() == "multipart":
continue
disposition = (part.get("Content-Disposition") or "").lower()
filename = part.get_filename()
if not (
disposition.startswith("attachment")
or (disposition.startswith("inline") and filename)
):
continue
payload = part.get_payload(decode=True)
if not payload:
continue
if len(payload) > max_bytes:
continue
attachments.append({
"filename": filename or "attachment.bin",
"content_type": part.get_content_type(),
"content_bytes": payload,
"size_bytes": len(payload),
})
return attachments
def decode_mime_filename(raw: str | None) -> str | None:
if not raw:
return None
try:
raw = collapse_rfc2231_value(raw)
except Exception:
pass
parts = decode_header(raw)
decoded = []
for value, encoding in parts:
if isinstance(value, bytes):
decoded.append(value.decode(encoding or "utf-8", errors="replace"))
else:
decoded.append(value)
return "".join(decoded)
def attachment_to_document(
parent_doc: Document,
att: dict,
email_headers: EmailHeaders,
):
raw_filename = att["filename"]
filename = decode_mime_filename(raw_filename) or "attachment.bin"
ext = "." + filename.split(".")[-1] if "." in filename else ""
return Document(
id=f"{parent_doc.id}#att:{filename}",
source=DocumentSource.IMAP,
semantic_identifier=filename,
extension=ext,
blob=att["content_bytes"],
size_bytes=att["size_bytes"],
doc_updated_at=email_headers.date,
primary_owners=parent_doc.primary_owners,
metadata={
"parent_email_id": parent_doc.id,
"parent_subject": email_headers.subject,
"attachment_filename": filename,
"attachment_content_type": att["content_type"],
},
)
def _parse_email_body(
email_msg: Message,
email_headers: EmailHeaders,
) -> str:
body = None
for part in email_msg.walk():
if part.is_multipart():
# Multipart parts are *containers* for other parts, not the actual content itself.
# Therefore, we skip until we find the individual parts instead.
continue
charset = part.get_content_charset() or "utf-8"
try:
raw_payload = part.get_payload(decode=True)
if not isinstance(raw_payload, bytes):
logging.warning(
"Payload section from email was expected to be an array of bytes, instead got "
f"{type(raw_payload)=}, {raw_payload=}"
)
continue
body = raw_payload.decode(charset)
break
except (UnicodeDecodeError, LookupError) as e:
logging.warning(f"Could not decode part with charset {charset}. Error: {e}")
continue
if not body:
logging.warning(
f"Email with {email_headers.id=} has an empty body; returning an empty string"
)
return ""
soup = bs4.BeautifulSoup(markup=body, features="html.parser")
return " ".join(str_section for str_section in soup.stripped_strings)
def _sanitize_mailbox_names(mailboxes: list[str]) -> list[str]:
"""
Mailboxes with special characters in them must be enclosed by double-quotes, as per the IMAP protocol.
Just to be safe, we wrap *all* mailboxes with double-quotes.
"""
return [f'"{mailbox}"' for mailbox in mailboxes if mailbox]
def _parse_addrs(raw_header: str) -> list[tuple[str, str]]:
if not raw_header:
return []
return getaddresses([raw_header])
def _parse_singular_addr(raw_header: str) -> tuple[str, str]:
addrs = _parse_addrs(raw_header=raw_header)
if not addrs:
return ("Unknown", "unknown@example.com")
elif len(addrs) >= 2:
raise RuntimeError(
f"Expected a singular address, but instead got multiple; {raw_header=} {addrs=}"
)
return addrs[0]
if __name__ == "__main__":
import time
from types import TracebackType
from common.data_source.utils import load_all_docs_from_checkpoint_connector
class OnyxStaticCredentialsProvider(
CredentialsProviderInterface["OnyxStaticCredentialsProvider"]
):
"""Implementation (a very simple one!) to handle static credentials."""
def __init__(
self,
tenant_id: str | None,
connector_name: str,
credential_json: dict[str, Any],
):
self._tenant_id = tenant_id
self._connector_name = connector_name
self._credential_json = credential_json
self._provider_key = str(uuid.uuid4())
def __enter__(self) -> "OnyxStaticCredentialsProvider":
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
pass
def get_tenant_id(self) -> str | None:
return self._tenant_id
def get_provider_key(self) -> str:
return self._provider_key
def get_credentials(self) -> dict[str, Any]:
return self._credential_json
def set_credentials(self, credential_json: dict[str, Any]) -> None:
self._credential_json = credential_json
def is_dynamic(self) -> bool:
return False
# from tests.daily.connectors.utils import load_all_docs_from_checkpoint_connector
# from onyx.connectors.credentials_provider import OnyxStaticCredentialsProvider
host = os.environ.get("IMAP_HOST")
mailboxes_str = os.environ.get("IMAP_MAILBOXES", "INBOX")
username = os.environ.get("IMAP_USERNAME")
password = os.environ.get("IMAP_PASSWORD")
mailboxes = (
[mailbox.strip() for mailbox in mailboxes_str.split(",")]
if mailboxes_str
else []
)
if not host:
raise RuntimeError("`IMAP_HOST` must be set")
imap_connector = ImapConnector(
host=host,
mailboxes=mailboxes,
)
imap_connector.set_credentials_provider(
OnyxStaticCredentialsProvider(
tenant_id=None,
connector_name=DocumentSource.IMAP,
credential_json={
_USERNAME_KEY: username,
_PASSWORD_KEY: password,
},
)
)
END = time.time()
START = END - 1 * 24 * 60 * 60
for doc in load_all_docs_from_checkpoint_connector(
connector=imap_connector,
start=START,
end=END,
):
print(doc.id, doc.extension)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/imap_connector.py",
"license": "Apache License 2.0",
"lines": 595,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/connector_runner.py | import sys
import time
import logging
from collections.abc import Generator
from datetime import datetime
from typing import Generic
from typing import TypeVar
from common.data_source.interfaces import (
BaseConnector,
CheckpointedConnector,
CheckpointedConnectorWithPermSync,
CheckpointOutput,
LoadConnector,
PollConnector,
)
from common.data_source.models import ConnectorCheckpoint, ConnectorFailure, Document
TimeRange = tuple[datetime, datetime]
CT = TypeVar("CT", bound=ConnectorCheckpoint)
def batched_doc_ids(
checkpoint_connector_generator: CheckpointOutput[CT],
batch_size: int,
) -> Generator[set[str], None, None]:
batch: set[str] = set()
for document, failure, next_checkpoint in CheckpointOutputWrapper[CT]()(
checkpoint_connector_generator
):
if document is not None:
batch.add(document.id)
elif (
failure and failure.failed_document and failure.failed_document.document_id
):
batch.add(failure.failed_document.document_id)
if len(batch) >= batch_size:
yield batch
batch = set()
if len(batch) > 0:
yield batch
class CheckpointOutputWrapper(Generic[CT]):
"""
Wraps a CheckpointOutput generator to give things back in a more digestible format,
specifically for Document outputs.
The connector format is easier for the connector implementor (e.g. it enforces exactly
one new checkpoint is returned AND that the checkpoint is at the end), thus the different
formats.
"""
def __init__(self) -> None:
self.next_checkpoint: CT | None = None
def __call__(
self,
checkpoint_connector_generator: CheckpointOutput[CT],
) -> Generator[
tuple[Document | None, ConnectorFailure | None, CT | None],
None,
None,
]:
# grabs the final return value and stores it in the `next_checkpoint` variable
def _inner_wrapper(
checkpoint_connector_generator: CheckpointOutput[CT],
) -> CheckpointOutput[CT]:
self.next_checkpoint = yield from checkpoint_connector_generator
return self.next_checkpoint # not used
for document_or_failure in _inner_wrapper(checkpoint_connector_generator):
if isinstance(document_or_failure, Document):
yield document_or_failure, None, None
elif isinstance(document_or_failure, ConnectorFailure):
yield None, document_or_failure, None
else:
raise ValueError(
f"Invalid document_or_failure type: {type(document_or_failure)}"
)
if self.next_checkpoint is None:
raise RuntimeError(
"Checkpoint is None. This should never happen - the connector should always return a checkpoint."
)
yield None, None, self.next_checkpoint
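# A minimal consumption sketch (illustrative only; the connector, checkpoint type, time
# range, and handlers below are placeholders): each yielded triple has exactly one
# non-None element, and the checkpoint is always the final item.
#
#     wrapper = CheckpointOutputWrapper[MyCheckpoint]()
#     gen = connector.load_from_checkpoint(start_ts, end_ts, checkpoint)
#     for document, failure, next_checkpoint in wrapper(gen):
#         if document is not None:
#             handle_document(document)
#         elif failure is not None:
#             handle_failure(failure)
#         else:
#             checkpoint = next_checkpoint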
class ConnectorRunner(Generic[CT]):
"""
Handles:
- Batching
- Additional exception logging
- Combining different connector types into a single interface
"""
def __init__(
self,
connector: BaseConnector,
batch_size: int,
# cannot be True for non-checkpointed connectors
include_permissions: bool,
time_range: TimeRange | None = None,
):
if not isinstance(connector, CheckpointedConnector) and include_permissions:
raise ValueError(
"include_permissions cannot be True for non-checkpointed connectors"
)
self.connector = connector
self.time_range = time_range
self.batch_size = batch_size
self.include_permissions = include_permissions
self.doc_batch: list[Document] = []
def run(self, checkpoint: CT) -> Generator[
tuple[list[Document] | None, ConnectorFailure | None, CT | None],
None,
None,
]:
"""Adds additional exception logging to the connector."""
try:
if isinstance(self.connector, CheckpointedConnector):
if self.time_range is None:
raise ValueError("time_range is required for CheckpointedConnector")
start = time.monotonic()
if self.include_permissions:
if not isinstance(
self.connector, CheckpointedConnectorWithPermSync
):
raise ValueError(
"Connector does not support permission syncing"
)
load_from_checkpoint = (
self.connector.load_from_checkpoint_with_perm_sync
)
else:
load_from_checkpoint = self.connector.load_from_checkpoint
checkpoint_connector_generator = load_from_checkpoint(
start=self.time_range[0].timestamp(),
end=self.time_range[1].timestamp(),
checkpoint=checkpoint,
)
next_checkpoint: CT | None = None
# this is guaranteed to always run at least once with next_checkpoint being non-None
for document, failure, next_checkpoint in CheckpointOutputWrapper[CT]()(
checkpoint_connector_generator
):
if document is not None and isinstance(document, Document):
self.doc_batch.append(document)
if failure is not None:
yield None, failure, None
if len(self.doc_batch) >= self.batch_size:
yield self.doc_batch, None, None
self.doc_batch = []
# yield remaining documents
if len(self.doc_batch) > 0:
yield self.doc_batch, None, None
self.doc_batch = []
yield None, None, next_checkpoint
logging.debug(
f"Connector took {time.monotonic() - start} seconds to get to the next checkpoint."
)
else:
finished_checkpoint = self.connector.build_dummy_checkpoint()
finished_checkpoint.has_more = False
if isinstance(self.connector, PollConnector):
if self.time_range is None:
raise ValueError("time_range is required for PollConnector")
for document_batch in self.connector.poll_source(
start=self.time_range[0].timestamp(),
end=self.time_range[1].timestamp(),
):
yield document_batch, None, None
yield None, None, finished_checkpoint
elif isinstance(self.connector, LoadConnector):
for document_batch in self.connector.load_from_state():
yield document_batch, None, None
yield None, None, finished_checkpoint
else:
raise ValueError(f"Invalid connector. type: {type(self.connector)}")
except Exception:
exc_type, _, exc_traceback = sys.exc_info()
# Traverse the traceback to find the last frame where the exception was raised
tb = exc_traceback
if tb is None:
logging.error("No traceback found for exception")
raise
while tb.tb_next:
tb = tb.tb_next # Move to the next frame in the traceback
# Get the local variables from the frame where the exception occurred
local_vars = tb.tb_frame.f_locals
local_vars_str = "\n".join(
f"{key}: {value}" for key, value in local_vars.items()
)
logging.error(
f"Error in connector. type: {exc_type};\n"
f"local_vars below -> \n{local_vars_str[:1024]}"
)
raise | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/connector_runner.py",
"license": "Apache License 2.0",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/github/connector.py | import copy
import logging
from collections.abc import Callable
from collections.abc import Generator
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from enum import Enum
from typing import Any
from typing import cast
from github import Github, Auth
from github import RateLimitExceededException
from github import Repository
from github.GithubException import GithubException
from github.Issue import Issue
from github.NamedUser import NamedUser
from github.PaginatedList import PaginatedList
from github.PullRequest import PullRequest
from pydantic import BaseModel
from typing_extensions import override
from common.data_source.utils import sanitize_filename
from common.data_source.config import DocumentSource, GITHUB_CONNECTOR_BASE_URL
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
CredentialExpiredError,
InsufficientPermissionsError,
UnexpectedValidationError,
)
from common.data_source.interfaces import CheckpointedConnectorWithPermSyncGH, CheckpointOutput
from common.data_source.models import (
ConnectorCheckpoint,
ConnectorFailure,
Document,
DocumentFailure,
ExternalAccess,
SecondsSinceUnixEpoch,
)
from common.data_source.connector_runner import ConnectorRunner
from .models import SerializedRepository
from .rate_limit_utils import sleep_after_rate_limit_exception
from .utils import deserialize_repository
from .utils import get_external_access_permission
ITEMS_PER_PAGE = 100
CURSOR_LOG_FREQUENCY = 50
_MAX_NUM_RATE_LIMIT_RETRIES = 5
ONE_DAY = timedelta(days=1)
SLIM_BATCH_SIZE = 100
# Cases
# X (from start) standard run, no fallback to cursor-based pagination
# X (from start) standard run errors, fallback to cursor-based pagination
# X error in the middle of a page
# X no errors: run to completion
# X (from checkpoint) standard run, no fallback to cursor-based pagination
# X (from checkpoint) continue from cursor-based pagination
# - retrying
# - no retrying
# things to check:
# checkpoint state on return
# checkpoint progress (no infinite loop)
class DocMetadata(BaseModel):
repo: str
def get_nextUrl_key(pag_list: PaginatedList[PullRequest | Issue]) -> str:
if "_PaginatedList__nextUrl" in pag_list.__dict__:
return "_PaginatedList__nextUrl"
for key in pag_list.__dict__:
if "__nextUrl" in key:
return key
for key in pag_list.__dict__:
if "nextUrl" in key:
return key
return ""
def get_nextUrl(
pag_list: PaginatedList[PullRequest | Issue], nextUrl_key: str
) -> str | None:
return getattr(pag_list, nextUrl_key) if nextUrl_key else None
def set_nextUrl(
pag_list: PaginatedList[PullRequest | Issue], nextUrl_key: str, nextUrl: str
) -> None:
if nextUrl_key:
setattr(pag_list, nextUrl_key, nextUrl)
elif nextUrl:
raise ValueError("Next URL key not found: " + str(pag_list.__dict__))
def _paginate_until_error(
git_objs: Callable[[], PaginatedList[PullRequest | Issue]],
cursor_url: str | None,
prev_num_objs: int,
cursor_url_callback: Callable[[str | None, int], None],
retrying: bool = False,
) -> Generator[PullRequest | Issue, None, None]:
num_objs = prev_num_objs
pag_list = git_objs()
nextUrl_key = get_nextUrl_key(pag_list)
if cursor_url:
set_nextUrl(pag_list, nextUrl_key, cursor_url)
elif retrying:
# if we are retrying, we want to skip the objects retrieved
# over previous calls. Unfortunately, this WILL retrieve all
# pages before the one we are resuming from, so we really
# don't want this case to be hit often
logging.warning(
"Retrying from a previous cursor-based pagination call. "
"This will retrieve all pages before the one we are resuming from, "
"which may take a while and consume many API calls."
)
pag_list = cast(PaginatedList[PullRequest | Issue], pag_list[prev_num_objs:])
num_objs = 0
try:
# this for loop handles cursor-based pagination
for issue_or_pr in pag_list:
num_objs += 1
yield issue_or_pr
# used to store the current cursor url in the checkpoint. This value
# is updated during iteration over pag_list.
cursor_url_callback(get_nextUrl(pag_list, nextUrl_key), num_objs)
if num_objs % CURSOR_LOG_FREQUENCY == 0:
logging.info(
f"Retrieved {num_objs} objects with current cursor url: {get_nextUrl(pag_list, nextUrl_key)}"
)
except Exception as e:
logging.exception(f"Error during cursor-based pagination: {e}")
if num_objs - prev_num_objs > 0:
raise
if get_nextUrl(pag_list, nextUrl_key) is not None and not retrying:
logging.info(
"Assuming that this error is due to cursor "
"expiration because no objects were retrieved. "
"Retrying from the first page."
)
yield from _paginate_until_error(
git_objs, None, prev_num_objs, cursor_url_callback, retrying=True
)
return
# for no cursor url or if we reach this point after a retry, raise the error
raise
def _get_batch_rate_limited(
# We pass in a callable because we want git_objs to produce a fresh
# PaginatedList each time it's called to avoid using the same object for cursor-based pagination
# from a partial offset-based pagination call.
git_objs: Callable[[], PaginatedList],
page_num: int,
cursor_url: str | None,
prev_num_objs: int,
cursor_url_callback: Callable[[str | None, int], None],
github_client: Github,
attempt_num: int = 0,
) -> Generator[PullRequest | Issue, None, None]:
if attempt_num > _MAX_NUM_RATE_LIMIT_RETRIES:
raise RuntimeError(
"Re-tried fetching batch too many times. Something is going wrong with fetching objects from Github"
)
try:
if cursor_url:
# when this is set, we are resuming from an earlier
# cursor-based pagination call.
yield from _paginate_until_error(
git_objs, cursor_url, prev_num_objs, cursor_url_callback
)
return
objs = list(git_objs().get_page(page_num))
# fetch all data here to disable lazy loading later
# this is needed to capture the rate limit exception here (if one occurs)
for obj in objs:
if hasattr(obj, "raw_data"):
getattr(obj, "raw_data")
yield from objs
except RateLimitExceededException:
sleep_after_rate_limit_exception(github_client)
yield from _get_batch_rate_limited(
git_objs,
page_num,
cursor_url,
prev_num_objs,
cursor_url_callback,
github_client,
attempt_num + 1,
)
except GithubException as e:
if not (
e.status == 422
and (
"cursor" in (e.message or "")
or "cursor" in (e.data or {}).get("message", "")
)
):
raise
# Fallback to a cursor-based pagination strategy
# This can happen for "large datasets," but there's no documentation
# On the error on the web as far as we can tell.
# Error message:
# "Pagination with the page parameter is not supported for large datasets,
# please use cursor based pagination (after/before)"
yield from _paginate_until_error(
git_objs, cursor_url, prev_num_objs, cursor_url_callback
)
def _get_userinfo(user: NamedUser) -> dict[str, str]:
def _safe_get(attr_name: str) -> str | None:
try:
return cast(str | None, getattr(user, attr_name))
except GithubException:
logging.debug(f"Error getting {attr_name} for user")
return None
return {
k: v
for k, v in {
"login": _safe_get("login"),
"name": _safe_get("name"),
"email": _safe_get("email"),
}.items()
if v is not None
}
def _convert_pr_to_document(
pull_request: PullRequest, repo_external_access: ExternalAccess | None
) -> Document:
repo_name = pull_request.base.repo.full_name if pull_request.base else ""
doc_metadata = DocMetadata(repo=repo_name)
file_content_byte = pull_request.body.encode('utf-8') if pull_request.body else b""
name = sanitize_filename(pull_request.title, "md")
return Document(
id=pull_request.html_url,
blob=file_content_byte,
source=DocumentSource.GITHUB,
external_access=repo_external_access,
semantic_identifier=f"{pull_request.number}:{name}",
# updated_at is UTC time but is timezone unaware, explicitly add UTC
# as there is logic in indexing to prevent wrong timestamped docs
# due to local time discrepancies with UTC
doc_updated_at=(
pull_request.updated_at.replace(tzinfo=timezone.utc)
if pull_request.updated_at
else None
),
extension=".md",
# this metadata is used in perm sync
size_bytes=len(file_content_byte) if file_content_byte else 0,
primary_owners=[],
doc_metadata=doc_metadata.model_dump(),
metadata={
k: [str(vi) for vi in v] if isinstance(v, list) else str(v)
for k, v in {
"object_type": "PullRequest",
"id": pull_request.number,
"merged": pull_request.merged,
"state": pull_request.state,
"user": _get_userinfo(pull_request.user) if pull_request.user else None,
"assignees": [
_get_userinfo(assignee) for assignee in pull_request.assignees
],
"repo": (
pull_request.base.repo.full_name if pull_request.base else None
),
"num_commits": str(pull_request.commits),
"num_files_changed": str(pull_request.changed_files),
"labels": [label.name for label in pull_request.labels],
"created_at": (
pull_request.created_at.replace(tzinfo=timezone.utc)
if pull_request.created_at
else None
),
"updated_at": (
pull_request.updated_at.replace(tzinfo=timezone.utc)
if pull_request.updated_at
else None
),
"closed_at": (
pull_request.closed_at.replace(tzinfo=timezone.utc)
if pull_request.closed_at
else None
),
"merged_at": (
pull_request.merged_at.replace(tzinfo=timezone.utc)
if pull_request.merged_at
else None
),
"merged_by": (
_get_userinfo(pull_request.merged_by)
if pull_request.merged_by
else None
),
}.items()
if v is not None
},
)
def _fetch_issue_comments(issue: Issue) -> str:
comments = issue.get_comments()
return "\nComment: ".join(comment.body for comment in comments)
def _convert_issue_to_document(
issue: Issue, repo_external_access: ExternalAccess | None
) -> Document:
repo_name = issue.repository.full_name if issue.repository else ""
doc_metadata = DocMetadata(repo=repo_name)
    file_content_byte = issue.body.encode("utf-8") if issue.body else b""
name = sanitize_filename(issue.title, "md")
return Document(
id=issue.html_url,
blob=file_content_byte,
source=DocumentSource.GITHUB,
extension=".md",
external_access=repo_external_access,
semantic_identifier=f"{issue.number}:{name}",
# updated_at is UTC time but is timezone unaware
doc_updated_at=issue.updated_at.replace(tzinfo=timezone.utc),
# this metadata is used in perm sync
doc_metadata=doc_metadata.model_dump(),
size_bytes=len(file_content_byte) if file_content_byte else 0,
        primary_owners=[_get_userinfo(issue.user)] if issue.user else [],
metadata={
k: [str(vi) for vi in v] if isinstance(v, list) else str(v)
for k, v in {
"object_type": "Issue",
"id": issue.number,
"state": issue.state,
"user": _get_userinfo(issue.user) if issue.user else None,
"assignees": [_get_userinfo(assignee) for assignee in issue.assignees],
"repo": issue.repository.full_name if issue.repository else None,
"labels": [label.name for label in issue.labels],
"created_at": (
issue.created_at.replace(tzinfo=timezone.utc)
if issue.created_at
else None
),
"updated_at": (
issue.updated_at.replace(tzinfo=timezone.utc)
if issue.updated_at
else None
),
"closed_at": (
issue.closed_at.replace(tzinfo=timezone.utc)
if issue.closed_at
else None
),
"closed_by": (
_get_userinfo(issue.closed_by) if issue.closed_by else None
),
}.items()
if v is not None
},
)
class GithubConnectorStage(Enum):
START = "start"
PRS = "prs"
ISSUES = "issues"
class GithubConnectorCheckpoint(ConnectorCheckpoint):
stage: GithubConnectorStage
curr_page: int
cached_repo_ids: list[int] | None = None
cached_repo: SerializedRepository | None = None
# Used for the fallback cursor-based pagination strategy
num_retrieved: int
cursor_url: str | None = None
def reset(self) -> None:
"""
Resets curr_page, num_retrieved, and cursor_url to their initial values (0, 0, None)
"""
self.curr_page = 0
self.num_retrieved = 0
self.cursor_url = None
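# Hedged illustration (values are made up, not from this repository): a checkpoint
# mid-run, after the repo list has been cached and one offset page of PRs fetched,
# might look roughly like
#   GithubConnectorCheckpoint(
#       stage=GithubConnectorStage.PRS,
#       curr_page=1,
#       cached_repo_ids=[123, 456],
#       cached_repo=SerializedRepository(id=789, headers={...}, raw_data={...}),
#       num_retrieved=0,
#       cursor_url=None,
#       has_more=True,
#   )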
def make_cursor_url_callback(
checkpoint: GithubConnectorCheckpoint,
) -> Callable[[str | None, int], None]:
def cursor_url_callback(cursor_url: str | None, num_objs: int) -> None:
# we want to maintain the old cursor url so code after retrieval
# can determine that we are using the fallback cursor-based pagination strategy
if cursor_url:
checkpoint.cursor_url = cursor_url
checkpoint.num_retrieved = num_objs
return cursor_url_callback
class GithubConnector(CheckpointedConnectorWithPermSyncGH[GithubConnectorCheckpoint]):
def __init__(
self,
repo_owner: str,
repositories: str | None = None,
state_filter: str = "all",
include_prs: bool = True,
include_issues: bool = False,
) -> None:
self.repo_owner = repo_owner
self.repositories = repositories
self.state_filter = state_filter
self.include_prs = include_prs
self.include_issues = include_issues
self.github_client: Github | None = None
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
# defaults to 30 items per page, can be set to as high as 100
token = credentials["github_access_token"]
auth = Auth.Token(token)
if GITHUB_CONNECTOR_BASE_URL:
self.github_client = Github(
auth=auth,
base_url=GITHUB_CONNECTOR_BASE_URL,
per_page=ITEMS_PER_PAGE,
)
else:
self.github_client = Github(
auth=auth,
per_page=ITEMS_PER_PAGE,
)
return None
def get_github_repo(
self, github_client: Github, attempt_num: int = 0
) -> Repository.Repository:
if attempt_num > _MAX_NUM_RATE_LIMIT_RETRIES:
raise RuntimeError(
"Re-tried fetching repo too many times. Something is going wrong with fetching objects from Github"
)
try:
return github_client.get_repo(f"{self.repo_owner}/{self.repositories}")
except RateLimitExceededException:
sleep_after_rate_limit_exception(github_client)
return self.get_github_repo(github_client, attempt_num + 1)
def get_github_repos(
self, github_client: Github, attempt_num: int = 0
) -> list[Repository.Repository]:
"""Get specific repositories based on comma-separated repo_name string."""
if attempt_num > _MAX_NUM_RATE_LIMIT_RETRIES:
raise RuntimeError(
"Re-tried fetching repos too many times. Something is going wrong with fetching objects from Github"
)
try:
repos = []
# Split repo_name by comma and strip whitespace
repo_names = [
name.strip() for name in (cast(str, self.repositories)).split(",")
]
for repo_name in repo_names:
if repo_name: # Skip empty strings
try:
repo = github_client.get_repo(f"{self.repo_owner}/{repo_name}")
repos.append(repo)
except GithubException as e:
logging.warning(
f"Could not fetch repo {self.repo_owner}/{repo_name}: {e}"
)
return repos
except RateLimitExceededException:
sleep_after_rate_limit_exception(github_client)
return self.get_github_repos(github_client, attempt_num + 1)
def get_all_repos(
self, github_client: Github, attempt_num: int = 0
) -> list[Repository.Repository]:
if attempt_num > _MAX_NUM_RATE_LIMIT_RETRIES:
raise RuntimeError(
"Re-tried fetching repos too many times. Something is going wrong with fetching objects from Github"
)
try:
# Try to get organization first
try:
org = github_client.get_organization(self.repo_owner)
return list(org.get_repos())
except GithubException:
# If not an org, try as a user
user = github_client.get_user(self.repo_owner)
return list(user.get_repos())
except RateLimitExceededException:
sleep_after_rate_limit_exception(github_client)
return self.get_all_repos(github_client, attempt_num + 1)
def _pull_requests_func(
self, repo: Repository.Repository
) -> Callable[[], PaginatedList[PullRequest]]:
return lambda: repo.get_pulls(
state=self.state_filter, sort="updated", direction="desc"
)
def _issues_func(
self, repo: Repository.Repository
) -> Callable[[], PaginatedList[Issue]]:
return lambda: repo.get_issues(
state=self.state_filter, sort="updated", direction="desc"
)
def _fetch_from_github(
self,
checkpoint: GithubConnectorCheckpoint,
start: datetime | None = None,
end: datetime | None = None,
include_permissions: bool = False,
) -> Generator[Document | ConnectorFailure, None, GithubConnectorCheckpoint]:
if self.github_client is None:
raise ConnectorMissingCredentialError("GitHub")
checkpoint = copy.deepcopy(checkpoint)
# First run of the connector, fetch all repos and store in checkpoint
if checkpoint.cached_repo_ids is None:
repos = []
if self.repositories:
if "," in self.repositories:
# Multiple repositories specified
repos = self.get_github_repos(self.github_client)
else:
# Single repository (backward compatibility)
repos = [self.get_github_repo(self.github_client)]
else:
# All repositories
repos = self.get_all_repos(self.github_client)
if not repos:
checkpoint.has_more = False
return checkpoint
curr_repo = repos.pop()
checkpoint.cached_repo_ids = [repo.id for repo in repos]
checkpoint.cached_repo = SerializedRepository(
id=curr_repo.id,
headers=curr_repo.raw_headers,
raw_data=curr_repo.raw_data,
)
checkpoint.stage = GithubConnectorStage.PRS
checkpoint.curr_page = 0
# save checkpoint with repo ids retrieved
return checkpoint
if checkpoint.cached_repo is None:
raise ValueError("No repo saved in checkpoint")
# Deserialize the repository from the checkpoint
repo = deserialize_repository(checkpoint.cached_repo, self.github_client)
cursor_url_callback = make_cursor_url_callback(checkpoint)
repo_external_access: ExternalAccess | None = None
if include_permissions:
repo_external_access = get_external_access_permission(
repo, self.github_client
)
if self.include_prs and checkpoint.stage == GithubConnectorStage.PRS:
logging.info(f"Fetching PRs for repo: {repo.name}")
pr_batch = _get_batch_rate_limited(
self._pull_requests_func(repo),
checkpoint.curr_page,
checkpoint.cursor_url,
checkpoint.num_retrieved,
cursor_url_callback,
self.github_client,
)
checkpoint.curr_page += 1 # NOTE: not used for cursor-based fallback
done_with_prs = False
num_prs = 0
pr = None
print("start: ", start)
for pr in pr_batch:
num_prs += 1
print("-"*40)
print("PR name", pr.title)
print("updated at", pr.updated_at)
print("-"*40)
print("\n")
# we iterate backwards in time, so at this point we stop processing prs
if (
start is not None
and pr.updated_at
and pr.updated_at.replace(tzinfo=timezone.utc) <= start
):
done_with_prs = True
break
# Skip PRs updated after the end date
if (
end is not None
and pr.updated_at
and pr.updated_at.replace(tzinfo=timezone.utc) > end
):
continue
try:
yield _convert_pr_to_document(
cast(PullRequest, pr), repo_external_access
)
except Exception as e:
error_msg = f"Error converting PR to document: {e}"
logging.exception(error_msg)
yield ConnectorFailure(
failed_document=DocumentFailure(
document_id=str(pr.id), document_link=pr.html_url
),
failure_message=error_msg,
exception=e,
)
continue
# If we reach this point with a cursor url in the checkpoint, we were using
# the fallback cursor-based pagination strategy. That strategy tries to get all
# PRs, so having curosr_url set means we are done with prs. However, we need to
# return AFTER the checkpoint reset to avoid infinite loops.
# if we found any PRs on the page and there are more PRs to get, return the checkpoint.
# In offset mode, while indexing without time constraints, the pr batch
# will be empty when we're done.
used_cursor = checkpoint.cursor_url is not None
if num_prs > 0 and not done_with_prs and not used_cursor:
return checkpoint
# if we went past the start date during the loop or there are no more
# prs to get, we move on to issues
checkpoint.stage = GithubConnectorStage.ISSUES
checkpoint.reset()
if used_cursor:
# save the checkpoint after changing stage; next run will continue from issues
return checkpoint
checkpoint.stage = GithubConnectorStage.ISSUES
if self.include_issues and checkpoint.stage == GithubConnectorStage.ISSUES:
logging.info(f"Fetching issues for repo: {repo.name}")
issue_batch = list(
_get_batch_rate_limited(
self._issues_func(repo),
checkpoint.curr_page,
checkpoint.cursor_url,
checkpoint.num_retrieved,
cursor_url_callback,
self.github_client,
)
)
checkpoint.curr_page += 1
done_with_issues = False
num_issues = 0
for issue in issue_batch:
num_issues += 1
issue = cast(Issue, issue)
                # we iterate backwards in time, so at this point we stop processing issues
if (
start is not None
and issue.updated_at.replace(tzinfo=timezone.utc) <= start
):
done_with_issues = True
break
                # Skip issues updated after the end date
if (
end is not None
and issue.updated_at.replace(tzinfo=timezone.utc) > end
):
continue
if issue.pull_request is not None:
# PRs are handled separately
continue
try:
yield _convert_issue_to_document(issue, repo_external_access)
except Exception as e:
error_msg = f"Error converting issue to document: {e}"
logging.exception(error_msg)
yield ConnectorFailure(
failed_document=DocumentFailure(
document_id=str(issue.id),
document_link=issue.html_url,
),
failure_message=error_msg,
exception=e,
)
continue
# if we found any issues on the page, and we're not done, return the checkpoint.
# don't return if we're using cursor-based pagination to avoid infinite loops
if num_issues > 0 and not done_with_issues and not checkpoint.cursor_url:
return checkpoint
# if we went past the start date during the loop or there are no more
# issues to get, we move on to the next repo
checkpoint.stage = GithubConnectorStage.PRS
checkpoint.reset()
checkpoint.has_more = len(checkpoint.cached_repo_ids) > 0
if checkpoint.cached_repo_ids:
next_id = checkpoint.cached_repo_ids.pop()
next_repo = self.github_client.get_repo(next_id)
checkpoint.cached_repo = SerializedRepository(
id=next_id,
headers=next_repo.raw_headers,
raw_data=next_repo.raw_data,
)
checkpoint.stage = GithubConnectorStage.PRS
checkpoint.reset()
if checkpoint.cached_repo_ids:
logging.info(
f"{len(checkpoint.cached_repo_ids)} repos remaining (IDs: {checkpoint.cached_repo_ids})"
)
else:
logging.info("No more repos remaining")
return checkpoint
def _load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: GithubConnectorCheckpoint,
include_permissions: bool = False,
) -> CheckpointOutput[GithubConnectorCheckpoint]:
start_datetime = datetime.fromtimestamp(start, tz=timezone.utc)
# add a day for timezone safety
end_datetime = datetime.fromtimestamp(end, tz=timezone.utc) + ONE_DAY
        # Optionally move the start time back (e.g. by 3 hours), since some Issues/PRs
        # get dropped, possibly due to delayed processing on GitHub's side. Issues not
        # updated since the last poll are short-circuited and not re-embedded.
        # This adjustment is currently disabled:
        # adjusted_start_datetime = start_datetime - timedelta(hours=3)
        adjusted_start_datetime = start_datetime
epoch = datetime.fromtimestamp(0, tz=timezone.utc)
if adjusted_start_datetime < epoch:
adjusted_start_datetime = epoch
return self._fetch_from_github(
checkpoint,
start=adjusted_start_datetime,
end=end_datetime,
include_permissions=include_permissions,
)
@override
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: GithubConnectorCheckpoint,
) -> CheckpointOutput[GithubConnectorCheckpoint]:
return self._load_from_checkpoint(
start, end, checkpoint, include_permissions=False
)
@override
def load_from_checkpoint_with_perm_sync(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: GithubConnectorCheckpoint,
) -> CheckpointOutput[GithubConnectorCheckpoint]:
return self._load_from_checkpoint(
start, end, checkpoint, include_permissions=True
)
def validate_connector_settings(self) -> None:
if self.github_client is None:
raise ConnectorMissingCredentialError("GitHub credentials not loaded.")
if not self.repo_owner:
raise ConnectorValidationError(
"Invalid connector settings: 'repo_owner' must be provided."
)
try:
if self.repositories:
if "," in self.repositories:
# Multiple repositories specified
repo_names = [name.strip() for name in self.repositories.split(",")]
if not repo_names:
raise ConnectorValidationError(
"Invalid connector settings: No valid repository names provided."
)
# Validate at least one repository exists and is accessible
valid_repos = False
validation_errors = []
for repo_name in repo_names:
if not repo_name:
continue
try:
test_repo = self.github_client.get_repo(
f"{self.repo_owner}/{repo_name}"
)
logging.info(
f"Successfully accessed repository: {self.repo_owner}/{repo_name}"
)
test_repo.get_contents("")
valid_repos = True
# If at least one repo is valid, we can proceed
break
except GithubException as e:
validation_errors.append(
f"Repository '{repo_name}': {e.data.get('message', str(e))}"
)
if not valid_repos:
error_msg = (
"None of the specified repositories could be accessed: "
)
error_msg += ", ".join(validation_errors)
raise ConnectorValidationError(error_msg)
else:
# Single repository (backward compatibility)
test_repo = self.github_client.get_repo(
f"{self.repo_owner}/{self.repositories}"
)
test_repo.get_contents("")
else:
# Try to get organization first
try:
org = self.github_client.get_organization(self.repo_owner)
total_count = org.get_repos().totalCount
if total_count == 0:
raise ConnectorValidationError(
f"Found no repos for organization: {self.repo_owner}. "
"Does the credential have the right scopes?"
)
except GithubException as e:
# Check for missing SSO
MISSING_SSO_ERROR_MESSAGE = "You must grant your Personal Access token access to this organization".lower()
if MISSING_SSO_ERROR_MESSAGE in str(e).lower():
SSO_GUIDE_LINK = (
"https://docs.github.com/en/enterprise-cloud@latest/authentication/"
"authenticating-with-saml-single-sign-on/"
"authorizing-a-personal-access-token-for-use-with-saml-single-sign-on"
)
raise ConnectorValidationError(
f"Your GitHub token is missing authorization to access the "
f"`{self.repo_owner}` organization. Please follow the guide to "
f"authorize your token: {SSO_GUIDE_LINK}"
)
# If not an org, try as a user
user = self.github_client.get_user(self.repo_owner)
# Check if we can access any repos
total_count = user.get_repos().totalCount
if total_count == 0:
raise ConnectorValidationError(
f"Found no repos for user: {self.repo_owner}. "
"Does the credential have the right scopes?"
)
except RateLimitExceededException:
raise UnexpectedValidationError(
"Validation failed due to GitHub rate-limits being exceeded. Please try again later."
)
except GithubException as e:
if e.status == 401:
raise CredentialExpiredError(
"GitHub credential appears to be invalid or expired (HTTP 401)."
)
elif e.status == 403:
raise InsufficientPermissionsError(
"Your GitHub token does not have sufficient permissions for this repository (HTTP 403)."
)
elif e.status == 404:
if self.repositories:
if "," in self.repositories:
raise ConnectorValidationError(
f"None of the specified GitHub repositories could be found for owner: {self.repo_owner}"
)
else:
raise ConnectorValidationError(
f"GitHub repository not found with name: {self.repo_owner}/{self.repositories}"
)
else:
raise ConnectorValidationError(
f"GitHub user or organization not found: {self.repo_owner}"
)
else:
raise ConnectorValidationError(
f"Unexpected GitHub error (status={e.status}): {e.data}"
)
except Exception as exc:
raise Exception(
f"Unexpected error during GitHub settings validation: {exc}"
)
def validate_checkpoint_json(
self, checkpoint_json: str
) -> GithubConnectorCheckpoint:
return GithubConnectorCheckpoint.model_validate_json(checkpoint_json)
def build_dummy_checkpoint(self) -> GithubConnectorCheckpoint:
return GithubConnectorCheckpoint(
stage=GithubConnectorStage.PRS, curr_page=0, has_more=True, num_retrieved=0
)
if __name__ == "__main__":
# Initialize the connector
connector = GithubConnector(
repo_owner="EvoAgentX",
repositories="EvoAgentX",
include_issues=True,
include_prs=False,
)
connector.load_credentials(
{"github_access_token": "<Your_GitHub_Access_Token>"}
)
if connector.github_client:
get_external_access_permission(
connector.get_github_repos(connector.github_client).pop(),
connector.github_client,
)
# Create a time range from epoch to now
end_time = datetime.now(timezone.utc)
start_time = datetime.fromtimestamp(0, tz=timezone.utc)
time_range = (start_time, end_time)
# Initialize the runner with a batch size of 10
runner: ConnectorRunner[GithubConnectorCheckpoint] = ConnectorRunner(
connector, batch_size=10, include_permissions=False, time_range=time_range
)
# Get initial checkpoint
checkpoint = connector.build_dummy_checkpoint()
# Run the connector
while checkpoint.has_more:
for doc_batch, failure, next_checkpoint in runner.run(checkpoint):
if doc_batch:
print(f"Retrieved batch of {len(doc_batch)} documents")
for doc in doc_batch:
print(f"Document: {doc.semantic_identifier}")
if failure:
print(f"Failure: {failure.failure_message}")
if next_checkpoint:
checkpoint = next_checkpoint | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/github/connector.py",
"license": "Apache License 2.0",
"lines": 865,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/github/models.py | from typing import Any
from github import Repository
from github.Requester import Requester
from pydantic import BaseModel
class SerializedRepository(BaseModel):
# id is part of the raw_data as well, just pulled out for convenience
id: int
headers: dict[str, str | int]
raw_data: dict[str, Any]
def to_Repository(self, requester: Requester) -> Repository.Repository:
return Repository.Repository(
requester, self.headers, self.raw_data, completed=True
) | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/github/models.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:common/data_source/github/rate_limit_utils.py | import time
import logging
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from github import Github
def sleep_after_rate_limit_exception(github_client: Github) -> None:
"""
Sleep until the GitHub rate limit resets.
Args:
github_client: The GitHub client that hit the rate limit
"""
sleep_time = github_client.get_rate_limit().core.reset.replace(
tzinfo=timezone.utc
) - datetime.now(tz=timezone.utc)
sleep_time += timedelta(minutes=1) # add an extra minute just to be safe
    logging.info(
        "Ran into Github rate-limit. Sleeping %.0f seconds.", sleep_time.total_seconds()
    )
time.sleep(sleep_time.total_seconds()) | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/github/rate_limit_utils.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:common/data_source/github/utils.py | import logging
from github import Github
from github.Repository import Repository
from common.data_source.models import ExternalAccess
from .models import SerializedRepository
def get_external_access_permission(
repo: Repository, github_client: Github
) -> ExternalAccess:
"""
Get the external access permission for a repository.
This functionality requires Enterprise Edition.
"""
# RAGFlow doesn't implement the Onyx EE external-permissions system.
# Default to private/unknown permissions.
return ExternalAccess.empty()
def deserialize_repository(
cached_repo: SerializedRepository, github_client: Github
) -> Repository:
"""
Deserialize a SerializedRepository back into a Repository object.
"""
# Try to access the requester - different PyGithub versions may use different attribute names
try:
# Try to get the requester using getattr to avoid linter errors
requester = getattr(github_client, "_requester", None)
if requester is None:
requester = getattr(github_client, "_Github__requester", None)
if requester is None:
# If we can't find the requester attribute, we need to fall back to recreating the repo
raise AttributeError("Could not find requester attribute")
return cached_repo.to_Repository(requester)
except Exception as e:
# If all else fails, re-fetch the repo directly
logging.warning("Failed to deserialize repository: %s. Attempting to re-fetch.", e)
repo_id = cached_repo.id
return github_client.get_repo(repo_id) | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/github/utils.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:common/data_source/gitlab_connector.py | import fnmatch
import itertools
from collections import deque
from collections.abc import Iterable
from collections.abc import Iterator
from datetime import datetime
from datetime import timezone
from typing import Any
from typing import TypeVar
import gitlab
from gitlab.v4.objects import Project
from common.data_source.config import DocumentSource, INDEX_BATCH_SIZE
from common.data_source.exceptions import ConnectorMissingCredentialError
from common.data_source.exceptions import ConnectorValidationError
from common.data_source.exceptions import CredentialExpiredError
from common.data_source.exceptions import InsufficientPermissionsError
from common.data_source.exceptions import UnexpectedValidationError
from common.data_source.interfaces import GenerateDocumentsOutput
from common.data_source.interfaces import LoadConnector
from common.data_source.interfaces import PollConnector
from common.data_source.interfaces import SecondsSinceUnixEpoch
from common.data_source.models import BasicExpertInfo
from common.data_source.models import Document
from common.data_source.utils import get_file_ext
T = TypeVar("T")
# List of directories/Files to exclude
exclude_patterns = [
"logs",
".github/",
".gitlab/",
".pre-commit-config.yaml",
]
def _batch_gitlab_objects(git_objs: Iterable[T], batch_size: int) -> Iterator[list[T]]:
it = iter(git_objs)
while True:
batch = list(itertools.islice(it, batch_size))
if not batch:
break
yield batch
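# Minimal usage sketch (illustrative values only): the helper lazily slices any
# iterable into lists of at most `batch_size` items.
#   >>> list(_batch_gitlab_objects(range(5), batch_size=2))
#   [[0, 1], [2, 3], [4]]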
def get_author(author: Any) -> BasicExpertInfo:
return BasicExpertInfo(
display_name=author.get("name"),
)
def _convert_merge_request_to_document(mr: Any) -> Document:
    mr_text = mr.description or ""
    # Keep blob as raw bytes, consistent with the other connectors in this module.
    mr_bytes = mr_text.encode("utf-8")
    doc = Document(
        id=mr.web_url,
        blob=mr_bytes,
        source=DocumentSource.GITLAB,
        semantic_identifier=mr.title,
        extension=".md",
        # updated_at is UTC time but is timezone unaware, explicitly add UTC
        # as there is logic in indexing to prevent wrong timestamped docs
        # due to local time discrepancies with UTC
        doc_updated_at=mr.updated_at.replace(tzinfo=timezone.utc),
        size_bytes=len(mr_bytes),
primary_owners=[get_author(mr.author)],
metadata={"state": mr.state, "type": "MergeRequest", "web_url": mr.web_url},
)
return doc
def _convert_issue_to_document(issue: Any) -> Document:
    issue_text = issue.description or ""
    # Keep blob as raw bytes, consistent with the other connectors in this module.
    issue_bytes = issue_text.encode("utf-8")
    doc = Document(
        id=issue.web_url,
        blob=issue_bytes,
        source=DocumentSource.GITLAB,
        semantic_identifier=issue.title,
        extension=".md",
        # updated_at is UTC time but is timezone unaware, explicitly add UTC
        # as there is logic in indexing to prevent wrong timestamped docs
        # due to local time discrepancies with UTC
        doc_updated_at=issue.updated_at.replace(tzinfo=timezone.utc),
        size_bytes=len(issue_bytes),
primary_owners=[get_author(issue.author)],
metadata={
"state": issue.state,
"type": issue.type if issue.type else "Issue",
"web_url": issue.web_url,
},
)
return doc
def _convert_code_to_document(
project: Project, file: Any, url: str, projectName: str, projectOwner: str
) -> Document:
# Dynamically get the default branch from the project object
default_branch = project.default_branch
# Fetch the file content using the correct branch
file_content_obj = project.files.get(
file_path=file["path"], ref=default_branch # Use the default branch
)
# BoxConnector uses raw bytes for blob. Keep the same here.
file_content_bytes = file_content_obj.decode()
file_url = f"{url}/{projectOwner}/{projectName}/-/blob/{default_branch}/{file['path']}"
# Try to use the last commit timestamp for incremental sync.
# Falls back to "now" if the commit lookup fails.
last_commit_at = None
try:
# Query commit history for this file on the default branch.
commits = project.commits.list(
ref_name=default_branch,
path=file["path"],
per_page=1,
)
if commits:
# committed_date is ISO string like "2024-01-01T00:00:00.000+00:00"
committed_date = commits[0].committed_date
if isinstance(committed_date, str):
last_commit_at = datetime.strptime(
committed_date, "%Y-%m-%dT%H:%M:%S.%f%z"
).astimezone(timezone.utc)
elif isinstance(committed_date, datetime):
last_commit_at = committed_date.astimezone(timezone.utc)
except Exception:
last_commit_at = None
# Create and return a Document object
doc = Document(
# Use a stable ID so reruns don't create duplicates.
id=file_url,
blob=file_content_bytes,
source=DocumentSource.GITLAB,
semantic_identifier=file.get("name"),
extension=get_file_ext(file.get("name")),
doc_updated_at=last_commit_at or datetime.now(tz=timezone.utc),
size_bytes=len(file_content_bytes) if file_content_bytes is not None else 0,
primary_owners=[], # Add owners if needed
metadata={
"type": "CodeFile",
"path": file.get("path"),
"ref": default_branch,
"project": f"{projectOwner}/{projectName}",
"web_url": file_url,
},
)
return doc
def _should_exclude(path: str) -> bool:
"""Check if a path matches any of the exclude patterns."""
return any(fnmatch.fnmatch(path, pattern) for pattern in exclude_patterns)
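# Illustrative examples (paths are made up, not from this repository): fnmatch
# patterns must match the full path, so only exact/glob matches against
# exclude_patterns are excluded.
#   _should_exclude("logs") -> True
#   _should_exclude(".pre-commit-config.yaml") -> True
#   _should_exclude("src/app.py") -> False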
class GitlabConnector(LoadConnector, PollConnector):
def __init__(
self,
project_owner: str,
project_name: str,
batch_size: int = INDEX_BATCH_SIZE,
state_filter: str = "all",
include_mrs: bool = True,
include_issues: bool = True,
include_code_files: bool = False,
) -> None:
self.project_owner = project_owner
self.project_name = project_name
self.batch_size = batch_size
self.state_filter = state_filter
self.include_mrs = include_mrs
self.include_issues = include_issues
self.include_code_files = include_code_files
self.gitlab_client: gitlab.Gitlab | None = None
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
self.gitlab_client = gitlab.Gitlab(
credentials["gitlab_url"], private_token=credentials["gitlab_access_token"]
)
return None
def validate_connector_settings(self) -> None:
if self.gitlab_client is None:
raise ConnectorMissingCredentialError("GitLab")
try:
self.gitlab_client.auth()
self.gitlab_client.projects.get(
f"{self.project_owner}/{self.project_name}",
lazy=True,
)
except gitlab.exceptions.GitlabAuthenticationError as e:
raise CredentialExpiredError(
"Invalid or expired GitLab credentials."
) from e
except gitlab.exceptions.GitlabAuthorizationError as e:
raise InsufficientPermissionsError(
"Insufficient permissions to access GitLab resources."
) from e
except gitlab.exceptions.GitlabGetError as e:
raise ConnectorValidationError(
"GitLab project not found or not accessible."
) from e
except Exception as e:
raise UnexpectedValidationError(
f"Unexpected error while validating GitLab settings: {e}"
) from e
def _fetch_from_gitlab(
self, start: datetime | None = None, end: datetime | None = None
) -> GenerateDocumentsOutput:
if self.gitlab_client is None:
raise ConnectorMissingCredentialError("Gitlab")
project: Project = self.gitlab_client.projects.get(
f"{self.project_owner}/{self.project_name}"
)
start_utc = start.astimezone(timezone.utc) if start else None
end_utc = end.astimezone(timezone.utc) if end else None
# Fetch code files
if self.include_code_files:
            # Fetch using BFS, as project.repository_tree with recursion causes slow loads
queue = deque([""]) # Start with the root directory
while queue:
current_path = queue.popleft()
files = project.repository_tree(path=current_path, all=True)
for file_batch in _batch_gitlab_objects(files, self.batch_size):
code_doc_batch: list[Document] = []
for file in file_batch:
if _should_exclude(file["path"]):
continue
if file["type"] == "blob":
doc = _convert_code_to_document(
project,
file,
self.gitlab_client.url,
self.project_name,
self.project_owner,
)
# Apply incremental window filtering for code files too.
if start_utc is not None and doc.doc_updated_at <= start_utc:
continue
if end_utc is not None and doc.doc_updated_at > end_utc:
continue
code_doc_batch.append(doc)
elif file["type"] == "tree":
queue.append(file["path"])
if code_doc_batch:
yield code_doc_batch
if self.include_mrs:
merge_requests = project.mergerequests.list(
state=self.state_filter,
order_by="updated_at",
sort="desc",
iterator=True,
)
for mr_batch in _batch_gitlab_objects(merge_requests, self.batch_size):
mr_doc_batch: list[Document] = []
for mr in mr_batch:
mr.updated_at = datetime.strptime(
mr.updated_at, "%Y-%m-%dT%H:%M:%S.%f%z"
)
if start_utc is not None and mr.updated_at <= start_utc:
yield mr_doc_batch
return
if end_utc is not None and mr.updated_at > end_utc:
continue
mr_doc_batch.append(_convert_merge_request_to_document(mr))
yield mr_doc_batch
if self.include_issues:
            # Order by updated_at desc so the early-exit below is valid (mirrors the MR handling).
            issues = project.issues.list(
                state=self.state_filter, order_by="updated_at", sort="desc", iterator=True
            )
for issue_batch in _batch_gitlab_objects(issues, self.batch_size):
issue_doc_batch: list[Document] = []
for issue in issue_batch:
issue.updated_at = datetime.strptime(
issue.updated_at, "%Y-%m-%dT%H:%M:%S.%f%z"
)
# Avoid re-syncing the last-seen item.
if start_utc is not None and issue.updated_at <= start_utc:
yield issue_doc_batch
return
if end_utc is not None and issue.updated_at > end_utc:
continue
issue_doc_batch.append(_convert_issue_to_document(issue))
yield issue_doc_batch
def load_from_state(self) -> GenerateDocumentsOutput:
return self._fetch_from_gitlab()
def poll_source(
self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch
) -> GenerateDocumentsOutput:
start_datetime = datetime.fromtimestamp(start, tz=timezone.utc)
end_datetime = datetime.fromtimestamp(end, tz=timezone.utc)
return self._fetch_from_gitlab(start_datetime, end_datetime)
if __name__ == "__main__":
import os
connector = GitlabConnector(
# gitlab_url="https://gitlab.com/api/v4",
project_owner=os.environ["PROJECT_OWNER"],
project_name=os.environ["PROJECT_NAME"],
batch_size=INDEX_BATCH_SIZE,
state_filter="all",
include_mrs=True,
include_issues=True,
include_code_files=True,
)
connector.load_credentials(
{
"gitlab_access_token": os.environ["GITLAB_ACCESS_TOKEN"],
"gitlab_url": os.environ["GITLAB_URL"],
}
)
document_batches = connector.load_from_state()
for f in document_batches:
print("Batch:", f)
print("Finished loading from state.") | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/gitlab_connector.py",
"license": "Apache License 2.0",
"lines": 292,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/asana_connector.py | from collections.abc import Iterator
import time
from datetime import datetime
import logging
from typing import Any, Dict
import asana
import requests
from common.data_source.config import CONTINUE_ON_CONNECTOR_FAILURE, INDEX_BATCH_SIZE, DocumentSource
from common.data_source.interfaces import LoadConnector, PollConnector
from common.data_source.models import Document, GenerateDocumentsOutput, SecondsSinceUnixEpoch
from common.data_source.utils import extract_size_bytes, get_file_ext
# https://github.com/Asana/python-asana/tree/master?tab=readme-ov-file#documentation-for-api-endpoints
class AsanaTask:
def __init__(
self,
id: str,
title: str,
text: str,
link: str,
last_modified: datetime,
project_gid: str,
project_name: str,
) -> None:
self.id = id
self.title = title
self.text = text
self.link = link
self.last_modified = last_modified
self.project_gid = project_gid
self.project_name = project_name
def __str__(self) -> str:
return f"ID: {self.id}\nTitle: {self.title}\nLast modified: {self.last_modified}\nText: {self.text}"
class AsanaAPI:
def __init__(
self, api_token: str, workspace_gid: str, team_gid: str | None
) -> None:
self._user = None
self.workspace_gid = workspace_gid
self.team_gid = team_gid
self.configuration = asana.Configuration()
self.api_client = asana.ApiClient(self.configuration)
self.tasks_api = asana.TasksApi(self.api_client)
self.attachments_api = asana.AttachmentsApi(self.api_client)
self.stories_api = asana.StoriesApi(self.api_client)
self.users_api = asana.UsersApi(self.api_client)
self.project_api = asana.ProjectsApi(self.api_client)
self.project_memberships_api = asana.ProjectMembershipsApi(self.api_client)
self.workspaces_api = asana.WorkspacesApi(self.api_client)
self.api_error_count = 0
self.configuration.access_token = api_token
self.task_count = 0
def get_tasks(
self, project_gids: list[str] | None, start_date: str
) -> Iterator[AsanaTask]:
"""Get all tasks from the projects with the given gids that were modified since the given date.
If project_gids is None, get all tasks from all projects in the workspace."""
logging.info("Starting to fetch Asana projects")
projects = self.project_api.get_projects(
opts={
"workspace": self.workspace_gid,
"opt_fields": "gid,name,archived,modified_at",
}
)
start_seconds = int(time.mktime(datetime.now().timetuple()))
projects_list = []
project_count = 0
for project_info in projects:
project_gid = project_info["gid"]
if project_gids is None or project_gid in project_gids:
projects_list.append(project_gid)
else:
logging.debug(
f"Skipping project: {project_gid} - not in accepted project_gids"
)
project_count += 1
if project_count % 100 == 0:
logging.info(f"Processed {project_count} projects")
logging.info(f"Found {len(projects_list)} projects to process")
for project_gid in projects_list:
for task in self._get_tasks_for_project(
project_gid, start_date, start_seconds
):
yield task
logging.info(f"Completed fetching {self.task_count} tasks from Asana")
if self.api_error_count > 0:
logging.warning(
f"Encountered {self.api_error_count} API errors during task fetching"
)
def _get_tasks_for_project(
self, project_gid: str, start_date: str, start_seconds: int
) -> Iterator[AsanaTask]:
project = self.project_api.get_project(project_gid, opts={})
project_name = project.get("name", project_gid)
team = project.get("team") or {}
team_gid = team.get("gid")
if project.get("archived"):
logging.info(f"Skipping archived project: {project_name} ({project_gid})")
return
if not team_gid:
logging.info(
f"Skipping project without a team: {project_name} ({project_gid})"
)
return
if project.get("privacy_setting") == "private":
if self.team_gid and team_gid != self.team_gid:
logging.info(
f"Skipping private project not in configured team: {project_name} ({project_gid})"
)
return
logging.info(
f"Processing private project in configured team: {project_name} ({project_gid})"
)
simple_start_date = start_date.split(".")[0].split("+")[0]
logging.info(
f"Fetching tasks modified since {simple_start_date} for project: {project_name} ({project_gid})"
)
opts = {
"opt_fields": "name,memberships,memberships.project,completed_at,completed_by,created_at,"
"created_by,custom_fields,dependencies,due_at,due_on,external,html_notes,liked,likes,"
"modified_at,notes,num_hearts,parent,projects,resource_subtype,resource_type,start_on,"
"workspace,permalink_url",
"modified_since": start_date,
}
tasks_from_api = self.tasks_api.get_tasks_for_project(project_gid, opts)
for data in tasks_from_api:
self.task_count += 1
if self.task_count % 10 == 0:
end_seconds = time.mktime(datetime.now().timetuple())
runtime_seconds = end_seconds - start_seconds
if runtime_seconds > 0:
logging.info(
f"Processed {self.task_count} tasks in {runtime_seconds:.0f} seconds "
f"({self.task_count / runtime_seconds:.2f} tasks/second)"
)
logging.debug(f"Processing Asana task: {data['name']}")
text = self._construct_task_text(data)
try:
text += self._fetch_and_add_comments(data["gid"])
last_modified_date = self.format_date(data["modified_at"])
text += f"Last modified: {last_modified_date}\n"
task = AsanaTask(
id=data["gid"],
title=data["name"],
text=text,
link=data["permalink_url"],
last_modified=datetime.fromisoformat(data["modified_at"]),
project_gid=project_gid,
project_name=project_name,
)
yield task
except Exception:
logging.error(
f"Error processing task {data['gid']} in project {project_gid}",
exc_info=True,
)
self.api_error_count += 1
def _construct_task_text(self, data: Dict) -> str:
text = f"{data['name']}\n\n"
if data["notes"]:
text += f"{data['notes']}\n\n"
if data["created_by"] and data["created_by"]["gid"]:
creator = self.get_user(data["created_by"]["gid"])["name"]
created_date = self.format_date(data["created_at"])
text += f"Created by: {creator} on {created_date}\n"
if data["due_on"]:
due_date = self.format_date(data["due_on"])
text += f"Due date: {due_date}\n"
if data["completed_at"]:
completed_date = self.format_date(data["completed_at"])
text += f"Completed on: {completed_date}\n"
text += "\n"
return text
def _fetch_and_add_comments(self, task_gid: str) -> str:
text = ""
stories_opts: Dict[str, str] = {}
story_start = time.time()
stories = self.stories_api.get_stories_for_task(task_gid, stories_opts)
story_count = 0
comment_count = 0
for story in stories:
story_count += 1
if story["resource_subtype"] == "comment_added":
comment = self.stories_api.get_story(
story["gid"], opts={"opt_fields": "text,created_by,created_at"}
)
commenter = self.get_user(comment["created_by"]["gid"])["name"]
text += f"Comment by {commenter}: {comment['text']}\n\n"
comment_count += 1
story_duration = time.time() - story_start
logging.debug(
f"Processed {story_count} stories (including {comment_count} comments) in {story_duration:.2f} seconds"
)
return text
def get_attachments(self, task_gid: str) -> list[dict]:
"""
Fetch full attachment info (including download_url) for a task.
"""
attachments: list[dict] = []
try:
# Step 1: list attachment compact records
for att in self.attachments_api.get_attachments_for_object(
parent=task_gid,
opts={}
):
gid = att.get("gid")
if not gid:
continue
try:
# Step 2: expand to full attachment
full = self.attachments_api.get_attachment(
attachment_gid=gid,
opts={
"opt_fields": "name,download_url,size,created_at"
}
)
if full.get("download_url"):
attachments.append(full)
except Exception:
logging.exception(
f"Failed to fetch attachment detail {gid} for task {task_gid}"
)
self.api_error_count += 1
except Exception:
logging.exception(f"Failed to list attachments for task {task_gid}")
self.api_error_count += 1
return attachments
def get_accessible_emails(
self,
workspace_id: str,
project_ids: list[str] | None,
team_id: str | None,
):
ws_users = self.users_api.get_users(
opts={
"workspace": workspace_id,
"opt_fields": "gid,name,email"
}
)
workspace_users = {
u["gid"]: u.get("email")
for u in ws_users
if u.get("email")
}
if not project_ids:
return set(workspace_users.values())
project_emails = set()
for pid in project_ids:
project = self.project_api.get_project(
pid,
opts={"opt_fields": "team,privacy_setting"}
)
if project["privacy_setting"] == "private":
if team_id and project.get("team", {}).get("gid") != team_id:
continue
            memberships = self.project_memberships_api.get_project_memberships_for_project(
                pid,
                opts={"opt_fields": "user.gid,user.email"}
            )
for m in memberships:
email = m["user"].get("email")
if email:
project_emails.add(email)
return project_emails
    def get_user(self, user_gid: str) -> Dict:
        # Cache users per gid so different creators/commenters don't share one entry.
        if self._user is None:
            self._user = {}
        if user_gid in self._user:
            return self._user[user_gid]
        user = self.users_api.get_user(user_gid, {"opt_fields": "name,email"})
        if not user:
            logging.warning(f"Unable to fetch user information for user_gid: {user_gid}")
            return {"name": "Unknown"}
        self._user[user_gid] = user
        return user
def format_date(self, date_str: str) -> str:
date = datetime.fromisoformat(date_str)
return time.strftime("%Y-%m-%d", date.timetuple())
def get_time(self) -> str:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
class AsanaConnector(LoadConnector, PollConnector):
def __init__(
self,
asana_workspace_id: str,
asana_project_ids: str | None = None,
asana_team_id: str | None = None,
batch_size: int = INDEX_BATCH_SIZE,
continue_on_failure: bool = CONTINUE_ON_CONNECTOR_FAILURE,
) -> None:
self.workspace_id = asana_workspace_id
self.project_ids_to_index: list[str] | None = (
asana_project_ids.split(",") if asana_project_ids else None
)
self.asana_team_id = asana_team_id if asana_team_id else None
self.batch_size = batch_size
self.continue_on_failure = continue_on_failure
self.size_threshold = None
logging.info(
f"AsanaConnector initialized with workspace_id: {asana_workspace_id}"
)
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
self.api_token = credentials["asana_api_token_secret"]
self.asana_client = AsanaAPI(
api_token=self.api_token,
workspace_gid=self.workspace_id,
team_gid=self.asana_team_id,
)
self.workspace_users_email = self.asana_client.get_accessible_emails(self.workspace_id, self.project_ids_to_index, self.asana_team_id)
logging.info("Asana credentials loaded and API client initialized")
return None
def poll_source(
self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch | None
) -> GenerateDocumentsOutput:
start_time = datetime.fromtimestamp(start).isoformat()
logging.info(f"Starting Asana poll from {start_time}")
docs_batch: list[Document] = []
tasks = self.asana_client.get_tasks(self.project_ids_to_index, start_time)
for task in tasks:
docs = self._task_to_documents(task)
docs_batch.extend(docs)
if len(docs_batch) >= self.batch_size:
logging.info(f"Yielding batch of {len(docs_batch)} documents")
yield docs_batch
docs_batch = []
if docs_batch:
logging.info(f"Yielding final batch of {len(docs_batch)} documents")
yield docs_batch
logging.info("Asana poll completed")
def load_from_state(self) -> GenerateDocumentsOutput:
logging.info("Starting full index of all Asana tasks")
return self.poll_source(start=0, end=None)
def _task_to_documents(self, task: AsanaTask) -> list[Document]:
docs: list[Document] = []
attachments = self.asana_client.get_attachments(task.id)
for att in attachments:
try:
resp = requests.get(att["download_url"], timeout=30)
resp.raise_for_status()
file_blob = resp.content
filename = att.get("name", "attachment")
size_bytes = extract_size_bytes(att)
if (
self.size_threshold is not None
and isinstance(size_bytes, int)
and size_bytes > self.size_threshold
):
logging.warning(
f"{filename} exceeds size threshold of {self.size_threshold}. Skipping."
)
continue
docs.append(
Document(
id=f"asana:{task.id}:{att['gid']}",
blob=file_blob,
extension=get_file_ext(filename) or "",
size_bytes=size_bytes,
doc_updated_at=task.last_modified,
source=DocumentSource.ASANA,
semantic_identifier=filename,
primary_owners=list(self.workspace_users_email),
)
)
except Exception:
logging.exception(
f"Failed to download attachment {att.get('gid')} for task {task.id}"
)
return docs
if __name__ == "__main__":
import time
import os
logging.info("Starting Asana connector test")
connector = AsanaConnector(
os.environ["WORKSPACE_ID"],
os.environ["PROJECT_IDS"],
os.environ["TEAM_ID"],
)
connector.load_credentials(
{
"asana_api_token_secret": os.environ["API_TOKEN"],
}
)
logging.info("Loading all documents from Asana")
all_docs = connector.load_from_state()
current = time.time()
one_day_ago = current - 24 * 60 * 60 # 1 day
logging.info("Polling for documents updated in the last 24 hours")
latest_docs = connector.poll_source(one_day_ago, current)
for docs in all_docs:
for doc in docs:
print(doc.id)
logging.info("Asana connector test completed") | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/asana_connector.py",
"license": "Apache License 2.0",
"lines": 387,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:api/db/joint_services/memory_message_service.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from typing import List
from common import settings
from common.time_utils import current_timestamp, timestamp_to_date, format_iso_8601_to_ymd_hms
from common.constants import MemoryType, LLMType
from common.doc_store.doc_store_base import FusionExpr
from common.misc_utils import get_uuid
from api.db.db_utils import bulk_insert_into_db
from api.db.db_models import Task
from api.db.services.task_service import TaskService
from api.db.services.memory_service import MemoryService
from api.db.services.tenant_llm_service import TenantLLMService
from api.db.services.llm_service import LLMBundle
from api.utils.memory_utils import get_memory_type_human
from memory.services.messages import MessageService
from memory.services.query import MsgTextQuery, get_vector
from memory.utils.prompt_util import PromptAssembler
from memory.utils.msg_util import get_json_result_from_llm_response
from rag.utils.redis_conn import REDIS_CONN
async def save_to_memory(memory_id: str, message_dict: dict):
"""
:param memory_id:
:param message_dict: {
"user_id": str,
"agent_id": str,
"session_id": str,
"user_input": str,
"agent_response": str
}
"""
memory = MemoryService.get_by_memory_id(memory_id)
if not memory:
return False, f"Memory '{memory_id}' not found."
tenant_id = memory.tenant_id
extracted_content = await extract_by_llm(
tenant_id,
memory.llm_id,
{"temperature": memory.temperature},
get_memory_type_human(memory.memory_type),
message_dict.get("user_input", ""),
message_dict.get("agent_response", "")
) if memory.memory_type != MemoryType.RAW.value else [] # if only RAW, no need to extract
raw_message_id = REDIS_CONN.generate_auto_increment_id(namespace="memory")
message_list = [{
"message_id": raw_message_id,
"message_type": MemoryType.RAW.name.lower(),
"source_id": 0,
"memory_id": memory_id,
"user_id": "",
"agent_id": message_dict["agent_id"],
"session_id": message_dict["session_id"],
"content": f"User Input: {message_dict.get('user_input')}\nAgent Response: {message_dict.get('agent_response')}",
"valid_at": timestamp_to_date(current_timestamp()),
"invalid_at": None,
"forget_at": None,
"status": True
}, *[{
"message_id": REDIS_CONN.generate_auto_increment_id(namespace="memory"),
"message_type": content["message_type"],
"source_id": raw_message_id,
"memory_id": memory_id,
"user_id": "",
"agent_id": message_dict["agent_id"],
"session_id": message_dict["session_id"],
"content": content["content"],
"valid_at": content["valid_at"],
"invalid_at": content["invalid_at"] if content["invalid_at"] else None,
"forget_at": None,
"status": True
} for content in extracted_content]]
return await embed_and_save(memory, message_list)
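# Hedged usage sketch for save_to_memory; the ids and texts below are purely
# illustrative and not taken from this repository:
#   ok, msg = await save_to_memory(
#       "mem_123",
#       {
#           "user_id": "u_1",
#           "agent_id": "a_1",
#           "session_id": "s_1",
#           "user_input": "My favorite color is blue.",
#           "agent_response": "Noted, I'll remember that.",
#       },
#   )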
async def save_extracted_to_memory_only(memory_id: str, message_dict, source_message_id: int, task_id: str=None):
memory = MemoryService.get_by_memory_id(memory_id)
if not memory:
msg = f"Memory '{memory_id}' not found."
if task_id:
TaskService.update_progress(task_id, {"progress": -1, "progress_msg": timestamp_to_date(current_timestamp())+ " " + msg})
return False, msg
if memory.memory_type == MemoryType.RAW.value:
msg = f"Memory '{memory_id}' don't need to extract."
if task_id:
TaskService.update_progress(task_id, {"progress": 1.0, "progress_msg": timestamp_to_date(current_timestamp())+ " " + msg})
return True, msg
tenant_id = memory.tenant_id
extracted_content = await extract_by_llm(
tenant_id,
memory.llm_id,
{"temperature": memory.temperature},
get_memory_type_human(memory.memory_type),
message_dict.get("user_input", ""),
message_dict.get("agent_response", ""),
task_id=task_id
)
message_list = [{
"message_id": REDIS_CONN.generate_auto_increment_id(namespace="memory"),
"message_type": content["message_type"],
"source_id": source_message_id,
"memory_id": memory_id,
"user_id": "",
"agent_id": message_dict["agent_id"],
"session_id": message_dict["session_id"],
"content": content["content"],
"valid_at": content["valid_at"],
"invalid_at": content["invalid_at"] if content["invalid_at"] else None,
"forget_at": None,
"status": True
} for content in extracted_content]
if not message_list:
msg = "No memory extracted from raw message."
if task_id:
TaskService.update_progress(task_id, {"progress": 1.0, "progress_msg": timestamp_to_date(current_timestamp())+ " " + msg})
return True, msg
if task_id:
TaskService.update_progress(task_id, {"progress": 0.5, "progress_msg": timestamp_to_date(current_timestamp())+ " " + f"Extracted {len(message_list)} messages from raw dialogue."})
return await embed_and_save(memory, message_list, task_id)
async def extract_by_llm(tenant_id: str, llm_id: str, extract_conf: dict, memory_type: List[str], user_input: str,
agent_response: str, system_prompt: str = "", user_prompt: str="", task_id: str=None) -> List[dict]:
llm_type = TenantLLMService.llm_id2llm_type(llm_id)
if not llm_type:
raise RuntimeError(f"Unknown type of LLM '{llm_id}'")
if not system_prompt:
system_prompt = PromptAssembler.assemble_system_prompt({"memory_type": memory_type})
conversation_content = f"User Input: {user_input}\nAgent Response: {agent_response}"
conversation_time = timestamp_to_date(current_timestamp())
user_prompts = []
if user_prompt:
user_prompts.append({"role": "user", "content": user_prompt})
user_prompts.append({"role": "user", "content": f"Conversation: {conversation_content}\nConversation Time: {conversation_time}\nCurrent Time: {conversation_time}"})
else:
user_prompts.append({"role": "user", "content": PromptAssembler.assemble_user_prompt(conversation_content, conversation_time, conversation_time)})
llm = LLMBundle(tenant_id, llm_type, llm_id)
if task_id:
TaskService.update_progress(task_id, {"progress": 0.15, "progress_msg": timestamp_to_date(current_timestamp())+ " " + "Prepared prompts and LLM."})
res = await llm.async_chat(system_prompt, user_prompts, extract_conf)
res_json = get_json_result_from_llm_response(res)
if task_id:
TaskService.update_progress(task_id, {"progress": 0.35, "progress_msg": timestamp_to_date(current_timestamp())+ " " + "Get extracted result from LLM."})
return [{
"content": extracted_content["content"],
"valid_at": format_iso_8601_to_ymd_hms(extracted_content["valid_at"]),
"invalid_at": format_iso_8601_to_ymd_hms(extracted_content["invalid_at"]) if extracted_content.get("invalid_at") else "",
"message_type": message_type
} for message_type, extracted_content_list in res_json.items() for extracted_content in extracted_content_list]
async def embed_and_save(memory, message_list: list[dict], task_id: str=None):
embedding_model = LLMBundle(memory.tenant_id, llm_type=LLMType.EMBEDDING, llm_name=memory.embd_id)
if task_id:
TaskService.update_progress(task_id, {"progress": 0.65, "progress_msg": timestamp_to_date(current_timestamp())+ " " + "Prepared embedding model."})
vector_list, _ = embedding_model.encode([msg["content"] for msg in message_list])
for idx, msg in enumerate(message_list):
msg["content_embed"] = vector_list[idx]
if task_id:
TaskService.update_progress(task_id, {"progress": 0.85, "progress_msg": timestamp_to_date(current_timestamp())+ " " + "Embedded extracted content."})
vector_dimension = len(vector_list[0])
if not MessageService.has_index(memory.tenant_id, memory.id):
created = MessageService.create_index(memory.tenant_id, memory.id, vector_size=vector_dimension)
if not created:
error_msg = "Failed to create message index."
if task_id:
TaskService.update_progress(task_id, {"progress": -1, "progress_msg": timestamp_to_date(current_timestamp())+ " " + error_msg})
return False, error_msg
new_msg_size = sum([MessageService.calculate_message_size(m) for m in message_list])
    current_memory_size = get_memory_size_cache(memory.id, memory.tenant_id)
if new_msg_size + current_memory_size > memory.memory_size:
size_to_delete = current_memory_size + new_msg_size - memory.memory_size
if memory.forgetting_policy == "FIFO":
message_ids_to_delete, delete_size = MessageService.pick_messages_to_delete_by_fifo(memory.id, memory.tenant_id,
size_to_delete)
MessageService.delete_message({"message_id": message_ids_to_delete}, memory.tenant_id, memory.id)
decrease_memory_size_cache(memory.id, delete_size)
else:
error_msg = "Failed to insert message into memory. Memory size reached limit and cannot decide which to delete."
if task_id:
TaskService.update_progress(task_id, {"progress": -1, "progress_msg": timestamp_to_date(current_timestamp())+ " " + error_msg})
return False, error_msg
fail_cases = MessageService.insert_message(message_list, memory.tenant_id, memory.id)
if fail_cases:
error_msg = "Failed to insert message into memory. Details: " + "; ".join(fail_cases)
if task_id:
TaskService.update_progress(task_id, {"progress": -1, "progress_msg": timestamp_to_date(current_timestamp())+ " " + error_msg})
return False, error_msg
if task_id:
TaskService.update_progress(task_id, {"progress": 0.95, "progress_msg": timestamp_to_date(current_timestamp())+ " " + "Saved messages to storage."})
increase_memory_size_cache(memory.id, new_msg_size)
return True, "Message saved successfully."
def query_message(filter_dict: dict, params: dict):
"""
:param filter_dict: {
"memory_id": List[str],
"agent_id": optional
"session_id": optional
}
:param params: {
"query": question str,
"similarity_threshold": float,
"keywords_similarity_weight": float,
"top_n": int
}
"""
memory_ids = filter_dict["memory_id"]
memory_list = MemoryService.get_by_ids(memory_ids)
if not memory_list:
return []
condition_dict = {k: v for k, v in filter_dict.items() if v}
uids = [memory.tenant_id for memory in memory_list]
question = params["query"]
question = question.strip()
memory = memory_list[0]
embd_model = LLMBundle(memory.tenant_id, llm_type=LLMType.EMBEDDING, llm_name=memory.embd_id)
match_dense = get_vector(question, embd_model, similarity=params["similarity_threshold"])
match_text, _ = MsgTextQuery().question(question, min_match=params["similarity_threshold"])
keywords_similarity_weight = params.get("keywords_similarity_weight", 0.7)
fusion_expr = FusionExpr("weighted_sum", params["top_n"], {"weights": ",".join([str(1 - keywords_similarity_weight), str(keywords_similarity_weight)])})
return MessageService.search_message(memory_ids, condition_dict, uids, [match_text, match_dense, fusion_expr], params["top_n"])
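# Hedged usage sketch for query_message; ids and numbers are illustrative only.
# The fusion weights string is built as
# "<1 - keywords_similarity_weight>,<keywords_similarity_weight>".
#   hits = query_message(
#       {"memory_id": ["mem_123"], "agent_id": "a_1", "session_id": ""},
#       {"query": "favorite color", "similarity_threshold": 0.2,
#        "keywords_similarity_weight": 0.7, "top_n": 5},
#   )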
def init_message_id_sequence():
message_id_redis_key = "id_generator:memory"
if REDIS_CONN.exist(message_id_redis_key):
current_max_id = REDIS_CONN.get(message_id_redis_key)
logging.info(f"No need to init message_id sequence, current max id is {current_max_id}.")
else:
max_id = 1
exist_memory_list = MemoryService.get_all_memory()
if not exist_memory_list:
REDIS_CONN.set(message_id_redis_key, max_id)
else:
max_id = MessageService.get_max_message_id(
uid_list=[m.tenant_id for m in exist_memory_list],
memory_ids=[m.id for m in exist_memory_list]
)
REDIS_CONN.set(message_id_redis_key, max_id)
logging.info(f"Init message_id sequence done, current max id is {max_id}.")
def get_memory_size_cache(memory_id: str, uid: str):
redis_key = f"memory_{memory_id}"
if REDIS_CONN.exist(redis_key):
return int(REDIS_CONN.get(redis_key))
else:
memory_size_map = MessageService.calculate_memory_size(
[memory_id],
[uid]
)
memory_size = memory_size_map.get(memory_id, 0)
set_memory_size_cache(memory_id, memory_size)
return memory_size
def set_memory_size_cache(memory_id: str, size: int):
redis_key = f"memory_{memory_id}"
return REDIS_CONN.set(redis_key, size)
def increase_memory_size_cache(memory_id: str, size: int):
redis_key = f"memory_{memory_id}"
return REDIS_CONN.incrby(redis_key, size)
def decrease_memory_size_cache(memory_id: str, size: int):
redis_key = f"memory_{memory_id}"
return REDIS_CONN.decrby(redis_key, size)
def init_memory_size_cache():
memory_list = MemoryService.get_all_memory()
if not memory_list:
logging.info("No memory found, no need to init memory size.")
else:
for m in memory_list:
get_memory_size_cache(m.id, m.tenant_id)
logging.info("Memory size cache init done.")
def fix_missing_tokenized_memory():
if settings.DOC_ENGINE != "elasticsearch":
logging.info("Not using elasticsearch as doc engine, no need to fix missing tokenized memory.")
return
memory_list = MemoryService.get_all_memory()
if not memory_list:
logging.info("No memory found, no need to fix missing tokenized memory.")
else:
for m in memory_list:
message_list = MessageService.get_missing_field_messages(m.id, m.tenant_id, "tokenized_content_ltks")
for msg in message_list:
# update content to refresh tokenized field
MessageService.update_message({"message_id": msg["message_id"], "memory_id": m.id}, {"content": msg["content"]}, m.tenant_id, m.id)
if message_list:
logging.info(f"Fixed {len(message_list)} messages missing tokenized field in memory: {m.name}.")
logging.info("Fix missing tokenized memory done.")
def judge_system_prompt_is_default(system_prompt: str, memory_type: int|list[str]):
memory_type_list = memory_type if isinstance(memory_type, list) else get_memory_type_human(memory_type)
return system_prompt == PromptAssembler.assemble_system_prompt({"memory_type": memory_type_list})
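# queue_save_to_memory_task embeds and stores the raw conversation turn synchronously, then
# queues a background task (consumed by handle_save_to_memory_task below) for the extraction
# step handled by save_extracted_to_memory_only.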
async def queue_save_to_memory_task(memory_ids: list[str], message_dict: dict):
"""
:param memory_ids:
:param message_dict: {
"user_id": str,
"agent_id": str,
"session_id": str,
"user_input": str,
"agent_response": str
}
"""
def new_task(_memory_id: str, _source_id: int):
return {
"id": get_uuid(),
"doc_id": _memory_id,
"task_type": "memory",
"progress": 0.0,
"digest": str(_source_id)
}
not_found_memory = []
failed_memory = []
for memory_id in memory_ids:
memory = MemoryService.get_by_memory_id(memory_id)
if not memory:
not_found_memory.append(memory_id)
continue
raw_message_id = REDIS_CONN.generate_auto_increment_id(namespace="memory")
raw_message = {
"message_id": raw_message_id,
"message_type": MemoryType.RAW.name.lower(),
"source_id": 0,
"memory_id": memory_id,
"user_id": "",
"agent_id": message_dict["agent_id"],
"session_id": message_dict["session_id"],
"content": f"User Input: {message_dict.get('user_input')}\nAgent Response: {message_dict.get('agent_response')}",
"valid_at": timestamp_to_date(current_timestamp()),
"invalid_at": None,
"forget_at": None,
"status": True
}
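# Embed and persist the raw turn right away; extraction runs later in the queued task.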
res, msg = await embed_and_save(memory, [raw_message])
if not res:
failed_memory.append({"memory_id": memory_id, "fail_msg": msg})
continue
task = new_task(memory_id, raw_message_id)
bulk_insert_into_db(Task, [task], replace_on_conflict=True)
task_message = {
"id": task["id"],
"task_id": task["id"],
"task_type": task["task_type"],
"memory_id": memory_id,
"source_id": raw_message_id,
"message_dict": message_dict
}
if not REDIS_CONN.queue_product(settings.get_svr_queue_name(priority=0), message=task_message):
failed_memory.append({"memory_id": memory_id, "fail_msg": "Can't access Redis."})
error_msg = ""
if not_found_memory:
error_msg = f"Memory {not_found_memory} not found."
if failed_memory:
error_msg += "".join([f"Memory {fm['memory_id']} failed. Detail: {fm['fail_msg']}" for fm in failed_memory])
if error_msg:
return False, error_msg
return True, "All add to task."
async def handle_save_to_memory_task(task_param: dict):
"""
:param task_param: {
"id": task_id
"memory_id": id
"source_id": id
"message_dict": {
"user_id": str,
"agent_id": str,
"session_id": str,
"user_input": str,
"agent_response": str
}
}
"""
_, task = TaskService.get_by_id(task_param["id"])
if not task:
return False, f"Task {task_param['id']} is not found."
if task.progress == -1:
return False, f"Task {task_param['id']} is already failed."
now_time = current_timestamp()
TaskService.update_by_id(task_param["id"], {"begin_at": timestamp_to_date(now_time)})
memory_id = task_param["memory_id"]
source_id = task_param["source_id"]
message_dict = task_param["message_dict"]
success, msg = await save_extracted_to_memory_only(memory_id, message_dict, source_id, task.id)
if success:
TaskService.update_progress(task.id, {"progress": 1.0, "progress_msg": timestamp_to_date(current_timestamp())+ " " + msg})
return True, msg
logging.error(msg)
TaskService.update_progress(task.id, {"progress": -1, "progress_msg": timestamp_to_date(current_timestamp())+ " " + msg})
return False, msg
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/db/joint_services/memory_message_service.py",
"license": "Apache License 2.0",
"lines": 390,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/doc_store/es_conn_base.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import re
import json
import time
import os
from abc import abstractmethod
from elasticsearch import NotFoundError
from elasticsearch_dsl import Index
from elastic_transport import ConnectionTimeout
from elasticsearch.client import IndicesClient
from common.file_utils import get_project_base_directory
from common.misc_utils import convert_bytes
from common.doc_store.doc_store_base import DocStoreConnection, OrderByExpr, MatchExpr
from rag.nlp import is_english, rag_tokenizer
from common import settings
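# Retry budget for Elasticsearch requests that time out (see the retry loops below).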
ATTEMPT_TIME = 2
class ESConnectionBase(DocStoreConnection):
def __init__(self, mapping_file_name: str="mapping.json", logger_name: str='ragflow.es_conn'):
from common.doc_store.es_conn_pool import ES_CONN
self.logger = logging.getLogger(logger_name)
self.info = {}
self.logger.info(f"Use Elasticsearch {settings.ES['hosts']} as the doc engine.")
self.es = ES_CONN.get_conn()
fp_mapping = os.path.join(get_project_base_directory(), "conf", mapping_file_name)
if not os.path.exists(fp_mapping):
msg = f"Elasticsearch mapping file not found at {fp_mapping}"
self.logger.error(msg)
raise Exception(msg)
with open(fp_mapping, "r") as f:
self.mapping = json.load(f)
self.logger.info(f"Elasticsearch {settings.ES['hosts']} is healthy.")
def _connect(self):
from common.doc_store.es_conn_pool import ES_CONN
if self.es.ping():
return True
self.es = ES_CONN.refresh_conn()
return True
"""
Database operations
"""
def db_type(self) -> str:
return "elasticsearch"
def health(self) -> dict:
health_dict = dict(self.es.cluster.health())
health_dict["type"] = "elasticsearch"
return health_dict
def get_cluster_stats(self):
"""
curl -XGET "http://{es_host}/_cluster/stats" -H "kbn-xsrf: reporting" to view raw stats.
"""
raw_stats = self.es.cluster.stats()
self.logger.debug(f"ESConnection.get_cluster_stats: {raw_stats}")
try:
res = {
'cluster_name': raw_stats['cluster_name'],
'status': raw_stats['status']
}
indices_status = raw_stats['indices']
res.update({
'indices': indices_status['count'],
'indices_shards': indices_status['shards']['total']
})
doc_info = indices_status['docs']
res.update({
'docs': doc_info['count'],
'docs_deleted': doc_info['deleted']
})
store_info = indices_status['store']
res.update({
'store_size': convert_bytes(store_info['size_in_bytes']),
'total_dataset_size': convert_bytes(store_info['total_data_set_size_in_bytes'])
})
mappings_info = indices_status['mappings']
res.update({
'mappings_fields': mappings_info['total_field_count'],
'mappings_deduplicated_fields': mappings_info['total_deduplicated_field_count'],
'mappings_deduplicated_size': convert_bytes(mappings_info['total_deduplicated_mapping_size_in_bytes'])
})
node_info = raw_stats['nodes']
res.update({
'nodes': node_info['count']['total'],
'nodes_version': node_info['versions'],
'os_mem': convert_bytes(node_info['os']['mem']['total_in_bytes']),
'os_mem_used': convert_bytes(node_info['os']['mem']['used_in_bytes']),
'os_mem_used_percent': node_info['os']['mem']['used_percent'],
'jvm_versions': node_info['jvm']['versions'][0]['vm_version'],
'jvm_heap_used': convert_bytes(node_info['jvm']['mem']['heap_used_in_bytes']),
'jvm_heap_max': convert_bytes(node_info['jvm']['mem']['heap_max_in_bytes'])
})
return res
except Exception as e:
self.logger.exception(f"ESConnection.get_cluster_stats: {e}")
return None
"""
Table operations
"""
def create_idx(self, index_name: str, dataset_id: str, vector_size: int, parser_id: str = None):
# parser_id is used by Infinity but not needed for ES (kept for interface compatibility)
if self.index_exist(index_name, dataset_id):
return True
try:
return IndicesClient(self.es).create(index=index_name,
settings=self.mapping["settings"],
mappings=self.mapping["mappings"])
except Exception:
self.logger.exception("ESConnection.createIndex error %s" % index_name)
def create_doc_meta_idx(self, index_name: str):
"""
Create a document metadata index.
Index name pattern: ragflow_doc_meta_{tenant_id}
- Per-tenant metadata index for storing document metadata fields
"""
if self.index_exist(index_name, ""):
return True
try:
fp_mapping = os.path.join(get_project_base_directory(), "conf", "doc_meta_es_mapping.json")
if not os.path.exists(fp_mapping):
self.logger.error(f"Document metadata mapping file not found at {fp_mapping}")
return False
with open(fp_mapping, "r") as f:
doc_meta_mapping = json.load(f)
return IndicesClient(self.es).create(index=index_name,
settings=doc_meta_mapping["settings"],
mappings=doc_meta_mapping["mappings"])
except Exception as e:
self.logger.exception(f"Error creating document metadata index {index_name}: {e}")
def delete_idx(self, index_name: str, dataset_id: str):
if len(dataset_id) > 0:
# The index needs to stay alive after any kb deletion since all kbs under this tenant share one index.
return
try:
self.es.indices.delete(index=index_name, allow_no_indices=True)
except NotFoundError:
pass
except Exception:
self.logger.exception("ESConnection.deleteIdx error %s" % index_name)
def index_exist(self, index_name: str, dataset_id: str = None) -> bool:
s = Index(index_name, self.es)
for i in range(ATTEMPT_TIME):
try:
return s.exists()
except ConnectionTimeout:
self.logger.exception("ES request timeout")
time.sleep(3)
self._connect()
continue
except Exception as e:
self.logger.exception(e)
break
return False
"""
CRUD operations
"""
def get(self, doc_id: str, index_name: str, dataset_ids: list[str]) -> dict | None:
for i in range(ATTEMPT_TIME):
try:
res = self.es.get(index=index_name,
id=doc_id, source=True, )
if str(res.get("timed_out", "")).lower() == "true":
raise Exception("Es Timeout.")
doc = res["_source"]
doc["id"] = doc_id
return doc
except NotFoundError:
return None
except Exception as e:
self.logger.exception(f"ESConnection.get({doc_id}) got exception")
raise e
self.logger.error(f"ESConnection.get timeout for {ATTEMPT_TIME} times!")
raise Exception("ESConnection.get timeout.")
@abstractmethod
def search(
self, select_fields: list[str],
highlight_fields: list[str],
condition: dict,
match_expressions: list[MatchExpr],
order_by: OrderByExpr,
offset: int,
limit: int,
index_names: str | list[str],
dataset_ids: list[str],
agg_fields: list[str] | None = None,
rank_feature: dict | None = None
):
raise NotImplementedError("Not implemented")
@abstractmethod
def insert(self, documents: list[dict], index_name: str, dataset_id: str = None) -> list[str]:
raise NotImplementedError("Not implemented")
@abstractmethod
def update(self, condition: dict, new_value: dict, index_name: str, dataset_id: str) -> bool:
raise NotImplementedError("Not implemented")
@abstractmethod
def delete(self, condition: dict, index_name: str, dataset_id: str) -> int:
raise NotImplementedError("Not implemented")
"""
Helper functions for search result
"""
def get_total(self, res):
if isinstance(res["hits"]["total"], type({})):
return res["hits"]["total"]["value"]
return res["hits"]["total"]
def get_doc_ids(self, res):
return [d["_id"] for d in res["hits"]["hits"]]
def _get_source(self, res):
rr = []
for d in res["hits"]["hits"]:
d["_source"]["id"] = d["_id"]
d["_source"]["_score"] = d["_score"]
rr.append(d["_source"])
return rr
@abstractmethod
def get_fields(self, res, fields: list[str]) -> dict[str, dict]:
raise NotImplementedError("Not implemented")
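# Build highlight snippets per hit: non-English text keeps Elasticsearch's own highlight
# fragments, English text is re-highlighted sentence by sentence by wrapping query keywords
# in <em> tags.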
def get_highlight(self, res, keywords: list[str], field_name: str):
ans = {}
for d in res["hits"]["hits"]:
highlights = d.get("highlight")
if not highlights:
continue
txt = "...".join([a for a in list(highlights.items())[0][1]])
if not is_english(txt.split()):
ans[d["_id"]] = txt
continue
txt = d["_source"][field_name]
txt = re.sub(r"[\r\n]", " ", txt, flags=re.IGNORECASE | re.MULTILINE)
txt_list = []
for t in re.split(r"[.?!;\n]", txt):
for w in keywords:
t = re.sub(r"(^|[ .?/'\"\(\)!,:;-])(%s)([ .?/'\"\(\)!,:;-])" % re.escape(w), r"\1<em>\2</em>\3", t,
flags=re.IGNORECASE | re.MULTILINE)
if not re.search(r"<em>[^<>]+</em>", t, flags=re.IGNORECASE | re.MULTILINE):
continue
txt_list.append(t)
ans[d["_id"]] = "...".join(txt_list) if txt_list else "...".join([a for a in list(highlights.items())[0][1]])
return ans
def get_aggregation(self, res, field_name: str):
agg_field = "aggs_" + field_name
if "aggregations" not in res or agg_field not in res["aggregations"]:
return list()
buckets = res["aggregations"][agg_field]["buckets"]
return [(b["key"], b["doc_count"]) for b in buckets]
"""
SQL
"""
def sql(self, sql: str, fetch_size: int, format: str):
self.logger.debug(f"ESConnection.sql get sql: {sql}")
sql = re.sub(r"[ `]+", " ", sql)
sql = sql.replace("%", "")
replaces = []
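# Rewrite equality / LIKE predicates on tokenized (*_tks) columns into full-text MATCH()
# predicates, tokenizing the query text the same way it was tokenized at indexing time.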
for r in re.finditer(r" ([a-z_]+_l?tks)( like | ?= ?)'([^']+)'", sql):
fld, v = r.group(1), r.group(3)
match = " MATCH({}, '{}', 'operator=OR;minimum_should_match=30%') ".format(
fld, rag_tokenizer.fine_grained_tokenize(rag_tokenizer.tokenize(v)))
replaces.append(
("{}{}'{}'".format(
r.group(1),
r.group(2),
r.group(3)),
match))
for p, r in replaces:
sql = sql.replace(p, r, 1)
self.logger.debug(f"ESConnection.sql to es: {sql}")
for i in range(ATTEMPT_TIME):
try:
res = self.es.sql.query(body={"query": sql, "fetch_size": fetch_size}, format=format,
request_timeout="2s")
return res
except ConnectionTimeout:
self.logger.exception("ES request timeout")
time.sleep(3)
self._connect()
continue
except Exception as e:
self.logger.exception(f"ESConnection.sql got exception. SQL:\n{sql}")
raise Exception(f"SQL error: {e}\n\nSQL: {sql}")
self.logger.error(f"ESConnection.sql timeout for {ATTEMPT_TIME} times!")
return None
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/doc_store/es_conn_base.py",
"license": "Apache License 2.0",
"lines": 292,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/doc_store/es_conn_pool.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import time
from elasticsearch import Elasticsearch
from common import settings
from common.decorator import singleton
ATTEMPT_TIME = 2
@singleton
class ElasticSearchConnectionPool:
def __init__(self):
if hasattr(settings, "ES"):
self.ES_CONFIG = settings.ES
else:
self.ES_CONFIG = settings.get_base_config("es", {})
for _ in range(ATTEMPT_TIME):
try:
if self._connect():
break
except Exception as e:
logging.warning(f"{str(e)}. Waiting Elasticsearch {self.ES_CONFIG['hosts']} to be healthy.")
time.sleep(5)
if not hasattr(self, "es_conn") or not self.es_conn or not self.es_conn.ping():
msg = f"Elasticsearch {self.ES_CONFIG['hosts']} is unhealthy in 10s."
logging.error(msg)
raise Exception(msg)
v = self.info.get("version", {"number": "8.11.3"})
v = v["number"].split(".")[0]
if int(v) < 8:
msg = f"Elasticsearch version must be greater than or equal to 8, current version: {v}"
logging.error(msg)
raise Exception(msg)
def _connect(self):
self.es_conn = Elasticsearch(
self.ES_CONFIG["hosts"].split(","),
basic_auth=(self.ES_CONFIG["username"], self.ES_CONFIG[
"password"]) if "username" in self.ES_CONFIG and "password" in self.ES_CONFIG else None,
verify_certs=self.ES_CONFIG.get("verify_certs", False),
timeout=600)
if self.es_conn:
self.info = self.es_conn.info()
return True
return False
def get_conn(self):
return self.es_conn
def refresh_conn(self):
if self.es_conn.ping():
return self.es_conn
else:
# close current if exist
if self.es_conn:
self.es_conn.close()
self._connect()
return self.es_conn
def __del__(self):
if hasattr(self, "es_conn") and self.es_conn:
self.es_conn.close()
ES_CONN = ElasticSearchConnectionPool()
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/doc_store/es_conn_pool.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/doc_store/infinity_conn_base.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import json
import time
from abc import abstractmethod
import infinity
from infinity.common import ConflictType
from infinity.index import IndexInfo, IndexType
from infinity.errors import ErrorCode
import pandas as pd
from common.file_utils import get_project_base_directory
from rag.nlp import is_english
from common import settings
from common.doc_store.doc_store_base import DocStoreConnection, MatchExpr, OrderByExpr
class InfinityConnectionBase(DocStoreConnection):
def __init__(self, mapping_file_name: str = "infinity_mapping.json", logger_name: str = "ragflow.infinity_conn", table_name_prefix: str="ragflow_"):
from common.doc_store.infinity_conn_pool import INFINITY_CONN
self.dbName = settings.INFINITY.get("db_name", "default_db")
self.mapping_file_name = mapping_file_name
self.logger = logging.getLogger(logger_name)
self.table_name_prefix = table_name_prefix
infinity_uri = settings.INFINITY["uri"]
if ":" in infinity_uri:
host, port = infinity_uri.split(":")
infinity_uri = infinity.common.NetworkAddress(host, int(port))
self.connPool = None
self.logger.info(f"Use Infinity {infinity_uri} as the doc engine.")
conn_pool = INFINITY_CONN.get_conn_pool()
for _ in range(24):
try:
inf_conn = conn_pool.get_conn()
res = inf_conn.show_current_node()
if res.error_code == ErrorCode.OK and res.server_status in ["started", "alive"]:
self._migrate_db(inf_conn)
self.connPool = conn_pool
conn_pool.release_conn(inf_conn)
break
conn_pool.release_conn(inf_conn)
self.logger.warning(f"Infinity status: {res.server_status}. Waiting Infinity {infinity_uri} to be healthy.")
time.sleep(5)
except Exception as e:
conn_pool = INFINITY_CONN.refresh_conn_pool()
self.logger.warning(f"{str(e)}. Waiting Infinity {infinity_uri} to be healthy.")
time.sleep(5)
if self.connPool is None:
msg = f"Infinity {infinity_uri} is unhealthy in 120s."
self.logger.error(msg)
raise Exception(msg)
self.logger.info(f"Infinity {infinity_uri} is healthy.")
def _migrate_db(self, inf_conn):
inf_db = inf_conn.create_database(self.dbName, ConflictType.Ignore)
fp_mapping = os.path.join(get_project_base_directory(), "conf", self.mapping_file_name)
if not os.path.exists(fp_mapping):
raise Exception(f"Mapping file not found at {fp_mapping}")
with open(fp_mapping) as f:
schema = json.load(f)
table_names = inf_db.list_tables().table_names
for table_name in table_names:
if not table_name.startswith(self.table_name_prefix):
# Skip tables not created by me
continue
inf_table = inf_db.get_table(table_name)
index_names = inf_table.list_indexes().index_names
if "q_vec_idx" not in index_names:
# Skip tables that lack RAGFlow's vector index (also not created by this module)
continue
column_names = inf_table.show_columns()["name"]
column_names = set(column_names)
for field_name, field_info in schema.items():
is_new_column = field_name not in column_names
if is_new_column:
res = inf_table.add_columns({field_name: field_info})
assert res.error_code == infinity.ErrorCode.OK
self.logger.info(f"INFINITY added following column to table {table_name}: {field_name} {field_info}")
if field_info["type"] == "varchar" and "analyzer" in field_info:
analyzers = field_info["analyzer"]
if isinstance(analyzers, str):
analyzers = [analyzers]
for analyzer in analyzers:
inf_table.create_index(
f"ft_{re.sub(r'[^a-zA-Z0-9]', '_', field_name)}_{re.sub(r'[^a-zA-Z0-9]', '_', analyzer)}",
IndexInfo(field_name, IndexType.FullText, {"ANALYZER": analyzer}),
ConflictType.Ignore,
)
if "index_type" in field_info:
index_config = field_info["index_type"]
if isinstance(index_config, str) and index_config == "secondary":
inf_table.create_index(
f"sec_{field_name}",
IndexInfo(field_name, IndexType.Secondary),
ConflictType.Ignore,
)
self.logger.info(f"INFINITY created secondary index sec_{field_name} for field {field_name}")
elif isinstance(index_config, dict):
if index_config.get("type") == "secondary":
params = {}
if "cardinality" in index_config:
params = {"cardinality": index_config["cardinality"]}
inf_table.create_index(
f"sec_{field_name}",
IndexInfo(field_name, IndexType.Secondary, params),
ConflictType.Ignore,
)
self.logger.info(f"INFINITY created secondary index sec_{field_name} for field {field_name} with params {params}")
"""
Dataframe and fields convert
"""
@staticmethod
@abstractmethod
def field_keyword(field_name: str):
# judge keyword or not, such as "*_kwd" tag-like columns.
raise NotImplementedError("Not implemented")
@abstractmethod
def convert_select_fields(self, output_fields: list[str]) -> list[str]:
# rm _kwd, _tks, _sm_tks, _with_weight suffix in field name.
raise NotImplementedError("Not implemented")
@staticmethod
@abstractmethod
def convert_matching_field(field_weight_str: str) -> str:
# convert matching field to
raise NotImplementedError("Not implemented")
@staticmethod
def list2str(lst: str | list, sep: str = " ") -> str:
if isinstance(lst, str):
return lst
return sep.join(lst)
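# Translate a RAGFlow condition dict into an Infinity filter string: keyword fields become
# filter_fulltext() predicates, list values become IN (...) clauses, "exists"/"must_not" map
# to inequality checks against the column default, and everything else becomes k=v.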
def equivalent_condition_to_str(self, condition: dict, table_instance=None) -> str | None:
assert "_id" not in condition
columns = {}
if table_instance:
for n, ty, de, _ in table_instance.show_columns().rows():
columns[n] = (ty, de)
def exists(cln):
nonlocal columns
assert cln in columns, f"'{cln}' should be in '{columns}'."
ty, de = columns[cln]
if ty.lower().find("cha"):
if not de:
de = ""
return f" {cln}!='{de}' "
return f"{cln}!={de}"
cond = list()
for k, v in condition.items():
if not isinstance(k, str) or not v:
continue
if self.field_keyword(k):
if isinstance(v, list):
inCond = list()
for item in v:
if isinstance(item, str):
item = item.replace("'", "''")
inCond.append(f"filter_fulltext('{self.convert_matching_field(k)}', '{item}')")
if inCond:
strInCond = " or ".join(inCond)
strInCond = f"({strInCond})"
cond.append(strInCond)
else:
cond.append(f"filter_fulltext('{self.convert_matching_field(k)}', '{v}')")
elif isinstance(v, list):
inCond = list()
for item in v:
if isinstance(item, str):
item = item.replace("'", "''")
inCond.append(f"'{item}'")
else:
inCond.append(str(item))
if inCond:
strInCond = ", ".join(inCond)
strInCond = f"{k} IN ({strInCond})"
cond.append(strInCond)
elif k == "must_not":
if isinstance(v, dict):
for kk, vv in v.items():
if kk == "exists":
cond.append("NOT (%s)" % exists(vv))
elif isinstance(v, str):
cond.append(f"{k}='{v}'")
elif k == "exists":
cond.append(exists(v))
else:
cond.append(f"{k}={str(v)}")
return " AND ".join(cond) if cond else "1=1"
@staticmethod
def concat_dataframes(df_list: list[pd.DataFrame], select_fields: list[str]) -> pd.DataFrame:
df_list2 = [df for df in df_list if not df.empty]
if df_list2:
return pd.concat(df_list2, axis=0).reset_index(drop=True)
schema = []
for field_name in select_fields:
if field_name == "score()": # Workaround: fix schema is changed to score()
schema.append("SCORE")
elif field_name == "similarity()": # Workaround: fix schema is changed to similarity()
schema.append("SIMILARITY")
else:
schema.append(field_name)
return pd.DataFrame(columns=schema)
"""
Database operations
"""
def db_type(self) -> str:
return "infinity"
def health(self) -> dict:
"""
Return the health status of the database.
"""
inf_conn = self.connPool.get_conn()
res = inf_conn.show_current_node()
self.connPool.release_conn(inf_conn)
res2 = {
"type": "infinity",
"status": "green" if res.error_code == 0 and res.server_status in ["started", "alive"] else "red",
"error": res.error_msg,
}
return res2
"""
Table operations
"""
def create_idx(self, index_name: str, dataset_id: str, vector_size: int, parser_id: str = None):
table_name = f"{index_name}_{dataset_id}"
self.logger.debug(f"CREATE_IDX: Creating table {table_name}, parser_id: {parser_id}")
inf_conn = self.connPool.get_conn()
inf_db = inf_conn.create_database(self.dbName, ConflictType.Ignore)
# Use configured schema
fp_mapping = os.path.join(get_project_base_directory(), "conf", self.mapping_file_name)
if not os.path.exists(fp_mapping):
raise Exception(f"Mapping file not found at {fp_mapping}")
with open(fp_mapping) as f:
    schema = json.load(f)
if parser_id is not None:
from common.constants import ParserType
if parser_id == ParserType.TABLE.value:
# Table parser: add chunk_data JSON column to store table-specific fields
schema["chunk_data"] = {"type": "json", "default": "{}"}
self.logger.info("Added chunk_data column for TABLE parser")
vector_name = f"q_{vector_size}_vec"
schema[vector_name] = {"type": f"vector,{vector_size},float"}
inf_table = inf_db.create_table(
table_name,
schema,
ConflictType.Ignore,
)
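# Approximate nearest-neighbour index on the vector column (HNSW, cosine metric, LVQ-encoded)
# so dense retrieval avoids a full-table scan.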
inf_table.create_index(
"q_vec_idx",
IndexInfo(
vector_name,
IndexType.Hnsw,
{
"M": "16",
"ef_construction": "50",
"metric": "cosine",
"encode": "lvq",
},
),
ConflictType.Ignore,
)
for field_name, field_info in schema.items():
if field_info["type"] != "varchar" or "analyzer" not in field_info:
continue
analyzers = field_info["analyzer"]
if isinstance(analyzers, str):
analyzers = [analyzers]
for analyzer in analyzers:
inf_table.create_index(
f"ft_{re.sub(r'[^a-zA-Z0-9]', '_', field_name)}_{re.sub(r'[^a-zA-Z0-9]', '_', analyzer)}",
IndexInfo(field_name, IndexType.FullText, {"ANALYZER": analyzer}),
ConflictType.Ignore,
)
# Create secondary indexes for fields with index_type
for field_name, field_info in schema.items():
if "index_type" not in field_info:
continue
index_config = field_info["index_type"]
if isinstance(index_config, str) and index_config == "secondary":
inf_table.create_index(
f"sec_{field_name}",
IndexInfo(field_name, IndexType.Secondary),
ConflictType.Ignore,
)
self.logger.info(f"INFINITY created secondary index sec_{field_name} for field {field_name}")
elif isinstance(index_config, dict):
if index_config.get("type") == "secondary":
params = {}
if "cardinality" in index_config:
params = {"cardinality": index_config["cardinality"]}
inf_table.create_index(
f"sec_{field_name}",
IndexInfo(field_name, IndexType.Secondary, params),
ConflictType.Ignore,
)
self.logger.info(f"INFINITY created secondary index sec_{field_name} for field {field_name} with params {params}")
self.connPool.release_conn(inf_conn)
self.logger.info(f"INFINITY created table {table_name}, vector size {vector_size}")
return True
def create_doc_meta_idx(self, index_name: str):
"""
Create a document metadata table.
Table name pattern: ragflow_doc_meta_{tenant_id}
- Per-tenant metadata table for storing document metadata fields
"""
table_name = index_name
inf_conn = self.connPool.get_conn()
inf_db = inf_conn.create_database(self.dbName, ConflictType.Ignore)
try:
fp_mapping = os.path.join(get_project_base_directory(), "conf", "doc_meta_infinity_mapping.json")
if not os.path.exists(fp_mapping):
self.logger.error(f"Document metadata mapping file not found at {fp_mapping}")
return False
with open(fp_mapping) as f:
schema = json.load(f)
inf_db.create_table(
table_name,
schema,
ConflictType.Ignore,
)
# Create secondary indexes on id and kb_id for better query performance
inf_table = inf_db.get_table(table_name)
try:
inf_table.create_index(
f"idx_{table_name}_id",
IndexInfo("id", IndexType.Secondary),
ConflictType.Ignore,
)
self.logger.debug(f"INFINITY created secondary index on id for table {table_name}")
except Exception as e:
self.logger.warning(f"Failed to create index on id for {table_name}: {e}")
try:
inf_table.create_index(
f"idx_{table_name}_kb_id",
IndexInfo("kb_id", IndexType.Secondary),
ConflictType.Ignore,
)
self.logger.debug(f"INFINITY created secondary index on kb_id for table {table_name}")
except Exception as e:
self.logger.warning(f"Failed to create index on kb_id for {table_name}: {e}")
self.connPool.release_conn(inf_conn)
self.logger.debug(f"INFINITY created document metadata table {table_name} with secondary indexes")
return True
except Exception as e:
self.connPool.release_conn(inf_conn)
self.logger.exception(f"Error creating document metadata table {table_name}: {e}")
return False
def delete_idx(self, index_name: str, dataset_id: str):
if index_name.startswith("ragflow_doc_meta_"):
table_name = index_name
else:
table_name = f"{index_name}_{dataset_id}"
inf_conn = self.connPool.get_conn()
db_instance = inf_conn.get_database(self.dbName)
db_instance.drop_table(table_name, ConflictType.Ignore)
self.connPool.release_conn(inf_conn)
self.logger.info(f"INFINITY dropped table {table_name}")
def index_exist(self, index_name: str, dataset_id: str) -> bool:
if index_name.startswith("ragflow_doc_meta_"):
table_name = index_name
else:
table_name = f"{index_name}_{dataset_id}"
try:
inf_conn = self.connPool.get_conn()
db_instance = inf_conn.get_database(self.dbName)
_ = db_instance.get_table(table_name)
self.connPool.release_conn(inf_conn)
return True
except Exception as e:
self.logger.warning(f"INFINITY indexExist {str(e)}")
return False
"""
CRUD operations
"""
@abstractmethod
def search(
self,
select_fields: list[str],
highlight_fields: list[str],
condition: dict,
match_expressions: list[MatchExpr],
order_by: OrderByExpr,
offset: int,
limit: int,
index_names: str | list[str],
dataset_ids: list[str],
agg_fields: list[str] | None = None,
rank_feature: dict | None = None,
) -> tuple[pd.DataFrame, int]:
raise NotImplementedError("Not implemented")
@abstractmethod
def get(self, doc_id: str, index_name: str, knowledgebase_ids: list[str]) -> dict | None:
raise NotImplementedError("Not implemented")
@abstractmethod
def insert(self, documents: list[dict], index_name: str, dataset_ids: str = None) -> list[str]:
raise NotImplementedError("Not implemented")
@abstractmethod
def update(self, condition: dict, new_value: dict, index_name: str, dataset_id: str) -> bool:
raise NotImplementedError("Not implemented")
def delete(self, condition: dict, index_name: str, dataset_id: str) -> int:
inf_conn = self.connPool.get_conn()
db_instance = inf_conn.get_database(self.dbName)
if index_name.startswith("ragflow_doc_meta_"):
table_name = index_name
else:
table_name = f"{index_name}_{dataset_id}"
try:
table_instance = db_instance.get_table(table_name)
except Exception:
self.logger.warning(f"Skipped deleting from table {table_name} since the table doesn't exist.")
return 0
filter_str = self.equivalent_condition_to_str(condition, table_instance)
self.logger.debug(f"INFINITY delete table {table_name}, filter {filter_str}.")
res = table_instance.delete(filter_str)
self.connPool.release_conn(inf_conn)
return res.deleted_rows
"""
Helper functions for search result
"""
def get_total(self, res: tuple[pd.DataFrame, int] | pd.DataFrame) -> int:
if isinstance(res, tuple):
return res[1]
return len(res)
def get_doc_ids(self, res: tuple[pd.DataFrame, int] | pd.DataFrame) -> list[str]:
if isinstance(res, tuple):
res = res[0]
return list(res["id"])
@abstractmethod
def get_fields(self, res: tuple[pd.DataFrame, int] | pd.DataFrame, fields: list[str]) -> dict[str, dict]:
raise NotImplementedError("Not implemented")
def get_highlight(self, res: tuple[pd.DataFrame, int] | pd.DataFrame, keywords: list[str], field_name: str):
if isinstance(res, tuple):
res = res[0]
ans = {}
num_rows = len(res)
column_id = res["id"]
if field_name not in res:
if field_name == "content_with_weight" and "content" in res:
field_name = "content"
else:
return {}
for i in range(num_rows):
id = column_id[i]
txt = res[field_name][i]
if re.search(r"<em>[^<>]+</em>", txt, flags=re.IGNORECASE | re.MULTILINE):
ans[id] = txt
continue
txt = re.sub(r"[\r\n]", " ", txt, flags=re.IGNORECASE | re.MULTILINE)
txt_list = []
for t in re.split(r"[.?!;\n]", txt):
if is_english([t]):
for w in keywords:
t = re.sub(
r"(^|[ .?/'\"\(\)!,:;-])(%s)([ .?/'\"\(\)!,:;-])" % re.escape(w),
r"\1<em>\2</em>\3",
t,
flags=re.IGNORECASE | re.MULTILINE,
)
else:
for w in sorted(keywords, key=len, reverse=True):
t = re.sub(
re.escape(w),
f"<em>{w}</em>",
t,
flags=re.IGNORECASE | re.MULTILINE,
)
if not re.search(r"<em>[^<>]+</em>", t, flags=re.IGNORECASE | re.MULTILINE):
continue
txt_list.append(t)
if txt_list:
ans[id] = "...".join(txt_list)
else:
ans[id] = txt
return ans
def get_aggregation(self, res: tuple[pd.DataFrame, int] | pd.DataFrame, field_name: str):
"""
Manual aggregation for tag fields since Infinity doesn't provide native aggregation
"""
from collections import Counter
# Extract DataFrame from result
if isinstance(res, tuple):
df, _ = res
else:
df = res
if df.empty or field_name not in df.columns:
return []
# Aggregate tag counts
tag_counter = Counter()
for value in df[field_name]:
if pd.isna(value) or not value:
continue
# Handle different tag formats
if isinstance(value, str):
# Split by ### for tag_kwd field or comma for other formats
if field_name == "tag_kwd" and "###" in value:
tags = [tag.strip() for tag in value.split("###") if tag.strip()]
else:
# Try comma separation as fallback
tags = [tag.strip() for tag in value.split(",") if tag.strip()]
for tag in tags:
if tag: # Only count non-empty tags
tag_counter[tag] += 1
elif isinstance(value, list):
# Handle list format
for tag in value:
if tag and isinstance(tag, str):
tag_counter[tag.strip()] += 1
# Return as list of [tag, count] pairs, sorted by count descending
return [[tag, count] for tag, count in tag_counter.most_common()]
"""
SQL
"""
def sql(self, sql: str, fetch_size: int, format: str):
"""
Execute an SQL query against the Infinity database via the psql command-line client.
Rewrites text-to-SQL output (field aliases, etc.) into Infinity's SQL syntax first.
"""
import subprocess
try:
self.logger.debug(f"InfinityConnection.sql get sql: {sql}")
# Clean up SQL
sql = re.sub(r"[ `]+", " ", sql)
sql = sql.replace("%", "")
# Transform SELECT field aliases to actual stored field names
# Build field mapping from infinity_mapping.json comment field
field_mapping = {}
# Also build reverse mapping for column names in result
reverse_mapping = {}
fp_mapping = os.path.join(get_project_base_directory(), "conf", self.mapping_file_name)
if os.path.exists(fp_mapping):
with open(fp_mapping) as f:
schema = json.load(f)
for field_name, field_info in schema.items():
if "comment" in field_info:
# Parse comma-separated aliases from comment
# e.g., "docnm_kwd, title_tks, title_sm_tks"
aliases = [a.strip() for a in field_info["comment"].split(",")]
for alias in aliases:
field_mapping[alias] = field_name
reverse_mapping[field_name] = alias # Store first alias for reverse mapping
# Replace field names in SELECT clause
select_match = re.search(r"(select\s+.*?)(from\s+)", sql, re.IGNORECASE)
if select_match:
select_clause = select_match.group(1)
from_clause = select_match.group(2)
# Apply field transformations
for alias, actual in field_mapping.items():
select_clause = re.sub(rf"(^|[, ]){alias}([, ]|$)", rf"\1{actual}\2", select_clause)
sql = select_clause + from_clause + sql[select_match.end() :]
# Also replace field names in WHERE, ORDER BY, GROUP BY, and HAVING clauses
for alias, actual in field_mapping.items():
# Transform in WHERE clause
sql = re.sub(rf"(\bwhere\s+[^;]*?)(\b){re.escape(alias)}\b", rf"\1{actual}", sql, flags=re.IGNORECASE)
# Transform in ORDER BY clause
sql = re.sub(rf"(\border by\s+[^;]*?)(\b){re.escape(alias)}\b", rf"\1{actual}", sql, flags=re.IGNORECASE)
# Transform in GROUP BY clause
sql = re.sub(rf"(\bgroup by\s+[^;]*?)(\b){re.escape(alias)}\b", rf"\1{actual}", sql, flags=re.IGNORECASE)
# Transform in HAVING clause
sql = re.sub(rf"(\bhaving\s+[^;]*?)(\b){re.escape(alias)}\b", rf"\1{actual}", sql, flags=re.IGNORECASE)
self.logger.debug(f"InfinityConnection.sql to execute: {sql}")
# Get connection parameters from the Infinity connection pool wrapper
# We need to use INFINITY_CONN singleton, not the raw ConnectionPool
from common.doc_store.infinity_conn_pool import INFINITY_CONN
conn_info = INFINITY_CONN.get_conn_uri()
# Parse host and port from conn_info
if conn_info and "host=" in conn_info:
host_match = re.search(r"host=(\S+)", conn_info)
if host_match:
host = host_match.group(1)
else:
host = "infinity"
else:
host = "infinity"
# Parse port from conn_info, default to 5432 if not found
if conn_info and "port=" in conn_info:
port_match = re.search(r"port=(\d+)", conn_info)
if port_match:
port = port_match.group(1)
else:
port = "5432"
else:
port = "5432"
# Use psql command to execute SQL
# Use full path to psql to avoid PATH issues
psql_path = "/usr/bin/psql"
# Check if psql exists at expected location, otherwise try to find it
import shutil
psql_from_path = shutil.which("psql")
if psql_from_path:
psql_path = psql_from_path
# Execute SQL with psql to get both column names and data in one call
psql_cmd = [
psql_path,
"-h",
host,
"-p",
port,
"-c",
sql,
]
self.logger.debug(f"Executing psql command: {' '.join(psql_cmd)}")
result = subprocess.run(
psql_cmd,
capture_output=True,
text=True,
timeout=10, # 10 second timeout
)
if result.returncode != 0:
error_msg = result.stderr.strip()
raise Exception(f"psql command failed: {error_msg}\nSQL: {sql}")
# Parse the output
output = result.stdout.strip()
if not output:
# No results
return {"columns": [], "rows": []} if format == "json" else []
# Parse psql table output which has format:
# col1 | col2 | col3
# -----+-----+-----
# val1 | val2 | val3
lines = output.split("\n")
# Extract column names from first line
columns = []
rows = []
if len(lines) >= 1:
header_line = lines[0]
for col_name in header_line.split("|"):
col_name = col_name.strip()
if col_name:
columns.append({"name": col_name})
# Data starts after the separator line (line with dashes)
data_start = 2 if len(lines) >= 2 and "-" in lines[1] else 1
for i in range(data_start, len(lines)):
line = lines[i].strip()
# Skip empty lines and footer lines like "(1 row)"
if not line or re.match(r"^\(\d+ row", line):
continue
# Split by | and strip each cell
row = [cell.strip() for cell in line.split("|")]
# Ensure row matches column count
if len(row) == len(columns):
rows.append(row)
elif len(row) > len(columns):
# Row has more cells than columns - truncate
rows.append(row[: len(columns)])
elif len(row) < len(columns):
# Row has fewer cells - pad with empty strings
rows.append(row + [""] * (len(columns) - len(row)))
if format == "json":
result = {"columns": columns, "rows": rows[:fetch_size] if fetch_size > 0 else rows}
else:
result = rows[:fetch_size] if fetch_size > 0 else rows
return result
except subprocess.TimeoutExpired:
self.logger.exception(f"InfinityConnection.sql timeout. SQL:\n{sql}")
raise Exception(f"SQL timeout\n\nSQL: {sql}")
except Exception as e:
self.logger.exception(f"InfinityConnection.sql got exception. SQL:\n{sql}")
raise Exception(f"SQL error: {e}\n\nSQL: {sql}")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/doc_store/infinity_conn_base.py",
"license": "Apache License 2.0",
"lines": 667,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/doc_store/infinity_conn_pool.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import time
import infinity
from infinity.connection_pool import ConnectionPool
from infinity.errors import ErrorCode
from common import settings
from common.decorator import singleton
@singleton
class InfinityConnectionPool:
def __init__(self):
if hasattr(settings, "INFINITY"):
self.INFINITY_CONFIG = settings.INFINITY
else:
self.INFINITY_CONFIG = settings.get_base_config("infinity", {
"uri": "infinity:23817",
"postgres_port": 5432,
"db_name": "default_db"
})
infinity_uri = self.INFINITY_CONFIG["uri"]
if ":" in infinity_uri:
host, port = infinity_uri.split(":")
self.infinity_uri = infinity.common.NetworkAddress(host, int(port))
# Start from None so the health check after the retry loop works even if every attempt raises.
self.conn_pool = None
for _ in range(24):
try:
conn_pool = ConnectionPool(self.infinity_uri, max_size=4)
inf_conn = conn_pool.get_conn()
res = inf_conn.show_current_node()
if res.error_code == ErrorCode.OK and res.server_status in ["started", "alive"]:
self.conn_pool = conn_pool
conn_pool.release_conn(inf_conn)
break
except Exception as e:
logging.warning(f"{str(e)}. Waiting Infinity {infinity_uri} to be healthy.")
time.sleep(5)
if self.conn_pool is None:
msg = f"Infinity {infinity_uri} is unhealthy in 120s."
logging.error(msg)
raise Exception(msg)
logging.info(f"Infinity {infinity_uri} is healthy.")
def get_conn_pool(self):
return self.conn_pool
def get_conn_uri(self):
"""
Get connection URI for PostgreSQL protocol.
"""
infinity_uri = self.INFINITY_CONFIG["uri"]
postgres_port = self.INFINITY_CONFIG["postgres_port"]
db_name = self.INFINITY_CONFIG["db_name"]
if ":" in infinity_uri:
host, _ = infinity_uri.split(":")
return f"host={host} port={postgres_port} dbname={db_name}"
return f"host=localhost port={postgres_port} dbname={db_name}"
def refresh_conn_pool(self):
try:
inf_conn = self.conn_pool.get_conn()
res = inf_conn.show_current_node()
if res.error_code == ErrorCode.OK and res.server_status in ["started", "alive"]:
return self.conn_pool
else:
raise Exception(f"{res.error_code}: {res.server_status}")
except Exception as e:
logging.error(str(e))
if hasattr(self, "conn_pool") and self.conn_pool:
self.conn_pool.destroy()
self.conn_pool = ConnectionPool(self.infinity_uri, max_size=32)
return self.conn_pool
def __del__(self):
if hasattr(self, "conn_pool") and self.conn_pool:
self.conn_pool.destroy()
INFINITY_CONN = InfinityConnectionPool()
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/doc_store/infinity_conn_pool.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:memory/services/messages.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from typing import List
from common import settings
from common.constants import MemoryType
from common.doc_store.doc_store_base import OrderByExpr, MatchExpr
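# One message index/table per tenant: every memory of a tenant lives in "memory_{tenant_id}",
# and individual documents are keyed by "{memory_id}_{message_id}" (see insert_message and
# get_by_message_id below).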
def index_name(uid: str): return f"memory_{uid}"
class MessageService:
@classmethod
def has_index(cls, uid: str, memory_id: str):
index = index_name(uid)
return settings.msgStoreConn.index_exist(index, memory_id)
@classmethod
def create_index(cls, uid: str, memory_id: str, vector_size: int):
index = index_name(uid)
return settings.msgStoreConn.create_idx(index, memory_id, vector_size)
@classmethod
def delete_index(cls, uid: str, memory_id: str):
index = index_name(uid)
return settings.msgStoreConn.delete_idx(index, memory_id)
@classmethod
def insert_message(cls, messages: List[dict], uid: str, memory_id: str):
index = index_name(uid)
for m in messages:
    m.update({
        "id": f'{memory_id}_{m["message_id"]}',
        "status": 1 if m["status"] else 0
    })
return settings.msgStoreConn.insert(messages, index, memory_id)
@classmethod
def update_message(cls, condition: dict, update_dict: dict, uid: str, memory_id: str):
index = index_name(uid)
if "status" in update_dict:
update_dict["status"] = 1 if update_dict["status"] else 0
return settings.msgStoreConn.update(condition, update_dict, index, memory_id)
@classmethod
def delete_message(cls, condition: dict, uid: str, memory_id: str):
index = index_name(uid)
return settings.msgStoreConn.delete(condition, index, memory_id)
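# list_message pages through RAW messages first, then runs a second search for the extracted
# messages whose source_id points at those raw messages and attaches them under each raw
# message's "extract" key.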
@classmethod
def list_message(cls, uid: str, memory_id: str, agent_ids: List[str]=None, keywords: str=None, page: int=1, page_size: int=50):
index = index_name(uid)
filter_dict = {}
if agent_ids:
filter_dict["agent_id"] = agent_ids
if keywords:
filter_dict["session_id"] = keywords
select_fields = [
"message_id", "message_type", "source_id", "memory_id", "user_id", "agent_id", "session_id", "valid_at",
"invalid_at", "forget_at", "status"
]
order_by = OrderByExpr()
order_by.desc("valid_at")
res, total_count = settings.msgStoreConn.search(
select_fields=select_fields,
highlight_fields=[],
condition={**filter_dict, "message_type": MemoryType.RAW.name.lower()},
match_expressions=[], order_by=order_by,
offset=(page-1)*page_size, limit=page_size,
index_names=index, memory_ids=[memory_id], agg_fields=[], hide_forgotten=False
)
if not total_count:
return {
"message_list": [],
"total_count": 0
}
raw_msg_mapping = settings.msgStoreConn.get_fields(res, select_fields)
raw_messages = list(raw_msg_mapping.values())
extract_filter = {"source_id": [r["message_id"] for r in raw_messages]}
extract_res, _ = settings.msgStoreConn.search(
select_fields=select_fields,
highlight_fields=[],
condition=extract_filter,
match_expressions=[], order_by=order_by,
offset=0, limit=512,
index_names=index, memory_ids=[memory_id], agg_fields=[], hide_forgotten=False
)
extract_msg = settings.msgStoreConn.get_fields(extract_res, select_fields)
grouped_extract_msg = {}
for msg in extract_msg.values():
if grouped_extract_msg.get(msg["source_id"]):
grouped_extract_msg[msg["source_id"]].append(msg)
else:
grouped_extract_msg[msg["source_id"]] = [msg]
for raw_msg in raw_messages:
raw_msg["extract"] = grouped_extract_msg.get(raw_msg["message_id"], [])
return {
"message_list": raw_messages,
"total_count": total_count
}
@classmethod
def get_recent_messages(cls, uid_list: List[str], memory_ids: List[str], agent_id: str, session_id: str, limit: int):
index_names = [index_name(uid) for uid in uid_list]
condition_dict = {
"agent_id": agent_id,
"session_id": session_id
}
order_by = OrderByExpr()
order_by.desc("valid_at")
res, total_count = settings.msgStoreConn.search(
select_fields=[
"message_id", "message_type", "source_id", "memory_id", "user_id", "agent_id", "session_id", "valid_at",
"invalid_at", "forget_at", "status", "content"
],
highlight_fields=[],
condition=condition_dict,
match_expressions=[], order_by=order_by,
offset=0, limit=limit,
index_names=index_names, memory_ids=memory_ids, agg_fields=[]
)
if not total_count:
return []
doc_mapping = settings.msgStoreConn.get_fields(res, [
"message_id", "message_type", "source_id", "memory_id","user_id", "agent_id", "session_id",
"valid_at", "invalid_at", "forget_at", "status", "content"
])
return list(doc_mapping.values())
@classmethod
def search_message(cls, memory_ids: List[str], condition_dict: dict, uid_list: List[str], match_expressions:list[MatchExpr], top_n: int):
index_names = [index_name(uid) for uid in uid_list]
# filter only valid messages by default
if "status" not in condition_dict:
condition_dict["status"] = 1
order_by = OrderByExpr()
order_by.desc("valid_at")
res, total_count = settings.msgStoreConn.search(
select_fields=[
"message_id", "message_type", "source_id", "memory_id", "user_id", "agent_id", "session_id",
"valid_at",
"invalid_at", "forget_at", "status", "content"
],
highlight_fields=[],
condition=condition_dict,
match_expressions=match_expressions,
order_by=order_by,
offset=0, limit=top_n,
index_names=index_names, memory_ids=memory_ids, agg_fields=[]
)
if not total_count:
return []
docs = settings.msgStoreConn.get_fields(res, [
"message_id", "message_type", "source_id", "memory_id", "user_id", "agent_id", "session_id", "valid_at",
"invalid_at", "forget_at", "status", "content"
])
return list(docs.values())
@staticmethod
def calculate_message_size(message: dict):
return sys.getsizeof(message["content"]) + sys.getsizeof(message["content_embed"][0]) * len(message["content_embed"])
@classmethod
def calculate_memory_size(cls, memory_ids: List[str], uid_list: List[str]):
index_names = [index_name(uid) for uid in uid_list]
order_by = OrderByExpr()
order_by.desc("valid_at")
res, count = settings.msgStoreConn.search(
select_fields=["memory_id", "content", "content_embed"],
highlight_fields=[],
condition={},
match_expressions=[],
order_by=order_by,
offset=0, limit=2048*len(memory_ids),
index_names=index_names, memory_ids=memory_ids, agg_fields=[], hide_forgotten=False
)
if count == 0:
return {}
docs = settings.msgStoreConn.get_fields(res, ["memory_id", "content", "content_embed"])
size_dict = {}
for doc in docs.values():
if size_dict.get(doc["memory_id"]):
size_dict[doc["memory_id"]] += cls.calculate_message_size(doc)
else:
size_dict[doc["memory_id"]] = cls.calculate_message_size(doc)
return size_dict
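# FIFO forgetting: reclaim space from already-forgotten messages first, then fall back to the
# oldest messages (ascending valid_at) until at least size_to_delete bytes are covered.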
@classmethod
def pick_messages_to_delete_by_fifo(cls, memory_id: str, uid: str, size_to_delete: int):
select_fields = ["message_id", "content", "content_embed"]
_index_name = index_name(uid)
res = settings.msgStoreConn.get_forgotten_messages(select_fields, _index_name, memory_id)
current_size = 0
ids_to_remove = []
if res:
message_list = settings.msgStoreConn.get_fields(res, select_fields)
for message in message_list.values():
if current_size < size_to_delete:
current_size += cls.calculate_message_size(message)
ids_to_remove.append(message["message_id"])
else:
return ids_to_remove, current_size
if current_size >= size_to_delete:
return ids_to_remove, current_size
order_by = OrderByExpr()
order_by.asc("valid_at")
res, total_count = settings.msgStoreConn.search(
select_fields=select_fields,
highlight_fields=[],
condition={},
match_expressions=[],
order_by=order_by,
offset=0, limit=512,
index_names=[_index_name], memory_ids=[memory_id], agg_fields=[]
)
docs = settings.msgStoreConn.get_fields(res, select_fields)
for doc in docs.values():
if current_size < size_to_delete:
current_size += cls.calculate_message_size(doc)
ids_to_remove.append(doc["message_id"])
else:
return ids_to_remove, current_size
return ids_to_remove, current_size
@classmethod
def get_missing_field_messages(cls, memory_id: str, uid: str, field_name: str):
select_fields = ["message_id", "content"]
_index_name = index_name(uid)
res = settings.msgStoreConn.get_missing_field_message(
select_fields=select_fields,
index_name=_index_name,
memory_id=memory_id,
field_name=field_name
)
if not res:
return []
docs = settings.msgStoreConn.get_fields(res, select_fields)
return list(docs.values())
@classmethod
def get_by_message_id(cls, memory_id: str, message_id: int, uid: str):
index = index_name(uid)
doc_id = f'{memory_id}_{message_id}'
return settings.msgStoreConn.get(doc_id, index, [memory_id])
@classmethod
def get_max_message_id(cls, uid_list: List[str], memory_ids: List[str]):
order_by = OrderByExpr()
order_by.desc("message_id")
index_names = [index_name(uid) for uid in uid_list]
res, total_count = settings.msgStoreConn.search(
select_fields=["message_id"],
highlight_fields=[],
condition={},
match_expressions=[],
order_by=order_by,
offset=0, limit=1,
index_names=index_names, memory_ids=memory_ids,
agg_fields=[], hide_forgotten=False
)
if not total_count:
return 1
docs = settings.msgStoreConn.get_fields(res, ["message_id"])
if not docs:
return 1
else:
latest_msg = list(docs.values())[0]
return int(latest_msg["message_id"])
| {
"repo_id": "infiniflow/ragflow",
"file_path": "memory/services/messages.py",
"license": "Apache License 2.0",
"lines": 263,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:memory/services/query.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import logging
import json
import numpy as np
from common.query_base import QueryBase
from common.doc_store.doc_store_base import MatchDenseExpr, MatchTextExpr
from common.float_utils import get_float
from rag.nlp import rag_tokenizer, term_weight, synonym
def get_vector(txt, emb_mdl, topk=10, similarity=0.1):
if isinstance(similarity, str) and len(similarity) > 0:
try:
similarity = float(similarity)
except Exception as e:
logging.warning(f"Convert similarity '{similarity}' to float failed: {e}. Using default 0.1")
similarity = 0.1
qv, _ = emb_mdl.encode_queries(txt)
shape = np.array(qv).shape
if len(shape) > 1:
raise Exception(
f"Dealer.get_vector returned array's shape {shape} doesn't match expectation(exact one dimension).")
embedding_data = [get_float(v) for v in qv]
vector_column_name = f"q_{len(embedding_data)}_vec"
return MatchDenseExpr(vector_column_name, embedding_data, 'float', 'cosine', topk, {"similarity": similarity})
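# MsgTextQuery builds the full-text half of hybrid retrieval: the question is normalized and
# tokenized, terms are weighted, synonyms are added with a reduced boost, and adjacent term
# pairs are added as boosted phrases.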
class MsgTextQuery(QueryBase):
def __init__(self):
self.tw = term_weight.Dealer()
self.syn = synonym.Dealer()
self.query_fields = [
"content"
]
def question(self, txt, tbl="messages", min_match: float=0.6):
original_query = txt
txt = MsgTextQuery.add_space_between_eng_zh(txt)
txt = re.sub(
r"[ :|\r\n\t,,。??/`!!&^%%()\[\]{}<>]+",
" ",
rag_tokenizer.tradi2simp(rag_tokenizer.strQ2B(txt.lower())),
).strip()
otxt = txt
txt = MsgTextQuery.rmWWW(txt)
if not self.is_chinese(txt):
txt = self.rmWWW(txt)
tks = rag_tokenizer.tokenize(txt).split()
keywords = [t for t in tks if t]
tks_w = self.tw.weights(tks, preprocess=False)
tks_w = [(re.sub(r"[ \\\"'^]", "", tk), w) for tk, w in tks_w]
tks_w = [(re.sub(r"^[a-z0-9]$", "", tk), w) for tk, w in tks_w if tk]
tks_w = [(re.sub(r"^[\+-]", "", tk), w) for tk, w in tks_w if tk]
tks_w = [(tk.strip(), w) for tk, w in tks_w if tk.strip()]
syns = []
for tk, w in tks_w[:256]:
syn = self.syn.lookup(tk)
syn = rag_tokenizer.tokenize(" ".join(syn)).split()
keywords.extend(syn)
syn = ["\"{}\"^{:.4f}".format(s, w / 4.) for s in syn if s.strip()]
syns.append(" ".join(syn))
q = ["({}^{:.4f}".format(tk, w) + " {})".format(syn) for (tk, w), syn in zip(tks_w, syns) if
tk and not re.match(r"[.^+\(\)-]", tk)]
for i in range(1, len(tks_w)):
left, right = tks_w[i - 1][0].strip(), tks_w[i][0].strip()
if not left or not right:
continue
q.append(
'"%s %s"^%.4f'
% (
tks_w[i - 1][0],
tks_w[i][0],
max(tks_w[i - 1][1], tks_w[i][1]) * 2,
)
)
if not q:
q.append(txt)
query = " ".join(q)
return MatchTextExpr(
self.query_fields, query, 100, {"original_query": original_query}
), keywords
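# Note (descriptive comment, not in the original): the branch above handles non-Chinese
# queries and returns early with a weighted term/phrase query; the remainder of this
# method builds the query for Chinese text using fine-grained tokenization and synonym
# expansion via the nested helper below.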
def need_fine_grained_tokenize(tk):
if len(tk) < 3:
return False
if re.match(r"[0-9a-z\.\+#_\*-]+$", tk):
return False
return True
txt = self.rmWWW(txt)
qs, keywords = [], []
for tt in self.tw.split(txt)[:256]: # .split():
if not tt:
continue
keywords.append(tt)
twts = self.tw.weights([tt])
syns = self.syn.lookup(tt)
if syns and len(keywords) < 32:
keywords.extend(syns)
logging.debug(json.dumps(twts, ensure_ascii=False))
tms = []
for tk, w in sorted(twts, key=lambda x: x[1] * -1):
sm = (
rag_tokenizer.fine_grained_tokenize(tk).split()
if need_fine_grained_tokenize(tk)
else []
)
sm = [
re.sub(
r"[ ,\./;'\[\]\\`~!@#$%\^&\*\(\)=\+_<>\?:\"\{\}\|,。;‘’【】、!¥……()——《》?:“”-]+",
"",
m,
)
for m in sm
]
sm = [self.sub_special_char(m) for m in sm if len(m) > 1]
sm = [m for m in sm if len(m) > 1]
if len(keywords) < 32:
keywords.append(re.sub(r"[ \\\"']+", "", tk))
keywords.extend(sm)
tk_syns = self.syn.lookup(tk)
tk_syns = [self.sub_special_char(s) for s in tk_syns]
if len(keywords) < 32:
keywords.extend([s for s in tk_syns if s])
tk_syns = [rag_tokenizer.fine_grained_tokenize(s) for s in tk_syns if s]
tk_syns = [f"\"{s}\"" if s.find(" ") > 0 else s for s in tk_syns]
if len(keywords) >= 32:
break
tk = self.sub_special_char(tk)
if tk.find(" ") > 0:
tk = '"%s"' % tk
if tk_syns:
tk = f"({tk} OR (%s)^0.2)" % " ".join(tk_syns)
if sm:
tk = f'{tk} OR "%s" OR ("%s"~2)^0.5' % (" ".join(sm), " ".join(sm))
if tk.strip():
tms.append((tk, w))
tms = " ".join([f"({t})^{w}" for t, w in tms])
if len(twts) > 1:
tms += ' ("%s"~2)^1.5' % rag_tokenizer.tokenize(tt)
syns = " OR ".join(
[
'"%s"'
% rag_tokenizer.tokenize(self.sub_special_char(s))
for s in syns
]
)
if syns and tms:
tms = f"({tms})^5 OR ({syns})^0.7"
qs.append(tms)
if qs:
query = " OR ".join([f"({t})" for t in qs if t])
if not query:
query = otxt
return MatchTextExpr(
self.query_fields, query, 100, {"minimum_should_match": min_match, "original_query": original_query}
), keywords
return None, keywords | {
"repo_id": "infiniflow/ragflow",
"file_path": "memory/services/query.py",
"license": "Apache License 2.0",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:memory/utils/es_conn.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import json
import time
import copy
from elasticsearch import NotFoundError
from elasticsearch_dsl import UpdateByQuery, Q, Search
from elastic_transport import ConnectionTimeout
from common.decorator import singleton
from common.doc_store.doc_store_base import MatchExpr, OrderByExpr, MatchTextExpr, MatchDenseExpr, FusionExpr
from common.doc_store.es_conn_base import ESConnectionBase
from common.float_utils import get_float
from common.constants import PAGERANK_FLD, TAG_FLD
from rag.nlp.rag_tokenizer import tokenize, fine_grained_tokenize
ATTEMPT_TIME = 2
@singleton
class ESConnection(ESConnectionBase):
@staticmethod
def convert_field_name(field_name: str, use_tokenized_content=False) -> str:
match field_name:
case "message_type":
return "message_type_kwd"
case "status":
return "status_int"
case "content":
if use_tokenized_content:
return "tokenized_content_ltks"
return "content_ltks"
case _:
return field_name
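# Example mapping (hedged sketch): convert_field_name("status") -> "status_int",
# convert_field_name("content", use_tokenized_content=True) -> "tokenized_content_ltks";
# any other field name is returned unchanged.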
@staticmethod
def map_message_to_es_fields(message: dict) -> dict:
"""
Map message dictionary fields to Elasticsearch document/Infinity fields.
:param message: A dictionary containing message details.
:return: A dictionary formatted for Elasticsearch/Infinity indexing.
"""
storage_doc = {
"id": message.get("id"),
"message_id": message["message_id"],
"message_type_kwd": message["message_type"],
"source_id": message["source_id"],
"memory_id": message["memory_id"],
"user_id": message["user_id"],
"agent_id": message["agent_id"],
"session_id": message["session_id"],
"valid_at": message["valid_at"],
"invalid_at": message["invalid_at"],
"forget_at": message["forget_at"],
"status_int": 1 if message["status"] else 0,
"zone_id": message.get("zone_id", 0),
"content_ltks": message["content"],
"tokenized_content_ltks": fine_grained_tokenize(tokenize(message["content"])),
f"q_{len(message['content_embed'])}_vec": message["content_embed"],
}
return storage_doc
@staticmethod
def get_message_from_es_doc(doc: dict) -> dict:
"""
Convert an Elasticsearch/Infinity document back to a message dictionary.
:param doc: A dictionary representing the Elasticsearch/Infinity document.
:return: A dictionary formatted as a message.
"""
embd_field_name = next((key for key in doc.keys() if re.match(r"q_\d+_vec", key)), None)
message = {
"message_id": doc["message_id"],
"message_type": doc["message_type_kwd"],
"source_id": doc["source_id"] if doc["source_id"] else None,
"memory_id": doc["memory_id"],
"user_id": doc.get("user_id", ""),
"agent_id": doc["agent_id"],
"session_id": doc["session_id"],
"zone_id": doc.get("zone_id", 0),
"valid_at": doc["valid_at"],
"invalid_at": doc.get("invalid_at", "-"),
"forget_at": doc.get("forget_at", "-"),
"status": bool(int(doc["status_int"])),
"content": doc.get("content_ltks", ""),
"content_embed": doc.get(embd_field_name, []) if embd_field_name else [],
}
if doc.get("id"):
message["id"] = doc["id"]
return message
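# Note: get_message_from_es_doc is the inverse of map_message_to_es_fields; the embedding
# column is located by the q_<dim>_vec naming pattern, so e.g. a document carrying
# "q_1024_vec" (the dimension here is illustrative) is surfaced as "content_embed".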
"""
CRUD operations
"""
def search(
self, select_fields: list[str],
highlight_fields: list[str],
condition: dict,
match_expressions: list[MatchExpr],
order_by: OrderByExpr,
offset: int,
limit: int,
index_names: str | list[str],
memory_ids: list[str],
agg_fields: list[str] | None = None,
rank_feature: dict | None = None,
hide_forgotten: bool = True
):
"""
Refers to https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html
"""
if isinstance(index_names, str):
index_names = index_names.split(",")
assert isinstance(index_names, list) and len(index_names) > 0
assert "_id" not in condition
exist_index_list = [idx for idx in index_names if self.index_exist(idx)]
if not exist_index_list:
return None, 0
bool_query = Q("bool", must=[], must_not=[])
if hide_forgotten:
# filter out forgotten messages, i.e. those that already carry a forget_at value
bool_query.must_not.append(Q("exists", field="forget_at"))
condition["memory_id"] = memory_ids
for k, v in condition.items():
field_name = self.convert_field_name(k)
if field_name == "session_id" and v:
bool_query.filter.append(Q("query_string", **{"query": f"*{v}*", "fields": ["session_id"], "analyze_wildcard": True}))
continue
if not v:
continue
if isinstance(v, list):
bool_query.filter.append(Q("terms", **{field_name: v}))
elif isinstance(v, str) or isinstance(v, int):
bool_query.filter.append(Q("term", **{field_name: v}))
else:
raise Exception(
f"Condition `{str(k)}={str(v)}` value type is {str(type(v))}, expected to be int, str or list.")
s = Search()
vector_similarity_weight = 0.5
for m in match_expressions:
if isinstance(m, FusionExpr) and m.method == "weighted_sum" and "weights" in m.fusion_params:
assert len(match_expressions) == 3 and isinstance(match_expressions[0], MatchTextExpr) and isinstance(match_expressions[1],
MatchDenseExpr) and isinstance(
match_expressions[2], FusionExpr)
weights = m.fusion_params["weights"]
vector_similarity_weight = get_float(weights.split(",")[1])
for m in match_expressions:
if isinstance(m, MatchTextExpr):
minimum_should_match = m.extra_options.get("minimum_should_match", 0.0)
if isinstance(minimum_should_match, float):
minimum_should_match = str(int(minimum_should_match * 100)) + "%"
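# The conversion above turns a fractional value such as 0.6 into the "60%" string
# form that Elasticsearch expects for minimum_should_match (value shown is illustrative).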
bool_query.must.append(Q("query_string", fields=[self.convert_field_name(f, use_tokenized_content=True) for f in m.fields],
type="best_fields", query=m.matching_text,
minimum_should_match=minimum_should_match,
boost=1))
bool_query.boost = 1.0 - vector_similarity_weight
elif isinstance(m, MatchDenseExpr):
assert (bool_query is not None)
similarity = 0.0
if "similarity" in m.extra_options:
similarity = m.extra_options["similarity"]
s = s.knn(self.convert_field_name(m.vector_column_name),
m.topn,
m.topn * 2,
query_vector=list(m.embedding_data),
filter=bool_query.to_dict(),
similarity=similarity,
)
if bool_query and rank_feature:
for fld, sc in rank_feature.items():
if fld != PAGERANK_FLD:
fld = f"{TAG_FLD}.{fld}"
bool_query.should.append(Q("rank_feature", field=fld, linear={}, boost=sc))
if bool_query:
s = s.query(bool_query)
for field in highlight_fields:
s = s.highlight(field)
if order_by:
orders = list()
for field, order in order_by.fields:
order = "asc" if order == 0 else "desc"
if field.endswith("_int") or field.endswith("_flt"):
order_info = {"order": order, "unmapped_type": "float"}
else:
order_info = {"order": order, "unmapped_type": "text"}
orders.append({field: order_info})
s = s.sort(*orders)
if agg_fields:
for fld in agg_fields:
s.aggs.bucket(f'aggs_{fld}', 'terms', field=fld, size=1000000)
if limit > 0:
s = s[offset:offset + limit]
q = s.to_dict()
self.logger.debug(f"ESConnection.search {str(index_names)} query: " + json.dumps(q))
for i in range(ATTEMPT_TIME):
try:
#print(json.dumps(q, ensure_ascii=False))
res = self.es.search(index=exist_index_list,
body=q,
timeout="600s",
# search_type="dfs_query_then_fetch",
track_total_hits=True,
_source=True)
if str(res.get("timed_out", "")).lower() == "true":
raise Exception("Es Timeout.")
self.logger.debug(f"ESConnection.search {str(index_names)} res: " + str(res))
return res, self.get_total(res)
except ConnectionTimeout:
self.logger.exception("ES request timeout")
self._connect()
continue
except NotFoundError as e:
self.logger.debug(f"ESConnection.search {str(index_names)} query: " + str(q) + str(e))
return None, 0
except Exception as e:
self.logger.exception(f"ESConnection.search {str(index_names)} query: " + str(q) + str(e))
raise e
self.logger.error(f"ESConnection.search timeout for {ATTEMPT_TIME} times!")
raise Exception("ESConnection.search timeout.")
def get_forgotten_messages(self, select_fields: list[str], index_name: str, memory_id: str, limit: int=512):
bool_query = Q("bool", must=[])
bool_query.must.append(Q("exists", field="forget_at"))
bool_query.filter.append(Q("term", memory_id=memory_id))
# from old to new
order_by = OrderByExpr()
order_by.asc("forget_at")
# build search
s = Search()
s = s.query(bool_query)
orders = list()
for field, order in order_by.fields:
order = "asc" if order == 0 else "desc"
if field.endswith("_int") or field.endswith("_flt"):
order_info = {"order": order, "unmapped_type": "float"}
else:
order_info = {"order": order, "unmapped_type": "text"}
orders.append({field: order_info})
s = s.sort(*orders)
s = s[:limit]
q = s.to_dict()
# search
for i in range(ATTEMPT_TIME):
try:
res = self.es.search(index=index_name, body=q, timeout="600s", track_total_hits=True, _source=True)
if str(res.get("timed_out", "")).lower() == "true":
raise Exception("Es Timeout.")
self.logger.debug(f"ESConnection.search {str(index_name)} res: " + str(res))
return res
except ConnectionTimeout:
self.logger.exception("ES request timeout")
self._connect()
continue
except NotFoundError as e:
self.logger.debug(f"ESConnection.search {str(index_name)} query: " + str(q) + str(e))
return None
except Exception as e:
self.logger.exception(f"ESConnection.search {str(index_name)} query: " + str(q) + str(e))
raise e
self.logger.error(f"ESConnection.search timeout for {ATTEMPT_TIME} times!")
raise Exception("ESConnection.search timeout.")
def get_missing_field_message(self, select_fields: list[str], index_name: str, memory_id: str, field_name: str, limit: int=512):
if not self.index_exist(index_name):
return None
bool_query = Q("bool", must=[])
bool_query.must.append(Q("term", memory_id=memory_id))
bool_query.must_not.append(Q("exists", field=field_name))
# from old to new
order_by = OrderByExpr()
order_by.asc("valid_at")
# build search
s = Search()
s = s.query(bool_query)
orders = list()
for field, order in order_by.fields:
order = "asc" if order == 0 else "desc"
if field.endswith("_int") or field.endswith("_flt"):
order_info = {"order": order, "unmapped_type": "float"}
else:
order_info = {"order": order, "unmapped_type": "text"}
orders.append({field: order_info})
s = s.sort(*orders)
s = s[:limit]
q = s.to_dict()
# search
for i in range(ATTEMPT_TIME):
try:
res = self.es.search(index=index_name, body=q, timeout="600s", track_total_hits=True, _source=True)
if str(res.get("timed_out", "")).lower() == "true":
raise Exception("Es Timeout.")
self.logger.debug(f"ESConnection.search {str(index_name)} res: " + str(res))
return res
except ConnectionTimeout:
self.logger.exception("ES request timeout")
self._connect()
continue
except NotFoundError as e:
self.logger.debug(f"ESConnection.search {str(index_name)} query: " + str(q) + str(e))
return None
except Exception as e:
self.logger.exception(f"ESConnection.search {str(index_name)} query: " + str(q) + str(e))
raise e
self.logger.error(f"ESConnection.search timeout for {ATTEMPT_TIME} times!")
raise Exception("ESConnection.search timeout.")
def get(self, doc_id: str, index_name: str, memory_ids: list[str]) -> dict | None:
for i in range(ATTEMPT_TIME):
try:
res = self.es.get(index=index_name,
id=doc_id, source=True, )
if str(res.get("timed_out", "")).lower() == "true":
raise Exception("Es Timeout.")
message = res["_source"]
message["id"] = doc_id
return self.get_message_from_es_doc(message)
except NotFoundError:
return None
except Exception as e:
self.logger.exception(f"ESConnection.get({doc_id}) got exception")
raise e
self.logger.error(f"ESConnection.get timeout for {ATTEMPT_TIME} times!")
raise Exception("ESConnection.get timeout.")
def insert(self, documents: list[dict], index_name: str, memory_id: str = None) -> list[str]:
# Refers to https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
operations = []
for d in documents:
assert "_id" not in d
assert "id" in d
d_copy_raw = copy.deepcopy(d)
d_copy = self.map_message_to_es_fields(d_copy_raw)
d_copy["memory_id"] = memory_id
meta_id = d_copy.pop("id", "")
operations.append(
{"index": {"_index": index_name, "_id": meta_id}})
operations.append(d_copy)
res = []
for _ in range(ATTEMPT_TIME):
try:
res = []
r = self.es.bulk(index=index_name, operations=operations,
refresh=False, timeout="60s")
if re.search(r"False", str(r["errors"]), re.IGNORECASE):
return res
for item in r["items"]:
for action in ["create", "delete", "index", "update"]:
if action in item and "error" in item[action]:
res.append(str(item[action]["_id"]) + ":" + str(item[action]["error"]))
return res
except ConnectionTimeout:
self.logger.exception("ES request timeout")
time.sleep(3)
self._connect()
continue
except Exception as e:
res.append(str(e))
self.logger.warning("ESConnection.insert got exception: " + str(e))
return res
def update(self, condition: dict, new_value: dict, index_name: str, memory_id: str) -> bool:
doc = copy.deepcopy(new_value)
update_dict = {self.convert_field_name(k): v for k, v in doc.items()}
if "content_ltks" in update_dict:
update_dict["tokenized_content_ltks"] = fine_grained_tokenize(tokenize(update_dict["content_ltks"]))
update_dict.pop("id", None)
condition_dict = {self.convert_field_name(k): v for k, v in condition.items()}
condition_dict["memory_id"] = memory_id
if "id" in condition_dict and isinstance(condition_dict["id"], str):
# update specific single document
message_id = condition_dict["id"]
for i in range(ATTEMPT_TIME):
for k in update_dict.keys():
if "feas" != k.split("_")[-1]:
continue
try:
self.es.update(index=index_name, id=message_id, script=f"ctx._source.remove(\"{k}\");")
except Exception:
self.logger.exception(f"ESConnection.update(index={index_name}, id={message_id}, doc={json.dumps(condition, ensure_ascii=False)}) got exception")
try:
self.es.update(index=index_name, id=message_id, doc=update_dict)
return True
except Exception as e:
self.logger.exception(
f"ESConnection.update(index={index_name}, id={message_id}, doc={json.dumps(condition, ensure_ascii=False)}) got exception: " + str(e))
break
return False
# update by condition, possibly matching multiple documents
bool_query = Q("bool")
for k, v in condition_dict.items():
if not isinstance(k, str) or not v:
continue
if k == "exists":
bool_query.filter.append(Q("exists", field=v))
continue
if isinstance(v, list):
bool_query.filter.append(Q("terms", **{k: v}))
elif isinstance(v, str) or isinstance(v, int):
bool_query.filter.append(Q("term", **{k: v}))
else:
raise Exception(
f"Condition `{str(k)}={str(v)}` value type is {str(type(v))}, expected to be int, str or list.")
scripts = []
params = {}
for k, v in update_dict.items():
if k == "remove":
if isinstance(v, str):
scripts.append(f"ctx._source.remove('{v}');")
if isinstance(v, dict):
for kk, vv in v.items():
scripts.append(f"int i=ctx._source.{kk}.indexOf(params.p_{kk});ctx._source.{kk}.remove(i);")
params[f"p_{kk}"] = vv
continue
if k == "add":
if isinstance(v, dict):
for kk, vv in v.items():
scripts.append(f"ctx._source.{kk}.add(params.pp_{kk});")
params[f"pp_{kk}"] = vv.strip()
continue
if (not isinstance(k, str) or not v) and k != "status_int":
continue
if isinstance(v, str):
v = re.sub(r"(['\n\r]|\\.)", " ", v)
params[f"pp_{k}"] = v
scripts.append(f"ctx._source.{k}=params.pp_{k};")
elif isinstance(v, int) or isinstance(v, float):
scripts.append(f"ctx._source.{k}={v};")
elif isinstance(v, list):
scripts.append(f"ctx._source.{k}=params.pp_{k};")
params[f"pp_{k}"] = json.dumps(v, ensure_ascii=False)
else:
raise Exception(
f"newValue `{str(k)}={str(v)}` value type is {str(type(v))}, expected to be int, str.")
ubq = UpdateByQuery(
index=index_name).using(
self.es).query(bool_query)
ubq = ubq.script(source="".join(scripts), params=params)
ubq = ubq.params(refresh=True)
ubq = ubq.params(slices=5)
ubq = ubq.params(conflicts="proceed")
for _ in range(ATTEMPT_TIME):
try:
_ = ubq.execute()
return True
except ConnectionTimeout:
self.logger.exception("ES request timeout")
time.sleep(3)
self._connect()
continue
except Exception as e:
self.logger.error("ESConnection.update got exception: " + str(e) + "\n".join(scripts))
break
return False
def delete(self, condition: dict, index_name: str, memory_id: str) -> int:
assert "_id" not in condition
condition_dict = {self.convert_field_name(k): v for k, v in condition.items()}
condition_dict["memory_id"] = memory_id
if "id" in condition_dict:
message_ids = condition_dict["id"]
if not isinstance(message_ids, list):
message_ids = [message_ids]
if not message_ids: # when message_ids is empty, delete all
qry = Q("match_all")
else:
qry = Q("ids", values=message_ids)
else:
qry = Q("bool")
for k, v in condition_dict.items():
if k == "exists":
qry.filter.append(Q("exists", field=v))
elif k == "must_not":
if isinstance(v, dict):
for kk, vv in v.items():
if kk == "exists":
qry.must_not.append(Q("exists", field=vv))
elif isinstance(v, list):
qry.must.append(Q("terms", **{k: v}))
elif isinstance(v, str) or isinstance(v, int):
qry.must.append(Q("term", **{k: v}))
else:
raise Exception("Condition value must be int, str or list.")
self.logger.debug("ESConnection.delete query: " + json.dumps(qry.to_dict()))
for _ in range(ATTEMPT_TIME):
try:
res = self.es.delete_by_query(
index=index_name,
body=Search().query(qry).to_dict(),
refresh=True)
return res["deleted"]
except ConnectionTimeout:
self.logger.exception("ES request timeout")
time.sleep(3)
self._connect()
continue
except Exception as e:
self.logger.warning("ESConnection.delete got exception: " + str(e))
if re.search(r"(not_found)", str(e), re.IGNORECASE):
return 0
return 0
"""
Helper functions for search result
"""
def get_fields(self, res, fields: list[str]) -> dict[str, dict]:
res_fields = {}
if not fields:
return {}
for doc in self._get_source(res):
message = self.get_message_from_es_doc(doc)
m = {}
for n, v in message.items():
if n not in fields:
continue
if isinstance(v, list):
m[n] = v
continue
if n in ["message_id", "source_id", "valid_at", "invalid_at", "forget_at", "status"] and isinstance(v, (int, float, bool)):
m[n] = v
continue
if not isinstance(v, str):
m[n] = str(v)
else:
m[n] = v
if m:
res_fields[doc["id"]] = m
return res_fields
| {
"repo_id": "infiniflow/ragflow",
"file_path": "memory/utils/es_conn.py",
"license": "Apache License 2.0",
"lines": 526,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:memory/utils/infinity_conn.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import json
import copy
from infinity.common import InfinityException, SortType
from infinity.errors import ErrorCode
from common.decorator import singleton
import pandas as pd
from common.doc_store.doc_store_base import MatchExpr, MatchTextExpr, MatchDenseExpr, FusionExpr, OrderByExpr
from common.doc_store.infinity_conn_base import InfinityConnectionBase
from common.time_utils import date_string_to_timestamp
@singleton
class InfinityConnection(InfinityConnectionBase):
def __init__(self):
super().__init__(mapping_file_name="message_infinity_mapping.json", table_name_prefix="memory_")
"""
Dataframe and fields convert
"""
@staticmethod
def field_keyword(field_name: str):
# no keywords right now
return False
@staticmethod
def convert_message_field_to_infinity(field_name: str, table_fields: list[str]=None):
match field_name:
case "message_type":
return "message_type_kwd"
case "status":
return "status_int"
case "content_embed":
if not table_fields:
raise Exception("Can't convert 'content_embed' to vector field name with empty table fields.")
vector_field = [tf for tf in table_fields if re.match(r"q_\d+_vec", tf)]
if not vector_field:
raise Exception("Can't convert 'content_embed' to vector field name. No match field name found.")
return vector_field[0]
case _:
return field_name
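# Example (sketch; the vector dimension is illustrative): with table fields such as
# ["id", "q_768_vec", ...], convert_message_field_to_infinity("content_embed", table_fields)
# returns "q_768_vec", while "status" maps to "status_int" and unknown names pass through unchanged.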
@staticmethod
def convert_infinity_field_to_message(field_name: str):
if field_name.startswith("message_type"):
return "message_type"
if field_name.startswith("status"):
return "status"
if re.match(r"q_\d+_vec", field_name):
return "content_embed"
return field_name
def convert_select_fields(self, output_fields: list[str], table_fields: list[str]=None) -> list[str]:
return list({self.convert_message_field_to_infinity(f, table_fields) for f in output_fields})
@staticmethod
def convert_matching_field(field_weight_str: str) -> str:
tokens = field_weight_str.split("^")
field = tokens[0]
if field == "content":
field = "content@ft_content_rag_fine"
tokens[0] = field
return "^".join(tokens)
@staticmethod
def convert_condition_and_order_field(field_name: str):
match field_name:
case "message_type":
return "message_type_kwd"
case "status":
return "status_int"
case "valid_at":
return "valid_at_flt"
case "invalid_at":
return "invalid_at_flt"
case "forget_at":
return "forget_at_flt"
case _:
return field_name
"""
CRUD operations
"""
def search(
self,
select_fields: list[str],
highlight_fields: list[str],
condition: dict,
match_expressions: list[MatchExpr],
order_by: OrderByExpr,
offset: int,
limit: int,
index_names: str | list[str],
memory_ids: list[str],
agg_fields: list[str] | None = None,
rank_feature: dict | None = None,
hide_forgotten: bool = True,
) -> tuple[pd.DataFrame, int]:
"""
BUG: Infinity returns empty for a highlight field if the query string doesn't use that field.
"""
if isinstance(index_names, str):
index_names = index_names.split(",")
assert isinstance(index_names, list) and len(index_names) > 0
inf_conn = self.connPool.get_conn()
db_instance = inf_conn.get_database(self.dbName)
df_list = list()
table_list = list()
if hide_forgotten:
condition.update({"must_not": {"exists": "forget_at_flt"}})
output = select_fields.copy()
if agg_fields is None:
agg_fields = []
for essential_field in ["id"] + agg_fields:
if essential_field not in output:
output.append(essential_field)
score_func = ""
score_column = ""
for matchExpr in match_expressions:
if isinstance(matchExpr, MatchTextExpr):
score_func = "score()"
score_column = "SCORE"
break
if not score_func:
for matchExpr in match_expressions:
if isinstance(matchExpr, MatchDenseExpr):
score_func = "similarity()"
score_column = "SIMILARITY"
break
if match_expressions:
if score_func not in output:
output.append(score_func)
output = [f for f in output if f != "_score"]
if limit <= 0:
# ElasticSearch default limit is 10000
limit = 10000
# Prepare expressions common to all tables
filter_cond = None
filter_fulltext = ""
if condition:
condition_dict = {self.convert_condition_and_order_field(k): v for k, v in condition.items()}
table_found = False
for indexName in index_names:
for mem_id in memory_ids:
table_name = f"{indexName}_{mem_id}"
try:
filter_cond = self.equivalent_condition_to_str(condition_dict, db_instance.get_table(table_name))
table_found = True
break
except Exception:
pass
if table_found:
break
if not table_found:
self.logger.error(f"No valid tables found for indexNames {index_names} and memoryIds {memory_ids}")
return pd.DataFrame(), 0
for matchExpr in match_expressions:
if isinstance(matchExpr, MatchTextExpr):
if filter_cond and "filter" not in matchExpr.extra_options:
matchExpr.extra_options.update({"filter": filter_cond})
matchExpr.fields = [self.convert_matching_field(field) for field in matchExpr.fields]
fields = ",".join(matchExpr.fields)
filter_fulltext = f"filter_fulltext('{fields}', '{matchExpr.matching_text}')"
if filter_cond:
filter_fulltext = f"({filter_cond}) AND {filter_fulltext}"
minimum_should_match = matchExpr.extra_options.get("minimum_should_match", 0.0)
if isinstance(minimum_should_match, float):
str_minimum_should_match = str(int(minimum_should_match * 100)) + "%"
matchExpr.extra_options["minimum_should_match"] = str_minimum_should_match
for k, v in matchExpr.extra_options.items():
if not isinstance(v, str):
matchExpr.extra_options[k] = str(v)
self.logger.debug(f"INFINITY search MatchTextExpr: {json.dumps(matchExpr.__dict__)}")
elif isinstance(matchExpr, MatchDenseExpr):
if filter_fulltext and "filter" not in matchExpr.extra_options:
matchExpr.extra_options.update({"filter": filter_fulltext})
for k, v in matchExpr.extra_options.items():
if not isinstance(v, str):
matchExpr.extra_options[k] = str(v)
similarity = matchExpr.extra_options.get("similarity")
if similarity:
matchExpr.extra_options["threshold"] = similarity
del matchExpr.extra_options["similarity"]
self.logger.debug(f"INFINITY search MatchDenseExpr: {json.dumps(matchExpr.__dict__)}")
elif isinstance(matchExpr, FusionExpr):
if matchExpr.method == "weighted_sum":
# The default is "minmax" which gives a zero score for the last doc.
matchExpr.fusion_params["normalize"] = "atan"
self.logger.debug(f"INFINITY search FusionExpr: {json.dumps(matchExpr.__dict__)}")
order_by_expr_list = list()
if order_by.fields:
for order_field in order_by.fields:
order_field_name = self.convert_condition_and_order_field(order_field[0])
if order_field[1] == 0:
order_by_expr_list.append((order_field_name, SortType.Asc))
else:
order_by_expr_list.append((order_field_name, SortType.Desc))
total_hits_count = 0
# Scatter search tables and gather the results
column_name_list = []
for indexName in index_names:
for memory_id in memory_ids:
table_name = f"{indexName}_{memory_id}"
try:
table_instance = db_instance.get_table(table_name)
except Exception:
continue
table_list.append(table_name)
if not column_name_list:
column_name_list = [r[0] for r in table_instance.show_columns().rows()]
output = self.convert_select_fields(output, column_name_list)
builder = table_instance.output(output)
if len(match_expressions) > 0:
for matchExpr in match_expressions:
if isinstance(matchExpr, MatchTextExpr):
fields = ",".join(matchExpr.fields)
builder = builder.match_text(
fields,
matchExpr.matching_text,
matchExpr.topn,
matchExpr.extra_options.copy(),
)
elif isinstance(matchExpr, MatchDenseExpr):
builder = builder.match_dense(
matchExpr.vector_column_name,
matchExpr.embedding_data,
matchExpr.embedding_data_type,
matchExpr.distance_type,
matchExpr.topn,
matchExpr.extra_options.copy(),
)
elif isinstance(matchExpr, FusionExpr):
builder = builder.fusion(matchExpr.method, matchExpr.topn, matchExpr.fusion_params)
else:
if filter_cond and len(filter_cond) > 0:
builder.filter(filter_cond)
if order_by.fields:
builder.sort(order_by_expr_list)
builder.offset(offset).limit(limit)
mem_res, extra_result = builder.option({"total_hits_count": True}).to_df()
if extra_result:
total_hits_count += int(extra_result["total_hits_count"])
self.logger.debug(f"INFINITY search table: {str(table_name)}, result: {str(mem_res)}")
df_list.append(mem_res)
self.connPool.release_conn(inf_conn)
res = self.concat_dataframes(df_list, output)
if match_expressions:
res["_score"] = res[score_column]
res = res.sort_values(by="_score", ascending=False).reset_index(drop=True)
res = res.head(limit)
self.logger.debug(f"INFINITY search final result: {str(res)}")
return res, total_hits_count
def get_forgotten_messages(self, select_fields: list[str], index_name: str, memory_id: str, limit: int=512):
condition = {"memory_id": memory_id, "exists": "forget_at_flt"}
order_by = OrderByExpr()
order_by.asc("forget_at_flt")
# query
inf_conn = self.connPool.get_conn()
db_instance = inf_conn.get_database(self.dbName)
table_name = f"{index_name}_{memory_id}"
table_instance = db_instance.get_table(table_name)
column_name_list = [r[0] for r in table_instance.show_columns().rows()]
output_fields = [self.convert_message_field_to_infinity(f, column_name_list) for f in select_fields]
builder = table_instance.output(output_fields)
filter_cond = self.equivalent_condition_to_str(condition, db_instance.get_table(table_name))
builder.filter(filter_cond)
order_by_expr_list = list()
if order_by.fields:
for order_field in order_by.fields:
order_field_name = self.convert_condition_and_order_field(order_field[0])
if order_field[1] == 0:
order_by_expr_list.append((order_field_name, SortType.Asc))
else:
order_by_expr_list.append((order_field_name, SortType.Desc))
builder.sort(order_by_expr_list)
builder.offset(0).limit(limit)
mem_res, _ = builder.option({"total_hits_count": True}).to_df()
res = self.concat_dataframes(mem_res, output_fields)
res = res.head(limit)
self.connPool.release_conn(inf_conn)
return res
def get_missing_field_message(self, select_fields: list[str], index_name: str, memory_id: str, field_name: str, limit: int=512):
condition = {"memory_id": memory_id, "must_not": {"exists": field_name}}
order_by = OrderByExpr()
order_by.asc("valid_at_flt")
# query
inf_conn = self.connPool.get_conn()
db_instance = inf_conn.get_database(self.dbName)
table_name = f"{index_name}_{memory_id}"
table_instance = db_instance.get_table(table_name)
column_name_list = [r[0] for r in table_instance.show_columns().rows()]
output_fields = [self.convert_message_field_to_infinity(f, column_name_list) for f in select_fields]
builder = table_instance.output(output_fields)
filter_cond = self.equivalent_condition_to_str(condition, db_instance.get_table(table_name))
builder.filter(filter_cond)
order_by_expr_list = list()
if order_by.fields:
for order_field in order_by.fields:
order_field_name = self.convert_condition_and_order_field(order_field[0])
if order_field[1] == 0:
order_by_expr_list.append((order_field_name, SortType.Asc))
else:
order_by_expr_list.append((order_field_name, SortType.Desc))
builder.sort(order_by_expr_list)
builder.offset(0).limit(limit)
mem_res, _ = builder.option({"total_hits_count": True}).to_df()
res = self.concat_dataframes(mem_res, output_fields)
res = res.head(limit)
self.connPool.release_conn(inf_conn)
return res
def get(self, message_id: str, index_name: str, memory_ids: list[str]) -> dict | None:
inf_conn = self.connPool.get_conn()
db_instance = inf_conn.get_database(self.dbName)
df_list = list()
assert isinstance(memory_ids, list)
table_list = list()
for memoryId in memory_ids:
table_name = f"{index_name}_{memoryId}"
table_list.append(table_name)
try:
table_instance = db_instance.get_table(table_name)
except Exception:
self.logger.warning(f"Table not found: {table_name}, this memory isn't created in Infinity. Maybe it is created in other document engine.")
continue
mem_res, _ = table_instance.output(["*"]).filter(f"id = '{message_id}'").to_df()
self.logger.debug(f"INFINITY get table: {str(table_list)}, result: {str(mem_res)}")
df_list.append(mem_res)
self.connPool.release_conn(inf_conn)
res = self.concat_dataframes(df_list, ["id"])
fields = set(res.columns.tolist())
res_fields = self.get_fields(res, list(fields))
return {self.convert_infinity_field_to_message(k): v for k, v in res_fields[message_id].items()} if res_fields.get(message_id) else {}
def insert(self, documents: list[dict], index_name: str, memory_id: str = None) -> list[str]:
if not documents:
return []
inf_conn = self.connPool.get_conn()
db_instance = inf_conn.get_database(self.dbName)
table_name = f"{index_name}_{memory_id}"
vector_size = int(len(documents[0]["content_embed"]))
try:
table_instance = db_instance.get_table(table_name)
except InfinityException as e:
# src/common/status.cppm, kTableNotExist = 3022
if e.error_code != ErrorCode.TABLE_NOT_EXIST:
raise
if vector_size == 0:
raise ValueError("Cannot infer vector size from documents")
self.create_idx(index_name, memory_id, vector_size)
table_instance = db_instance.get_table(table_name)
# embedding fields can't have a default value, so missing ones are filled with zero vectors below
embedding_columns = []
table_columns = table_instance.show_columns().rows()
for n, ty, _, _ in table_columns:
r = re.search(r"Embedding\([a-z]+,([0-9]+)\)", ty)
if not r:
continue
embedding_columns.append((n, int(r.group(1))))
docs = copy.deepcopy(documents)
for d in docs:
assert "_id" not in d
assert "id" in d
for k, v in list(d.items()):
if k == "content_embed":
d[f"q_{vector_size}_vec"] = d["content_embed"]
d.pop("content_embed")
continue
field_name = self.convert_message_field_to_infinity(k)
if field_name in ["valid_at", "invalid_at", "forget_at"]:
d[f"{field_name}_flt"] = date_string_to_timestamp(v) if v else 0
if v is None:
d[field_name] = ""
elif self.field_keyword(k):
if isinstance(v, list):
d[k] = "###".join(v)
else:
d[k] = v
elif k == "memory_id":
if isinstance(d[k], list):
d[k] = d[k][0] # since d[k] is a list, but we need a str
else:
d[field_name] = v
if k != field_name:
d.pop(k)
for n, vs in embedding_columns:
if n in d:
continue
d[n] = [0] * vs
ids = ["'{}'".format(d["id"]) for d in docs]
str_ids = ", ".join(ids)
str_filter = f"id IN ({str_ids})"
table_instance.delete(str_filter)
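# Delete-then-insert makes this an upsert: rows with the same ids are removed first,
# so re-inserting the same documents does not create duplicates.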
table_instance.insert(docs)
self.connPool.release_conn(inf_conn)
self.logger.debug(f"INFINITY inserted into {table_name} {str_ids}.")
return []
def update(self, condition: dict, new_value: dict, index_name: str, memory_id: str) -> bool:
inf_conn = self.connPool.get_conn()
db_instance = inf_conn.get_database(self.dbName)
table_name = f"{index_name}_{memory_id}"
table_instance = db_instance.get_table(table_name)
columns = {}
if table_instance:
for n, ty, de, _ in table_instance.show_columns().rows():
columns[n] = (ty, de)
condition_dict = {self.convert_condition_and_order_field(k): v for k, v in condition.items()}
filter = self.equivalent_condition_to_str(condition_dict, table_instance)
update_dict = {self.convert_message_field_to_infinity(k): v for k, v in new_value.items()}
date_floats = {}
for k, v in update_dict.items():
if k in ["valid_at", "invalid_at", "forget_at"]:
date_floats[f"{k}_flt"] = date_string_to_timestamp(v) if v else 0
elif self.field_keyword(k):
if isinstance(v, list):
update_dict[k] = "###".join(v)
else:
update_dict[k] = v
elif k == "memory_id":
if isinstance(update_dict[k], list):
update_dict[k] = update_dict[k][0] # since d[k] is a list, but we need a str
else:
update_dict[k] = v
if date_floats:
update_dict.update(date_floats)
self.logger.debug(f"INFINITY update table {table_name}, filter {filter}, newValue {new_value}.")
table_instance.update(filter, update_dict)
self.connPool.release_conn(inf_conn)
return True
"""
Helper functions for search result
"""
def get_fields(self, res: tuple[pd.DataFrame, int] | pd.DataFrame, fields: list[str]) -> dict[str, dict]:
if isinstance(res, tuple):
res_df = res[0]
else:
res_df = res
if not fields:
return {}
fields_all = fields.copy()
fields_all.append("id")
fields_all = self.convert_select_fields(fields_all, res_df.columns.tolist())
column_map = {col.lower(): col for col in res_df.columns}
matched_columns = {column_map[col.lower()]: col for col in fields_all if col.lower() in column_map}
none_columns = [col for col in fields_all if col.lower() not in column_map]
selected_res = res_df[matched_columns.keys()]
selected_res = selected_res.rename(columns=matched_columns)
selected_res.drop_duplicates(subset=["id"], inplace=True)
for column in list(selected_res.columns):
k = column.lower()
if self.field_keyword(k):
selected_res[column] = selected_res[column].apply(lambda v: [kwd for kwd in v.split("###") if kwd])
else:
pass
for column in none_columns:
selected_res[column] = None
res_dict = selected_res.set_index("id").to_dict(orient="index")
return {_id: {self.convert_infinity_field_to_message(k): v for k, v in doc.items()} for _id, doc in res_dict.items()}
| {
"repo_id": "infiniflow/ragflow",
"file_path": "memory/utils/infinity_conn.py",
"license": "Apache License 2.0",
"lines": 462,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:memory/utils/prompt_util.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Optional, List
from common.constants import MemoryType
from common.time_utils import current_timestamp
class PromptAssembler:
SYSTEM_BASE_TEMPLATE = """**Memory Extraction Specialist**
You are an expert at analyzing conversations to extract structured memory.
{type_specific_instructions}
**OUTPUT REQUIREMENTS:**
1. Output MUST be valid JSON
2. Follow the specified output format exactly
3. Each extracted item MUST have: content, valid_at, invalid_at
4. Timestamps in {timestamp_format} format
5. Only extract memory types specified above
6. Maximum {max_items} items per type
"""
TYPE_INSTRUCTIONS = {
MemoryType.SEMANTIC.name.lower(): """
**EXTRACT SEMANTIC KNOWLEDGE:**
- Universal facts, definitions, concepts, relationships
- Time-invariant, generally true information
- Examples: "The capital of France is Paris", "Water boils at 100°C"
**Timestamp Rules for Semantic Knowledge:**
- valid_at: When the fact became true (e.g., law enactment, discovery)
- invalid_at: When it becomes false (e.g., repeal, disproven) or empty if still true
- Default: valid_at = conversation time, invalid_at = "" for timeless facts
""",
MemoryType.EPISODIC.name.lower(): """
**EXTRACT EPISODIC KNOWLEDGE:**
- Specific experiences, events, personal stories
- Time-bound, person-specific, contextual
- Examples: "Yesterday I fixed the bug", "User reported issue last week"
**Timestamp Rules for Episodic Knowledge:**
- valid_at: Event start/occurrence time
- invalid_at: Event end time or empty if instantaneous
- Extract explicit times: "at 3 PM", "last Monday", "from X to Y"
""",
MemoryType.PROCEDURAL.name.lower(): """
**EXTRACT PROCEDURAL KNOWLEDGE:**
- Processes, methods, step-by-step instructions
- Goal-oriented, actionable, often includes conditions
- Examples: "To reset password, click...", "Debugging steps: 1)..."
**Timestamp Rules for Procedural Knowledge:**
- valid_at: When procedure becomes valid/effective
- invalid_at: When it expires/becomes obsolete or empty if current
- For version-specific: use release dates
- For best practices: invalid_at = ""
"""
}
OUTPUT_TEMPLATES = {
MemoryType.SEMANTIC.name.lower(): """
"semantic": [
{
"content": "Clear factual statement",
"valid_at": "timestamp or empty",
"invalid_at": "timestamp or empty"
}
]
""",
MemoryType.EPISODIC.name.lower(): """
"episodic": [
{
"content": "Narrative event description",
"valid_at": "event start timestamp",
"invalid_at": "event end timestamp or empty"
}
]
""",
MemoryType.PROCEDURAL.name.lower(): """
"procedural": [
{
"content": "Actionable instructions",
"valid_at": "procedure effective timestamp",
"invalid_at": "procedure expiration timestamp or empty"
}
]
"""
}
BASE_USER_PROMPT = """
**CONVERSATION:**
{conversation}
**CONVERSATION TIME:** {conversation_time}
**CURRENT TIME:** {current_time}
"""
@classmethod
def assemble_system_prompt(cls, config: dict) -> str:
types_to_extract = cls._get_types_to_extract(config["memory_type"])
type_instructions = cls._generate_type_instructions(types_to_extract)
output_format = cls._generate_output_format(types_to_extract)
full_prompt = cls.SYSTEM_BASE_TEMPLATE.format(
type_specific_instructions=type_instructions,
timestamp_format=config.get("timestamp_format", "ISO 8601"),
max_items=config.get("max_items_per_type", 5)
)
full_prompt += f"\n**REQUIRED OUTPUT FORMAT (JSON):**\n```json\n{{\n{output_format}\n}}\n```\n"
examples = cls._generate_examples(types_to_extract)
if examples:
full_prompt += f"\n**EXAMPLES:**\n{examples}\n"
return full_prompt
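# Illustrative usage (hedged sketch; the config keys shown are exactly the ones read above):
#   PromptAssembler.assemble_system_prompt({"memory_type": ["semantic", "episodic"], "max_items_per_type": 5})
# returns the base template plus per-type instructions, the required JSON output format, and examples.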
@staticmethod
def _get_types_to_extract(requested_types: List[str]) -> List[str]:
types = set()
for rt in requested_types:
if rt in [e.name.lower() for e in MemoryType] and rt != MemoryType.RAW.name.lower():
types.add(rt)
return list(types)
@classmethod
def _generate_type_instructions(cls, types_to_extract: List[str]) -> str:
target_types = set(types_to_extract)
instructions = [cls.TYPE_INSTRUCTIONS[mt] for mt in target_types]
return "\n".join(instructions)
@classmethod
def _generate_output_format(cls, types_to_extract: List[str]) -> str:
target_types = set(types_to_extract)
output_parts = [cls.OUTPUT_TEMPLATES[mt] for mt in target_types]
return ",\n".join(output_parts)
@staticmethod
def _generate_examples(types_to_extract: list[str]) -> str:
examples = []
if MemoryType.SEMANTIC.name.lower() in types_to_extract:
examples.append("""
**Semantic Example:**
Input: "Python lists are mutable and support various operations."
Output: {"semantic": [{"content": "Python lists are mutable data structures", "valid_at": "2024-01-15T10:00:00", "invalid_at": ""}]}
""")
if MemoryType.EPISODIC.name.lower() in types_to_extract:
examples.append("""
**Episodic Example:**
Input: "I deployed the new feature yesterday afternoon."
Output: {"episodic": [{"content": "User deployed new feature", "valid_at": "2024-01-14T14:00:00", "invalid_at": "2024-01-14T18:00:00"}]}
""")
if MemoryType.PROCEDURAL.name.lower() in types_to_extract:
examples.append("""
**Procedural Example:**
Input: "To debug API errors: 1) Check logs 2) Verify endpoints 3) Test connectivity."
Output: {"procedural": [{"content": "API error debugging: 1. Check logs 2. Verify endpoints 3. Test connectivity", "valid_at": "2024-01-15T10:00:00", "invalid_at": ""}]}
""")
return "\n".join(examples)
@classmethod
def assemble_user_prompt(
cls,
conversation: str,
conversation_time: Optional[str] = None,
current_time: Optional[str] = None
) -> str:
return cls.BASE_USER_PROMPT.format(
conversation=conversation,
conversation_time=conversation_time or "Not specified",
current_time=current_time or current_timestamp(),
)
@classmethod
def get_raw_user_prompt(cls):
return cls.BASE_USER_PROMPT
| {
"repo_id": "infiniflow/ragflow",
"file_path": "memory/utils/prompt_util.py",
"license": "Apache License 2.0",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
infiniflow/ragflow:common/data_source/airtable_connector.py | from datetime import datetime, timezone
import logging
from typing import Any, Generator
import requests
from pyairtable import Api as AirtableApi
from common.data_source.config import AIRTABLE_CONNECTOR_SIZE_THRESHOLD, INDEX_BATCH_SIZE, DocumentSource
from common.data_source.exceptions import ConnectorMissingCredentialError
from common.data_source.interfaces import LoadConnector, PollConnector
from common.data_source.models import Document, GenerateDocumentsOutput, SecondsSinceUnixEpoch
from common.data_source.utils import extract_size_bytes, get_file_ext
class AirtableClientNotSetUpError(PermissionError):
def __init__(self) -> None:
super().__init__(
"Airtable client is not set up. Did you forget to call load_credentials()?"
)
class AirtableConnector(LoadConnector, PollConnector):
"""
Lightweight Airtable connector.
This connector ingests Airtable attachments as raw blobs without
parsing file content or generating text/image sections.
"""
def __init__(
self,
base_id: str,
table_name_or_id: str,
batch_size: int = INDEX_BATCH_SIZE,
) -> None:
self.base_id = base_id
self.table_name_or_id = table_name_or_id
self.batch_size = batch_size
self._airtable_client: AirtableApi | None = None
self.size_threshold = AIRTABLE_CONNECTOR_SIZE_THRESHOLD
# -------------------------
# Credentials
# -------------------------
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
self._airtable_client = AirtableApi(credentials["airtable_access_token"])
return None
@property
def airtable_client(self) -> AirtableApi:
if not self._airtable_client:
raise AirtableClientNotSetUpError()
return self._airtable_client
# -------------------------
# Core logic
# -------------------------
def load_from_state(self) -> GenerateDocumentsOutput:
"""
Fetch all Airtable records and ingest attachments as raw blobs.
Each attachment is converted into a single Document(blob=...).
"""
if not self._airtable_client:
raise ConnectorMissingCredentialError("Airtable credentials not loaded")
table = self.airtable_client.table(self.base_id, self.table_name_or_id)
records = table.all()
logging.info(
f"Starting Airtable blob ingestion for table {self.table_name_or_id}, "
f"{len(records)} records found."
)
batch: list[Document] = []
for record in records:
record_id = record.get("id")
fields = record.get("fields", {})
created_time = record.get("createdTime")
for field_value in fields.values():
# We only care about attachment fields (lists of dicts with url/filename)
if not isinstance(field_value, list):
continue
for attachment in field_value:
url = attachment.get("url")
filename = attachment.get("filename")
attachment_id = attachment.get("id")
if not url or not filename or not attachment_id:
continue
try:
resp = requests.get(url, timeout=30)
resp.raise_for_status()
content = resp.content
except Exception:
logging.exception(
f"Failed to download attachment {filename} "
f"(record={record_id})"
)
continue
size_bytes = extract_size_bytes(attachment)
if (
self.size_threshold is not None
and isinstance(size_bytes, int)
and size_bytes > self.size_threshold
):
logging.warning(
f"{filename} exceeds size threshold of {self.size_threshold}. Skipping."
)
continue
batch.append(
Document(
id=f"airtable:{record_id}:{attachment_id}",
blob=content,
source=DocumentSource.AIRTABLE,
semantic_identifier=filename,
extension=get_file_ext(filename),
size_bytes=size_bytes if size_bytes else 0,
doc_updated_at=datetime.strptime(created_time, "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=timezone.utc)
)
)
if len(batch) >= self.batch_size:
yield batch
batch = []
if batch:
yield batch
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> Generator[list[Document], None, None]:
"""Poll source to get documents"""
start_dt = datetime.fromtimestamp(start, tz=timezone.utc)
end_dt = datetime.fromtimestamp(end, tz=timezone.utc)
for batch in self.load_from_state():
filtered: list[Document] = []
for doc in batch:
if not doc.doc_updated_at:
continue
doc_dt = doc.doc_updated_at.astimezone(timezone.utc)
if start_dt <= doc_dt < end_dt:
filtered.append(doc)
if filtered:
yield filtered
if __name__ == "__main__":
import os
logging.basicConfig(level=logging.DEBUG)
connector = AirtableConnector("xxx","xxx")
connector.load_credentials({"airtable_access_token": os.environ.get("AIRTABLE_ACCESS_TOKEN")})
connector.validate_connector_settings()
document_batches = connector.load_from_state()
try:
first_batch = next(document_batches)
print(f"Loaded {len(first_batch)} documents in first batch.")
for doc in first_batch:
print(f"- {doc.semantic_identifier} ({doc.size_bytes} bytes)")
except StopIteration:
print("No documents available in Dropbox.") | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/airtable_connector.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/parser_config_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any
def normalize_layout_recognizer(layout_recognizer_raw: Any) -> tuple[Any, str | None]:
parser_model_name: str | None = None
layout_recognizer = layout_recognizer_raw
if isinstance(layout_recognizer_raw, str):
lowered = layout_recognizer_raw.lower()
if lowered.endswith("@mineru"):
parser_model_name = layout_recognizer_raw.rsplit("@", 1)[0]
layout_recognizer = "MinerU"
elif lowered.endswith("@paddleocr"):
parser_model_name = layout_recognizer_raw.rsplit("@", 1)[0]
layout_recognizer = "PaddleOCR"
return layout_recognizer, parser_model_name
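# Example (sketch; the model name is hypothetical):
#   normalize_layout_recognizer("qwen-vl@mineru")  -> ("MinerU", "qwen-vl")
#   normalize_layout_recognizer("DeepDOC")         -> ("DeepDOC", None)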
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/parser_config_utils.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:agent/component/excel_processor.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
ExcelProcessor Component
A component for reading, processing, and generating Excel files in RAGFlow agents.
Supports multiple Excel file inputs, data transformation, and Excel output generation.
"""
import logging
import os
from abc import ABC
from io import BytesIO
import pandas as pd
from agent.component.base import ComponentBase, ComponentParamBase
from api.db.services.file_service import FileService
from api.utils.api_utils import timeout
from common import settings
from common.misc_utils import get_uuid
class ExcelProcessorParam(ComponentParamBase):
"""
Define the ExcelProcessor component parameters.
"""
def __init__(self):
super().__init__()
# Input configuration
self.input_files = [] # Variable references to uploaded files
self.operation = "read" # read, merge, transform, output
# Processing options
self.sheet_selection = "all" # all, first, or comma-separated sheet names
self.merge_strategy = "concat" # concat, join
self.join_on = "" # Column name for join operations
# Transform options (for LLM-guided transformations)
self.transform_instructions = ""
self.transform_data = "" # Variable reference to transformation data
# Output options
self.output_format = "xlsx" # xlsx, csv
self.output_filename = "output"
# Component outputs
self.outputs = {
"data": {
"type": "object",
"value": {}
},
"summary": {
"type": "str",
"value": ""
},
"markdown": {
"type": "str",
"value": ""
}
}
def check(self):
self.check_valid_value(
self.operation,
"[ExcelProcessor] Operation",
["read", "merge", "transform", "output"]
)
self.check_valid_value(
self.output_format,
"[ExcelProcessor] Output format",
["xlsx", "csv"]
)
return True
class ExcelProcessor(ComponentBase, ABC):
"""
Excel processing component for RAGFlow agents.
Operations:
- read: Parse Excel files into structured data
- merge: Combine multiple Excel files
- transform: Apply data transformations based on instructions
- output: Generate Excel file output
"""
component_name = "ExcelProcessor"
def get_input_form(self) -> dict[str, dict]:
"""Define input form for the component."""
res = {}
for ref in (self._param.input_files or []):
for k, o in self.get_input_elements_from_text(ref).items():
res[k] = {"name": o.get("name", ""), "type": "file"}
if self._param.transform_data:
for k, o in self.get_input_elements_from_text(self._param.transform_data).items():
res[k] = {"name": o.get("name", ""), "type": "object"}
return res
@timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
def _invoke(self, **kwargs):
if self.check_if_canceled("ExcelProcessor processing"):
return
operation = self._param.operation.lower()
if operation == "read":
self._read_excels()
elif operation == "merge":
self._merge_excels()
elif operation == "transform":
self._transform_data()
elif operation == "output":
self._output_excel()
else:
self.set_output("summary", f"Unknown operation: {operation}")
def _get_file_content(self, file_ref: str) -> tuple[bytes, str]:
"""
Get file content from a variable reference.
Returns (content_bytes, filename).
"""
value = self._canvas.get_variable_value(file_ref)
if value is None:
return None, None
# Handle different value formats
if isinstance(value, dict):
# File reference from Begin/UserFillUp component
file_id = value.get("id") or value.get("file_id")
created_by = value.get("created_by") or self._canvas.get_tenant_id()
filename = value.get("name") or value.get("filename", "unknown.xlsx")
if file_id:
content = FileService.get_blob(created_by, file_id)
return content, filename
elif isinstance(value, list) and len(value) > 0:
# List of file references - return first
return self._get_file_content_from_list(value[0])
elif isinstance(value, str):
# Could be base64 encoded or a path
if value.startswith("data:"):
import base64
# Extract base64 content
_, encoded = value.split(",", 1)
return base64.b64decode(encoded), "uploaded.xlsx"
return None, None
    def _get_file_content_from_list(self, item) -> tuple[bytes, str]:
        """Extract file content from an already-resolved list item (a file dict)."""
        if isinstance(item, dict):
            # The item is already a resolved file dict, so fetch the blob directly rather than re-resolving it as a variable reference.
            file_id = item.get("id") or item.get("file_id")
            if file_id:
                created_by = item.get("created_by") or self._canvas.get_tenant_id()
                filename = item.get("name") or item.get("filename", "unknown.xlsx")
                return FileService.get_blob(created_by, file_id), filename
        return None, None
def _parse_excel_to_dataframes(self, content: bytes, filename: str) -> dict[str, pd.DataFrame]:
"""Parse Excel content into a dictionary of DataFrames (one per sheet)."""
try:
excel_file = BytesIO(content)
if filename.lower().endswith(".csv"):
df = pd.read_csv(excel_file)
return {"Sheet1": df}
else:
# Read all sheets
xlsx = pd.ExcelFile(excel_file, engine='openpyxl')
sheet_selection = self._param.sheet_selection
if sheet_selection == "all":
sheets_to_read = xlsx.sheet_names
elif sheet_selection == "first":
sheets_to_read = [xlsx.sheet_names[0]] if xlsx.sheet_names else []
else:
# Comma-separated sheet names
requested = [s.strip() for s in sheet_selection.split(",")]
sheets_to_read = [s for s in requested if s in xlsx.sheet_names]
dfs = {}
for sheet in sheets_to_read:
dfs[sheet] = pd.read_excel(xlsx, sheet_name=sheet)
return dfs
except Exception as e:
logging.error(f"Error parsing Excel file {filename}: {e}")
return {}
def _read_excels(self):
"""Read and parse Excel files into structured data."""
all_data = {}
summaries = []
markdown_parts = []
for file_ref in (self._param.input_files or []):
if self.check_if_canceled("ExcelProcessor reading"):
return
# Get variable value
value = self._canvas.get_variable_value(file_ref)
self.set_input_value(file_ref, str(value)[:200] if value else "")
if value is None:
continue
# Handle file content
content, filename = self._get_file_content(file_ref)
if content is None:
continue
# Parse Excel
dfs = self._parse_excel_to_dataframes(content, filename)
for sheet_name, df in dfs.items():
key = f"{filename}_{sheet_name}" if len(dfs) > 1 else filename
all_data[key] = df.to_dict(orient="records")
# Build summary
summaries.append(f"**{key}**: {len(df)} rows, {len(df.columns)} columns ({', '.join(df.columns.tolist()[:5])}{'...' if len(df.columns) > 5 else ''})")
# Build markdown table
markdown_parts.append(f"### {key}\n\n{df.head(10).to_markdown(index=False)}\n")
# Set outputs
self.set_output("data", all_data)
self.set_output("summary", "\n".join(summaries) if summaries else "No Excel files found")
self.set_output("markdown", "\n\n".join(markdown_parts) if markdown_parts else "No data")
def _merge_excels(self):
"""Merge multiple Excel files/sheets into one."""
all_dfs = []
for file_ref in (self._param.input_files or []):
if self.check_if_canceled("ExcelProcessor merging"):
return
value = self._canvas.get_variable_value(file_ref)
self.set_input_value(file_ref, str(value)[:200] if value else "")
if value is None:
continue
content, filename = self._get_file_content(file_ref)
if content is None:
continue
dfs = self._parse_excel_to_dataframes(content, filename)
all_dfs.extend(dfs.values())
if not all_dfs:
self.set_output("data", {})
self.set_output("summary", "No data to merge")
return
# Merge strategy
if self._param.merge_strategy == "concat":
merged_df = pd.concat(all_dfs, ignore_index=True)
elif self._param.merge_strategy == "join" and self._param.join_on:
# Join on specified column
merged_df = all_dfs[0]
for df in all_dfs[1:]:
merged_df = merged_df.merge(df, on=self._param.join_on, how="outer")
else:
merged_df = pd.concat(all_dfs, ignore_index=True)
self.set_output("data", {"merged": merged_df.to_dict(orient="records")})
self.set_output("summary", f"Merged {len(all_dfs)} sources into {len(merged_df)} rows, {len(merged_df.columns)} columns")
self.set_output("markdown", merged_df.head(20).to_markdown(index=False))
def _transform_data(self):
"""Apply transformations to data based on instructions or input data."""
# Get the data to transform
transform_ref = self._param.transform_data
if not transform_ref:
self.set_output("summary", "No transform data reference provided")
return
data = self._canvas.get_variable_value(transform_ref)
self.set_input_value(transform_ref, str(data)[:300] if data else "")
if data is None:
self.set_output("summary", "Transform data is empty")
return
# Convert to DataFrame
if isinstance(data, dict):
# Could be {"sheet": [rows]} format
if all(isinstance(v, list) for v in data.values()):
# Multiple sheets
all_markdown = []
for sheet_name, rows in data.items():
df = pd.DataFrame(rows)
all_markdown.append(f"### {sheet_name}\n\n{df.to_markdown(index=False)}")
self.set_output("data", data)
self.set_output("markdown", "\n\n".join(all_markdown))
else:
df = pd.DataFrame([data])
self.set_output("data", df.to_dict(orient="records"))
self.set_output("markdown", df.to_markdown(index=False))
elif isinstance(data, list):
df = pd.DataFrame(data)
self.set_output("data", df.to_dict(orient="records"))
self.set_output("markdown", df.to_markdown(index=False))
else:
self.set_output("data", {"raw": str(data)})
self.set_output("markdown", str(data))
self.set_output("summary", "Transformed data ready for processing")
def _output_excel(self):
"""Generate Excel file output from data."""
# Get data from transform_data reference
transform_ref = self._param.transform_data
if not transform_ref:
self.set_output("summary", "No data reference for output")
return
data = self._canvas.get_variable_value(transform_ref)
self.set_input_value(transform_ref, str(data)[:300] if data else "")
if data is None:
self.set_output("summary", "No data to output")
return
try:
# Prepare DataFrames
if isinstance(data, dict):
if all(isinstance(v, list) for v in data.values()):
# Multi-sheet format
dfs = {k: pd.DataFrame(v) for k, v in data.items()}
else:
dfs = {"Sheet1": pd.DataFrame([data])}
elif isinstance(data, list):
dfs = {"Sheet1": pd.DataFrame(data)}
else:
self.set_output("summary", "Invalid data format for Excel output")
return
# Generate output
doc_id = get_uuid()
if self._param.output_format == "csv":
# For CSV, only output first sheet
first_df = list(dfs.values())[0]
binary_content = first_df.to_csv(index=False).encode("utf-8")
filename = f"{self._param.output_filename}.csv"
else:
# Excel output
excel_io = BytesIO()
with pd.ExcelWriter(excel_io, engine='openpyxl') as writer:
for sheet_name, df in dfs.items():
                        # Sanitize sheet name (truncate to Excel's 31-character limit and replace path separators)
                        safe_name = sheet_name[:31].replace("/", "_").replace("\\", "_")
df.to_excel(writer, sheet_name=safe_name, index=False)
excel_io.seek(0)
binary_content = excel_io.read()
filename = f"{self._param.output_filename}.xlsx"
# Store file
settings.STORAGE_IMPL.put(self._canvas._tenant_id, doc_id, binary_content)
# Set attachment output
self.set_output("attachment", {
"doc_id": doc_id,
"format": self._param.output_format,
"file_name": filename
})
total_rows = sum(len(df) for df in dfs.values())
self.set_output("summary", f"Generated {filename} with {len(dfs)} sheet(s), {total_rows} total rows")
self.set_output("data", {k: v.to_dict(orient="records") for k, v in dfs.items()})
logging.info(f"ExcelProcessor: Generated {filename} as {doc_id}")
except Exception as e:
logging.error(f"ExcelProcessor output error: {e}")
self.set_output("summary", f"Error generating output: {str(e)}")
def thoughts(self) -> str:
"""Return component thoughts for UI display."""
op = self._param.operation
if op == "read":
return "Reading Excel files..."
elif op == "merge":
return "Merging Excel data..."
elif op == "transform":
return "Transforming data..."
elif op == "output":
return "Generating Excel output..."
return "Processing Excel..."
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/component/excel_processor.py",
"license": "Apache License 2.0",
"lines": 337,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/metadata_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ast
import logging
from typing import Any, Callable, Dict
import json_repair
def convert_conditions(metadata_condition):
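    """Normalize the conditions of `metadata_condition` into the internal filter format.

    External comparison operators ("is", "not is", ">=", "<=", "!=") are mapped to the
    symbols used by meta_filter, and each condition becomes an {"op", "key", "value"} dict.
    """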
if metadata_condition is None:
metadata_condition = {}
op_mapping = {
"is": "=",
"not is": "≠",
">=": "≥",
"<=": "≤",
"!=": "≠"
}
return [
{
"op": op_mapping.get(cond["comparison_operator"], cond["comparison_operator"]),
"key": cond["name"],
"value": cond["value"]
}
for cond in metadata_condition.get("conditions", [])
]
def meta_filter(metas: dict, filters: list[dict], logic: str = "and"):
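    """Return the doc ids whose metadata satisfy `filters`, combined with `logic` ("and"/"or").

    `metas` maps each metadata key to a {value: [doc_ids]} mapping, e.g. (illustrative):
        {"author": {"alice": ["doc1"], "bob": ["doc2"]}}
    and each filter is a dict like {"key": "author", "op": "=", "value": "alice"}.
    """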
doc_ids = set([])
def filter_out(v2docs, operator, value):
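        """Return the doc ids in `v2docs` (a {metadata value: [doc ids]} mapping) whose value matches `value` under `operator`."""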
ids = []
for input, docids in v2docs.items():
if operator in ["=", "≠", ">", "<", "≥", "≤"]:
# Check if input is in YYYY-MM-DD date format
input_str = str(input).strip()
value_str = str(value).strip()
# Strict date format detection: YYYY-MM-DD (must be 10 chars with correct format)
is_input_date = (
len(input_str) == 10 and
input_str[4] == '-' and
input_str[7] == '-' and
input_str[:4].isdigit() and
input_str[5:7].isdigit() and
input_str[8:10].isdigit()
)
is_value_date = (
len(value_str) == 10 and
value_str[4] == '-' and
value_str[7] == '-' and
value_str[:4].isdigit() and
value_str[5:7].isdigit() and
value_str[8:10].isdigit()
)
if is_value_date:
# Query value is in date format
if is_input_date:
# Data is also in date format: perform date comparison
input = input_str
value = value_str
else:
# Data is not in date format: skip this record (no match)
continue
else:
# Query value is not in date format: use original logic
try:
if isinstance(input, list):
input = input[0]
input = ast.literal_eval(input)
value = ast.literal_eval(value)
except Exception:
pass
# Convert strings to lowercase
if isinstance(input, str):
input = input.lower()
if isinstance(value, str):
value = value.lower()
else:
# Non-comparison operators: maintain original logic
if isinstance(input, str):
input = input.lower()
if isinstance(value, str):
value = value.lower()
matched = False
try:
if operator == "contains":
matched = str(input).find(value) >= 0 if not isinstance(input, list) else any(str(i).find(value) >= 0 for i in input)
elif operator == "not contains":
matched = str(input).find(value) == -1 if not isinstance(input, list) else all(str(i).find(value) == -1 for i in input)
elif operator == "in":
matched = input in value if not isinstance(input, list) else all(i in value for i in input)
elif operator == "not in":
matched = input not in value if not isinstance(input, list) else all(i not in value for i in input)
elif operator == "start with":
matched = str(input).lower().startswith(str(value).lower()) if not isinstance(input, list) else "".join([str(i).lower() for i in input]).startswith(str(value).lower())
elif operator == "end with":
matched = str(input).lower().endswith(str(value).lower()) if not isinstance(input, list) else "".join([str(i).lower() for i in input]).endswith(str(value).lower())
elif operator == "empty":
matched = not input
elif operator == "not empty":
matched = bool(input)
elif operator == "=":
matched = input == value
elif operator == "≠":
matched = input != value
elif operator == ">":
matched = input > value
elif operator == "<":
matched = input < value
elif operator == "≥":
matched = input >= value
elif operator == "≤":
matched = input <= value
except Exception:
pass
if matched:
ids.extend(docids)
return ids
    first_filter = True
    for f in filters:
        k = f["key"]
        if k not in metas:
            # Key not found in metas: treat as no match
            ids = []
        else:
            v2docs = metas[k]
            ids = filter_out(v2docs, f["op"], f["value"])
        if first_filter:
            # Seed the result with the first filter so an empty first match still prunes everything under "and" logic
            doc_ids = set(ids)
            first_filter = False
        elif logic == "and":
            doc_ids = doc_ids & set(ids)
            if not doc_ids:
                return []
        else:
            doc_ids = doc_ids | set(ids)
    return list(doc_ids)
async def apply_meta_data_filter(
meta_data_filter: dict | None,
metas: dict,
question: str,
chat_mdl: Any = None,
base_doc_ids: list[str] | None = None,
manual_value_resolver: Callable[[dict], dict] | None = None,
) -> list[str] | None:
"""
Apply metadata filtering rules and return the filtered doc_ids.
meta_data_filter supports three modes:
- auto: generate filter conditions via LLM (gen_meta_filter)
- semi_auto: generate conditions using selected metadata keys only
- manual: directly filter based on provided conditions
Returns:
list of doc_ids, ["-999"] when manual filters yield no result, or None
when auto/semi_auto filters return empty.
"""
    from rag.prompts.generator import gen_meta_filter  # imported here rather than at module top to avoid a circular import
doc_ids = list(base_doc_ids) if base_doc_ids else []
if not meta_data_filter:
return doc_ids
method = meta_data_filter.get("method")
if method == "auto":
filters: dict = await gen_meta_filter(chat_mdl, metas, question)
doc_ids.extend(meta_filter(metas, filters["conditions"], filters.get("logic", "and")))
if not doc_ids:
return None
elif method == "semi_auto":
selected_keys = []
constraints = {}
for item in meta_data_filter.get("semi_auto", []):
if isinstance(item, str):
selected_keys.append(item)
elif isinstance(item, dict):
key = item.get("key")
op = item.get("op")
selected_keys.append(key)
if op:
constraints[key] = op
if selected_keys:
filtered_metas = {key: metas[key] for key in selected_keys if key in metas}
if filtered_metas:
filters: dict = await gen_meta_filter(chat_mdl, filtered_metas, question, constraints=constraints)
doc_ids.extend(meta_filter(metas, filters["conditions"], filters.get("logic", "and")))
if not doc_ids:
return None
elif method == "manual":
filters = meta_data_filter.get("manual", [])
if manual_value_resolver:
filters = [manual_value_resolver(flt) for flt in filters]
doc_ids.extend(meta_filter(metas, filters, meta_data_filter.get("logic", "and")))
if filters and not doc_ids:
doc_ids = ["-999"]
return doc_ids
def dedupe_list(values: list) -> list:
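    """Deduplicate `values` while preserving order, using str(item) as the identity key."""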
seen = set()
deduped = []
for item in values:
key = str(item)
if key in seen:
continue
seen.add(key)
deduped.append(item)
return deduped
def update_metadata_to(metadata, meta):
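    """Merge `meta` (a dict or a JSON string) into `metadata`: new keys are added, existing list values are extended and deduplicated, and existing scalar values are overwritten."""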
if not meta:
return metadata
if isinstance(meta, str):
try:
meta = json_repair.loads(meta)
except Exception:
logging.error("Meta data format error.")
return metadata
if not isinstance(meta, dict):
return metadata
for k, v in meta.items():
if isinstance(v, list):
v = [vv for vv in v if isinstance(vv, str)]
if not v:
continue
v = dedupe_list(v)
if not isinstance(v, list) and not isinstance(v, str):
continue
if k not in metadata:
metadata[k] = v
continue
if isinstance(metadata[k], list):
if isinstance(v, list):
metadata[k].extend(v)
else:
metadata[k].append(v)
metadata[k] = dedupe_list(metadata[k])
else:
metadata[k] = v
return metadata
def metadata_schema(metadata: list | None) -> Dict[str, Any]:
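    """Build a JSON schema (an object with string-typed properties and additionalProperties disabled) from a list of metadata field definitions ({"key", "description", "enum"})."""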
if not metadata:
return {}
properties = {}
for item in metadata:
key = item.get("key")
if not key:
continue
prop_schema = {
"description": item.get("description", "")
}
if "enum" in item and item["enum"]:
prop_schema["enum"] = item["enum"]
prop_schema["type"] = "string"
properties[key] = prop_schema
json_schema = {
"type": "object",
"properties": properties,
}
json_schema["additionalProperties"] = False
return json_schema
def _is_json_schema(obj: dict) -> bool:
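    """Heuristically decide whether `obj` already looks like a JSON schema (has "$schema", or is an object with a properties dict)."""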
if not isinstance(obj, dict):
return False
if "$schema" in obj:
return True
return obj.get("type") == "object" and isinstance(obj.get("properties"), dict)
def _is_metadata_list(obj: list) -> bool:
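    """Return True when `obj` is a non-empty list of metadata field dicts, each with a non-empty "key" and correctly typed "enum"/"description" fields."""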
if not isinstance(obj, list) or not obj:
return False
for item in obj:
if not isinstance(item, dict):
return False
key = item.get("key")
if not isinstance(key, str) or not key:
return False
if "enum" in item and not isinstance(item["enum"], list):
return False
if "description" in item and not isinstance(item["description"], str):
return False
if "descriptions" in item and not isinstance(item["descriptions"], str):
return False
return True
def turn2jsonschema(obj: dict | list) -> Dict[str, Any]:
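    """Return `obj` unchanged if it already looks like a JSON schema, convert a metadata definition list (normalizing the legacy "descriptions" field) via metadata_schema, and fall back to {} otherwise."""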
if isinstance(obj, dict) and _is_json_schema(obj):
return obj
if isinstance(obj, list) and _is_metadata_list(obj):
normalized = []
for item in obj:
description = item.get("description", item.get("descriptions", ""))
normalized_item = {
"key": item.get("key"),
"description": description,
}
if "enum" in item:
normalized_item["enum"] = item["enum"]
normalized.append(normalized_item)
return metadata_schema(normalized)
return {}
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/metadata_utils.py",
"license": "Apache License 2.0",
"lines": 300,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/component/docs_generator.py | import json
import os
import re
import base64
from datetime import datetime
from abc import ABC
from io import BytesIO
from typing import Optional
from functools import partial
from reportlab.lib.pagesizes import A4
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_JUSTIFY
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image, TableStyle, LongTable
from reportlab.lib import colors
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase.cidfonts import UnicodeCIDFont
from agent.component.base import ComponentParamBase
from api.utils.api_utils import timeout
from .message import Message
class PDFGeneratorParam(ComponentParamBase):
"""
Define the PDF Generator component parameters.
"""
def __init__(self):
super().__init__()
# Output format
self.output_format = "pdf" # pdf, docx, txt
# Content inputs
self.content = ""
self.title = ""
self.subtitle = ""
self.header_text = ""
self.footer_text = ""
# Images
self.logo_image = "" # base64 or file path
self.logo_position = "left" # left, center, right
self.logo_width = 2.0 # inches
self.logo_height = 1.0 # inches
# Styling
self.font_family = "Helvetica" # Helvetica, Times-Roman, Courier
self.font_size = 12
self.title_font_size = 24
self.heading1_font_size = 18
self.heading2_font_size = 16
self.heading3_font_size = 14
self.text_color = "#000000"
self.title_color = "#000000"
# Page settings
self.page_size = "A4"
self.orientation = "portrait" # portrait, landscape
self.margin_top = 1.0 # inches
self.margin_bottom = 1.0
self.margin_left = 1.0
self.margin_right = 1.0
self.line_spacing = 1.2
# Output settings
self.filename = ""
self.output_directory = "/tmp/pdf_outputs"
self.add_page_numbers = True
self.add_timestamp = True
# Advanced features
self.watermark_text = ""
self.enable_toc = False
self.outputs = {
"file_path": {"value": "", "type": "string"},
"pdf_base64": {"value": "", "type": "string"},
"download": {"value": "", "type": "string"},
"success": {"value": False, "type": "boolean"}
}
def check(self):
self.check_empty(self.content, "[PDFGenerator] Content")
self.check_valid_value(self.output_format, "[PDFGenerator] Output format", ["pdf", "docx", "txt"])
self.check_valid_value(self.logo_position, "[PDFGenerator] Logo position", ["left", "center", "right"])
self.check_valid_value(self.font_family, "[PDFGenerator] Font family",
["Helvetica", "Times-Roman", "Courier", "Helvetica-Bold", "Times-Bold"])
self.check_valid_value(self.page_size, "[PDFGenerator] Page size", ["A4", "Letter"])
self.check_valid_value(self.orientation, "[PDFGenerator] Orientation", ["portrait", "landscape"])
self.check_positive_number(self.font_size, "[PDFGenerator] Font size")
self.check_positive_number(self.margin_top, "[PDFGenerator] Margin top")
class PDFGenerator(Message, ABC):
component_name = "PDFGenerator"
# Track if Unicode fonts have been registered
_unicode_fonts_registered = False
_unicode_font_name = None
_unicode_font_bold_name = None
@classmethod
def _reset_font_cache(cls):
"""Reset font registration cache - useful for testing"""
cls._unicode_fonts_registered = False
cls._unicode_font_name = None
cls._unicode_font_bold_name = None
@classmethod
def _register_unicode_fonts(cls):
"""Register Unicode-compatible fonts for multi-language support.
Uses CID fonts (STSong-Light) for reliable CJK rendering as TTF fonts
have issues with glyph mapping in some ReportLab versions.
"""
# If already registered successfully, return True
if cls._unicode_fonts_registered and cls._unicode_font_name is not None:
return True
# Reset and try again if previous registration failed
cls._unicode_fonts_registered = True
cls._unicode_font_name = None
cls._unicode_font_bold_name = None
# Use CID fonts for reliable CJK support
# These are built into ReportLab and work reliably across all platforms
cid_fonts = [
'STSong-Light', # Simplified Chinese
'HeiseiMin-W3', # Japanese
'HYSMyeongJo-Medium', # Korean
]
for cid_font in cid_fonts:
try:
pdfmetrics.registerFont(UnicodeCIDFont(cid_font))
cls._unicode_font_name = cid_font
cls._unicode_font_bold_name = cid_font # CID fonts don't have bold variants
print(f"Registered CID font: {cid_font}")
break
except Exception as e:
print(f"Failed to register CID font {cid_font}: {e}")
continue
# If CID fonts fail, try TTF fonts as fallback
if not cls._unicode_font_name:
font_paths = [
'/usr/share/fonts/truetype/freefont/FreeSans.ttf',
'/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf',
]
for font_path in font_paths:
if os.path.exists(font_path):
try:
pdfmetrics.registerFont(TTFont('UnicodeFont', font_path))
cls._unicode_font_name = 'UnicodeFont'
cls._unicode_font_bold_name = 'UnicodeFont'
print(f"Registered TTF font from: {font_path}")
# Register font family
from reportlab.pdfbase.pdfmetrics import registerFontFamily
registerFontFamily('UnicodeFont', normal='UnicodeFont', bold='UnicodeFont')
break
except Exception as e:
print(f"Failed to register TTF font {font_path}: {e}")
continue
return cls._unicode_font_name is not None
@staticmethod
def _needs_unicode_font(text: str) -> bool:
"""Check if text contains CJK or other complex scripts that need special fonts.
Standard PDF fonts (Helvetica, Times, Courier) support:
- Basic Latin, Extended Latin, Cyrillic, Greek
CID fonts are needed for:
- CJK (Chinese, Japanese, Korean)
- Arabic, Hebrew (RTL scripts)
- Thai, Hindi, and other Indic scripts
"""
if not text:
return False
for char in text:
code = ord(char)
# CJK Unified Ideographs and related ranges
if 0x4E00 <= code <= 0x9FFF: # CJK Unified Ideographs
return True
if 0x3400 <= code <= 0x4DBF: # CJK Extension A
return True
if 0x3000 <= code <= 0x303F: # CJK Symbols and Punctuation
return True
if 0x3040 <= code <= 0x309F: # Hiragana
return True
if 0x30A0 <= code <= 0x30FF: # Katakana
return True
if 0xAC00 <= code <= 0xD7AF: # Hangul Syllables
return True
if 0x1100 <= code <= 0x11FF: # Hangul Jamo
return True
# Arabic and Hebrew (RTL scripts)
if 0x0600 <= code <= 0x06FF: # Arabic
return True
if 0x0590 <= code <= 0x05FF: # Hebrew
return True
# Indic scripts
if 0x0900 <= code <= 0x097F: # Devanagari (Hindi)
return True
if 0x0E00 <= code <= 0x0E7F: # Thai
return True
return False
def _get_font_for_content(self, content: str) -> tuple:
"""Get appropriate font based on content, returns (regular_font, bold_font)"""
if self._needs_unicode_font(content):
if self._register_unicode_fonts() and self._unicode_font_name:
return (self._unicode_font_name, self._unicode_font_bold_name or self._unicode_font_name)
else:
print("Warning: Content contains non-Latin characters but no Unicode font available")
# Fall back to configured font
return (self._param.font_family, self._get_bold_font_name())
def _get_active_font(self) -> str:
"""Get the currently active font (Unicode or configured)"""
return getattr(self, '_active_font', self._param.font_family)
def _get_active_bold_font(self) -> str:
"""Get the currently active bold font (Unicode or configured)"""
return getattr(self, '_active_bold_font', self._get_bold_font_name())
def _get_bold_font_name(self) -> str:
"""Get the correct bold variant of the current font family"""
font_map = {
'Helvetica': 'Helvetica-Bold',
'Times-Roman': 'Times-Bold',
'Courier': 'Courier-Bold',
}
font_family = getattr(self._param, 'font_family', 'Helvetica')
if 'Bold' in font_family:
return font_family
return font_map.get(font_family, 'Helvetica-Bold')
def get_input_form(self) -> dict[str, dict]:
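        """Expose the content, title and subtitle fields as the component's user-facing inputs."""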
return {
"content": {
"name": "Content",
"type": "text"
},
"title": {
"name": "Title",
"type": "line"
},
"subtitle": {
"name": "Subtitle",
"type": "line"
}
}
@timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
def _invoke(self, **kwargs):
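        """Resolve variable references in content/title/subtitle, generate the document in the configured format (pdf, docx or txt) and populate the file_path, pdf_base64, download and success outputs."""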
import traceback
try:
# Get content from parameters (which may contain variable references)
content = self._param.content or ""
title = self._param.title or ""
subtitle = self._param.subtitle or ""
# Log PDF generation start
print(f"Starting PDF generation for title: {title}, content length: {len(content)} chars")
# Resolve variable references in content using canvas
if content and self._canvas.is_reff(content):
                # Extract the variable reference and get its value
                matches = re.findall(self.variable_ref_patt, content, flags=re.DOTALL)
for match in matches:
try:
var_value = self._canvas.get_variable_value(match)
if var_value:
# Handle partial (streaming) content
if isinstance(var_value, partial):
resolved_content = ""
for chunk in var_value():
resolved_content += chunk
content = content.replace("{" + match + "}", resolved_content)
else:
content = content.replace("{" + match + "}", str(var_value))
except Exception as e:
print(f"Error resolving variable {match}: {str(e)}")
content = content.replace("{" + match + "}", f"[ERROR: {str(e)}]")
# Also process with get_kwargs for any remaining variables
if content:
try:
content, _ = self.get_kwargs(content, kwargs)
except Exception as e:
print(f"Error processing content with get_kwargs: {str(e)}")
# Process template variables in title
if title and self._canvas.is_reff(title):
try:
matches = re.findall(self.variable_ref_patt, title, flags=re.DOTALL)
for match in matches:
var_value = self._canvas.get_variable_value(match)
if var_value:
title = title.replace("{" + match + "}", str(var_value))
except Exception as e:
print(f"Error processing title variables: {str(e)}")
if title:
try:
title, _ = self.get_kwargs(title, kwargs)
except Exception:
pass
# Process template variables in subtitle
if subtitle and self._canvas.is_reff(subtitle):
try:
matches = re.findall(self.variable_ref_patt, subtitle, flags=re.DOTALL)
for match in matches:
var_value = self._canvas.get_variable_value(match)
if var_value:
subtitle = subtitle.replace("{" + match + "}", str(var_value))
except Exception as e:
print(f"Error processing subtitle variables: {str(e)}")
if subtitle:
try:
subtitle, _ = self.get_kwargs(subtitle, kwargs)
except Exception:
pass
# If content is still empty, check if it was passed directly
if not content:
content = kwargs.get("content", "")
# Generate document based on format
try:
output_format = self._param.output_format or "pdf"
if output_format == "pdf":
file_path, doc_base64 = self._generate_pdf(content, title, subtitle)
mime_type = "application/pdf"
elif output_format == "docx":
file_path, doc_base64 = self._generate_docx(content, title, subtitle)
mime_type = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
elif output_format == "txt":
file_path, doc_base64 = self._generate_txt(content, title, subtitle)
mime_type = "text/plain"
else:
raise Exception(f"Unsupported output format: {output_format}")
filename = os.path.basename(file_path)
# Verify the file was created and has content
if not os.path.exists(file_path):
raise Exception(f"Document file was not created: {file_path}")
file_size = os.path.getsize(file_path)
if file_size == 0:
raise Exception(f"Document file is empty: {file_path}")
print(f"Successfully generated {output_format.upper()}: {file_path} (Size: {file_size} bytes)")
# Set outputs
self.set_output("file_path", file_path)
self.set_output("pdf_base64", doc_base64) # Keep same output name for compatibility
self.set_output("success", True)
# Create download info object
download_info = {
"filename": filename,
"path": file_path,
"base64": doc_base64,
"mime_type": mime_type,
"size": file_size
}
# Output download info as JSON string so it can be used in Message block
download_json = json.dumps(download_info)
self.set_output("download", download_json)
return download_info
except Exception as e:
error_msg = f"Error in _generate_pdf: {str(e)}\n{traceback.format_exc()}"
print(error_msg)
self.set_output("success", False)
self.set_output("_ERROR", f"PDF generation failed: {str(e)}")
raise
except Exception as e:
error_msg = f"Error in PDFGenerator._invoke: {str(e)}\n{traceback.format_exc()}"
print(error_msg)
self.set_output("success", False)
self.set_output("_ERROR", f"PDF generation failed: {str(e)}")
raise
def _generate_pdf(self, content: str, title: str = "", subtitle: str = "") -> tuple[str, str]:
"""Generate PDF from markdown-style content with improved error handling and concurrency support"""
import uuid
import traceback
# Create output directory if it doesn't exist
os.makedirs(self._param.output_directory, exist_ok=True)
# Initialize variables that need cleanup
buffer = None
temp_file_path = None
file_path = None
try:
# Generate a unique filename to prevent conflicts
if self._param.filename:
base_name = os.path.splitext(self._param.filename)[0]
filename = f"{base_name}_{uuid.uuid4().hex[:8]}.pdf"
else:
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"document_{timestamp}_{uuid.uuid4().hex[:8]}.pdf"
file_path = os.path.join(self._param.output_directory, filename)
temp_file_path = f"{file_path}.tmp"
            # Setup page size, honoring the configured size and orientation
            from reportlab.lib.pagesizes import letter
            page_size = letter if self._param.page_size == "Letter" else A4
            if self._param.orientation == "landscape":
                page_size = (page_size[1], page_size[0])
# Create PDF buffer and document
buffer = BytesIO()
doc = SimpleDocTemplate(
buffer,
pagesize=page_size,
topMargin=self._param.margin_top * inch,
bottomMargin=self._param.margin_bottom * inch,
leftMargin=self._param.margin_left * inch,
rightMargin=self._param.margin_right * inch
)
# Build story (content elements)
story = []
# Combine all text content for Unicode font detection
all_text = f"{title} {subtitle} {content}"
# IMPORTANT: Register Unicode fonts BEFORE creating any styles or Paragraphs
# This ensures the font family is available for ReportLab's HTML parser
if self._needs_unicode_font(all_text):
self._register_unicode_fonts()
styles = self._create_styles(all_text)
# Add logo if provided
if self._param.logo_image:
logo = self._add_logo()
if logo:
story.append(logo)
story.append(Spacer(1, 0.3 * inch))
# Add title
if title:
title_para = Paragraph(self._escape_html(title), styles['PDFTitle'])
story.append(title_para)
story.append(Spacer(1, 0.2 * inch))
# Add subtitle
if subtitle:
subtitle_para = Paragraph(self._escape_html(subtitle), styles['PDFSubtitle'])
story.append(subtitle_para)
story.append(Spacer(1, 0.3 * inch))
# Add timestamp if enabled
if self._param.add_timestamp:
timestamp_text = f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
timestamp_para = Paragraph(timestamp_text, styles['Italic'])
story.append(timestamp_para)
story.append(Spacer(1, 0.2 * inch))
# Parse and add content
content_elements = self._parse_markdown_content(content, styles)
story.extend(content_elements)
# Build PDF
doc.build(story, onFirstPage=self._add_page_decorations, onLaterPages=self._add_page_decorations)
# Get PDF bytes
pdf_bytes = buffer.getvalue()
# Write to temporary file first
with open(temp_file_path, 'wb') as f:
f.write(pdf_bytes)
            # Rename the temp file into place; both paths are in the same directory, so os.rename stays on one filesystem and is effectively atomic
if os.path.exists(file_path):
os.remove(file_path)
os.rename(temp_file_path, file_path)
# Verify the file was created and has content
if not os.path.exists(file_path):
raise Exception(f"Failed to create output file: {file_path}")
file_size = os.path.getsize(file_path)
if file_size == 0:
raise Exception(f"Generated PDF is empty: {file_path}")
# Convert to base64
pdf_base64 = base64.b64encode(pdf_bytes).decode('utf-8')
return file_path, pdf_base64
except Exception as e:
# Clean up any temporary files on error
if temp_file_path and os.path.exists(temp_file_path):
try:
os.remove(temp_file_path)
except Exception as cleanup_error:
print(f"Error cleaning up temporary file: {cleanup_error}")
error_msg = f"Error generating PDF: {str(e)}\n{traceback.format_exc()}"
print(error_msg)
raise Exception(f"PDF generation failed: {str(e)}")
finally:
# Ensure buffer is always closed
if buffer is not None:
try:
buffer.close()
except Exception as close_error:
print(f"Error closing buffer: {close_error}")
def _create_styles(self, content: str = ""):
"""Create custom paragraph styles with Unicode font support if needed"""
# Check if content contains CJK characters that need special fonts
needs_cjk = self._needs_unicode_font(content)
if needs_cjk:
# Use CID fonts for CJK content
if self._register_unicode_fonts() and self._unicode_font_name:
regular_font = self._unicode_font_name
bold_font = self._unicode_font_bold_name or self._unicode_font_name
print(f"Using CID font for CJK content: {regular_font}")
else:
# Fall back to configured font if CID fonts unavailable
regular_font = self._param.font_family
bold_font = self._get_bold_font_name()
print(f"Warning: CJK content detected but no CID font available, using {regular_font}")
else:
# Use user-selected font for Latin-only content
regular_font = self._param.font_family
bold_font = self._get_bold_font_name()
print(f"Using configured font: {regular_font}")
# Store active fonts as instance variables for use in other methods
self._active_font = regular_font
self._active_bold_font = bold_font
# Get fresh style sheet
styles = getSampleStyleSheet()
# Helper function to get the correct bold font name
def get_bold_font(font_family):
"""Get the correct bold variant of a font family"""
# If using Unicode font, return the Unicode bold
if font_family in ('UnicodeFont', self._unicode_font_name):
return bold_font
font_map = {
'Helvetica': 'Helvetica-Bold',
'Times-Roman': 'Times-Bold',
'Courier': 'Courier-Bold',
}
if 'Bold' in font_family:
return font_family
return font_map.get(font_family, 'Helvetica-Bold')
# Use detected font instead of configured font for non-Latin content
active_font = regular_font
active_bold_font = bold_font
# Helper function to add or update style
def add_or_update_style(name, **kwargs):
if name in styles:
# Update existing style
style = styles[name]
for key, value in kwargs.items():
setattr(style, key, value)
else:
# Add new style
styles.add(ParagraphStyle(name=name, **kwargs))
# IMPORTANT: Update base styles to use Unicode font for non-Latin content
# This ensures ALL text uses the correct font, not just our custom styles
add_or_update_style('Normal', fontName=active_font)
add_or_update_style('BodyText', fontName=active_font)
add_or_update_style('Bullet', fontName=active_font)
add_or_update_style('Heading1', fontName=active_bold_font)
add_or_update_style('Heading2', fontName=active_bold_font)
add_or_update_style('Heading3', fontName=active_bold_font)
add_or_update_style('Title', fontName=active_bold_font)
# Title style
add_or_update_style(
'PDFTitle',
parent=styles['Heading1'],
fontSize=self._param.title_font_size,
textColor=colors.HexColor(self._param.title_color),
fontName=active_bold_font,
alignment=TA_CENTER,
spaceAfter=12
)
# Subtitle style
add_or_update_style(
'PDFSubtitle',
parent=styles['Heading2'],
fontSize=self._param.heading2_font_size,
textColor=colors.HexColor(self._param.text_color),
fontName=active_font,
alignment=TA_CENTER,
spaceAfter=12
)
# Custom heading styles
add_or_update_style(
'CustomHeading1',
parent=styles['Heading1'],
fontSize=self._param.heading1_font_size,
fontName=active_bold_font,
textColor=colors.HexColor(self._param.text_color),
spaceAfter=12,
spaceBefore=12
)
add_or_update_style(
'CustomHeading2',
parent=styles['Heading2'],
fontSize=self._param.heading2_font_size,
fontName=active_bold_font,
textColor=colors.HexColor(self._param.text_color),
spaceAfter=10,
spaceBefore=10
)
add_or_update_style(
'CustomHeading3',
parent=styles['Heading3'],
fontSize=self._param.heading3_font_size,
fontName=active_bold_font,
textColor=colors.HexColor(self._param.text_color),
spaceAfter=8,
spaceBefore=8
)
# Body text style
add_or_update_style(
'CustomBody',
parent=styles['BodyText'],
fontSize=self._param.font_size,
fontName=active_font,
textColor=colors.HexColor(self._param.text_color),
leading=self._param.font_size * self._param.line_spacing,
alignment=TA_JUSTIFY
)
# Bullet style
add_or_update_style(
'CustomBullet',
parent=styles['BodyText'],
fontSize=self._param.font_size,
fontName=active_font,
textColor=colors.HexColor(self._param.text_color),
leftIndent=20,
bulletIndent=10
)
# Code style (keep Courier for code blocks)
add_or_update_style(
'PDFCode',
parent=styles.get('Code', styles['Normal']),
fontSize=self._param.font_size - 1,
fontName='Courier',
textColor=colors.HexColor('#333333'),
backColor=colors.HexColor('#f5f5f5'),
leftIndent=20,
rightIndent=20
)
# Italic style
add_or_update_style(
'Italic',
parent=styles['Normal'],
fontSize=self._param.font_size,
fontName=active_font,
textColor=colors.HexColor(self._param.text_color)
)
return styles
def _parse_markdown_content(self, content: str, styles):
"""Parse markdown-style content and convert to PDF elements"""
elements = []
lines = content.split('\n')
i = 0
while i < len(lines):
line = lines[i].strip()
# Skip empty lines
if not line:
elements.append(Spacer(1, 0.1 * inch))
i += 1
continue
# Horizontal rule
if line == '---' or line == '___':
elements.append(Spacer(1, 0.1 * inch))
elements.append(self._create_horizontal_line())
elements.append(Spacer(1, 0.1 * inch))
i += 1
continue
# Heading 1
if line.startswith('# ') and not line.startswith('## '):
text = line[2:].strip()
elements.append(Paragraph(self._format_inline(text), styles['CustomHeading1']))
i += 1
continue
# Heading 2
if line.startswith('## ') and not line.startswith('### '):
text = line[3:].strip()
elements.append(Paragraph(self._format_inline(text), styles['CustomHeading2']))
i += 1
continue
# Heading 3
if line.startswith('### '):
text = line[4:].strip()
elements.append(Paragraph(self._format_inline(text), styles['CustomHeading3']))
i += 1
continue
# Bullet list
if line.startswith('- ') or line.startswith('* '):
bullet_items = []
while i < len(lines) and (lines[i].strip().startswith('- ') or lines[i].strip().startswith('* ')):
item_text = lines[i].strip()[2:].strip()
formatted = self._format_inline(item_text)
bullet_items.append(f"• {formatted}")
i += 1
for item in bullet_items:
elements.append(Paragraph(item, styles['CustomBullet']))
continue
# Numbered list
if re.match(r'^\d+\.\s', line):
numbered_items = []
counter = 1
while i < len(lines) and re.match(r'^\d+\.\s', lines[i].strip()):
item_text = re.sub(r'^\d+\.\s', '', lines[i].strip())
numbered_items.append(f"{counter}. {self._format_inline(item_text)}")
counter += 1
i += 1
for item in numbered_items:
elements.append(Paragraph(item, styles['CustomBullet']))
continue
# Table detection (markdown table must start with |)
if line.startswith('|') and '|' in line:
table_lines = []
# Collect all consecutive lines that look like table rows
while i < len(lines) and lines[i].strip() and '|' in lines[i]:
table_lines.append(lines[i].strip())
i += 1
# Only process if we have at least 2 lines (header + separator or header + data)
if len(table_lines) >= 2:
table_elements = self._create_table(table_lines)
if table_elements:
# _create_table now returns a list of elements
elements.extend(table_elements)
elements.append(Spacer(1, 0.2 * inch))
continue
else:
# Not a valid table, treat as regular text
i -= len(table_lines) # Reset position
# Code block
if line.startswith('```'):
code_lines = []
i += 1
while i < len(lines) and not lines[i].strip().startswith('```'):
code_lines.append(lines[i])
i += 1
if i < len(lines):
i += 1
code_text = '\n'.join(code_lines)
elements.append(Paragraph(self._escape_html(code_text), styles['PDFCode']))
elements.append(Spacer(1, 0.1 * inch))
continue
# Regular paragraph
paragraph_lines = [line]
i += 1
while i < len(lines) and lines[i].strip() and not self._is_special_line(lines[i]):
paragraph_lines.append(lines[i].strip())
i += 1
paragraph_text = ' '.join(paragraph_lines)
formatted_text = self._format_inline(paragraph_text)
elements.append(Paragraph(formatted_text, styles['CustomBody']))
elements.append(Spacer(1, 0.1 * inch))
return elements
def _is_special_line(self, line: str) -> bool:
"""Check if line is a special markdown element"""
line = line.strip()
return (line.startswith('#') or
line.startswith('- ') or
line.startswith('* ') or
re.match(r'^\d+\.\s', line) or
line in ['---', '___'] or
line.startswith('```') or
'|' in line)
def _format_inline(self, text: str) -> str:
"""Format inline markdown (bold, italic, code)"""
# First, escape the existing HTML to not conflict with our tags.
text = self._escape_html(text)
# IMPORTANT: Process inline code FIRST to protect underscores inside code blocks
# Use a placeholder to protect code blocks from italic/bold processing
code_blocks = []
def save_code(match):
code_blocks.append(match.group(1))
return f"__CODE_BLOCK_{len(code_blocks)-1}__"
text = re.sub(r'`(.+?)`', save_code, text)
# Then, apply markdown formatting.
# The order is important: from most specific to least specific.
# Bold and italic combined: ***text*** or ___text___
text = re.sub(r'\*\*\*(.+?)\*\*\*', r'<b><i>\1</i></b>', text)
text = re.sub(r'___(.+?)___', r'<b><i>\1</i></b>', text)
# Bold: **text** or __text__
text = re.sub(r'\*\*(.+?)\*\*', r'<b>\1</b>', text)
text = re.sub(r'__([^_]+?)__', r'<b>\1</b>', text) # More restrictive to avoid matching placeholders
# Italic: *text* or _text_ (but not underscores in words like variable_name)
text = re.sub(r'\*([^*]+?)\*', r'<i>\1</i>', text)
# Only match _text_ when surrounded by spaces or at start/end, not mid-word underscores
text = re.sub(r'(?<![a-zA-Z0-9])_([^_]+?)_(?![a-zA-Z0-9])', r'<i>\1</i>', text)
# Restore code blocks with proper formatting
for i, code in enumerate(code_blocks):
text = text.replace(f"__CODE_BLOCK_{i}__", f'<font name="Courier" color="#333333">{code}</font>')
return text
def _escape_html(self, text: str) -> str:
"""Escape HTML special characters and clean up markdown.
Args:
text: Input text that may contain HTML or markdown
Returns:
str: Cleaned and escaped text
"""
if not text:
return ""
# Ensure we're working with a string
text = str(text)
# Remove HTML form elements and tags
text = re.sub(r'<input[^>]*>', '', text, flags=re.IGNORECASE) # Remove input tags
text = re.sub(r'<textarea[^>]*>.*?</textarea>', '', text, flags=re.IGNORECASE | re.DOTALL) # Remove textarea
text = re.sub(r'<select[^>]*>.*?</select>', '', text, flags=re.IGNORECASE | re.DOTALL) # Remove select
text = re.sub(r'<button[^>]*>.*?</button>', '', text, flags=re.IGNORECASE | re.DOTALL) # Remove buttons
text = re.sub(r'<form[^>]*>.*?</form>', '', text, flags=re.IGNORECASE | re.DOTALL) # Remove forms
# Remove other common HTML tags (but preserve content)
text = re.sub(r'<div[^>]*>', '', text, flags=re.IGNORECASE)
text = re.sub(r'</div>', '', text, flags=re.IGNORECASE)
text = re.sub(r'<span[^>]*>', '', text, flags=re.IGNORECASE)
text = re.sub(r'</span>', '', text, flags=re.IGNORECASE)
text = re.sub(r'<p[^>]*>', '', text, flags=re.IGNORECASE)
text = re.sub(r'</p>', '\n', text, flags=re.IGNORECASE)
# First, handle common markdown table artifacts
text = re.sub(r'^[|\-\s:]+$', '', text, flags=re.MULTILINE) # Remove separator lines
text = re.sub(r'^\s*\|\s*|\s*\|\s*$', '', text) # Remove leading/trailing pipes
text = re.sub(r'\s*\|\s*', ' | ', text) # Normalize pipes
# Remove markdown links, but keep other formatting characters for _format_inline
text = re.sub(r'\[([^\]]+)\]\([^)]+\)', r'\1', text) # Remove markdown links
# Escape HTML special characters
        text = text.replace('&', '&amp;')
        text = text.replace('<', '&lt;')
        text = text.replace('>', '&gt;')
# Clean up excessive whitespace
text = re.sub(r'\n\s*\n\s*\n+', '\n\n', text) # Multiple blank lines to double
text = re.sub(r' +', ' ', text) # Multiple spaces to single
return text.strip()
def _get_cell_style(self, row_idx: int, is_header: bool = False, font_size: int = None) -> 'ParagraphStyle':
"""Get the appropriate style for a table cell."""
styles = getSampleStyleSheet()
# Helper function to get the correct bold font name
def get_bold_font(font_family):
font_map = {
'Helvetica': 'Helvetica-Bold',
'Times-Roman': 'Times-Bold',
'Courier': 'Courier-Bold',
}
if 'Bold' in font_family:
return font_family
return font_map.get(font_family, 'Helvetica-Bold')
if is_header:
return ParagraphStyle(
'TableHeader',
parent=styles['Normal'],
fontSize=self._param.font_size,
fontName=self._get_active_bold_font(),
textColor=colors.whitesmoke,
alignment=TA_CENTER,
leading=self._param.font_size * 1.2,
wordWrap='CJK'
)
else:
font_size = font_size or (self._param.font_size - 1)
return ParagraphStyle(
'TableCell',
parent=styles['Normal'],
fontSize=font_size,
fontName=self._get_active_font(),
textColor=colors.black,
alignment=TA_LEFT,
leading=font_size * 1.15,
wordWrap='CJK'
)
def _convert_table_to_definition_list(self, data: list[list[str]]) -> list:
"""Convert a table to a definition list format for better handling of large content.
This method handles both simple and complex tables, including those with nested content.
It ensures that large cell content is properly wrapped and paginated.
"""
elements = []
styles = getSampleStyleSheet()
# Base styles
base_font_size = getattr(self._param, 'font_size', 10)
# Body style
body_style = ParagraphStyle(
'TableBody',
parent=styles['Normal'],
fontSize=base_font_size,
fontName=self._get_active_font(),
textColor=colors.HexColor(getattr(self._param, 'text_color', '#000000')),
spaceAfter=6,
leading=base_font_size * 1.2
)
# Label style (for field names)
label_style = ParagraphStyle(
'LabelStyle',
parent=body_style,
fontName=self._get_active_bold_font(),
textColor=colors.HexColor('#2c3e50'),
fontSize=base_font_size,
spaceAfter=4,
leftIndent=0,
leading=base_font_size * 1.3
)
# Value style (for cell content) - clean, no borders
value_style = ParagraphStyle(
'ValueStyle',
parent=body_style,
leftIndent=15,
rightIndent=0,
spaceAfter=8,
spaceBefore=2,
fontSize=base_font_size,
textColor=colors.HexColor('#333333'),
alignment=TA_JUSTIFY,
leading=base_font_size * 1.4,
# No borders or background - clean text only
)
try:
# If we have no data, return empty list
if not data or not any(data):
return elements
# Get column headers or generate them
headers = []
if data and len(data) > 0:
headers = [str(h).strip() for h in data[0]]
# If no headers or empty headers, generate them
if not any(headers):
headers = [f"Column {i+1}" for i in range(len(data[0]) if data and len(data) > 0 else 0)]
# Process each data row (skip header if it exists)
start_row = 1 if len(data) > 1 and any(data[0]) else 0
for row_idx in range(start_row, len(data)):
row = data[row_idx] if row_idx < len(data) else []
if not row:
continue
# Create a container for the row
row_elements = []
# Process each cell in the row
for col_idx in range(len(headers)):
if col_idx >= len(headers):
continue
# Get cell content
cell_text = str(row[col_idx]).strip() if col_idx < len(row) and row[col_idx] is not None else ""
# Skip empty cells
if not cell_text or cell_text.isspace():
continue
# Clean up markdown artifacts for regular text content
cell_text = str(cell_text) # Ensure it's a string
# Remove markdown table formatting
cell_text = re.sub(r'^[|\-\s:]+$', '', cell_text, flags=re.MULTILINE) # Remove separator lines
cell_text = re.sub(r'^\s*\|\s*|\s*\|\s*$', '', cell_text) # Remove leading/trailing pipes
cell_text = re.sub(r'\s*\|\s*', ' | ', cell_text) # Normalize pipes
cell_text = re.sub(r'\s+', ' ', cell_text).strip() # Normalize whitespace
# Remove any remaining markdown formatting
cell_text = re.sub(r'`(.*?)`', r'\1', cell_text) # Remove code ticks
cell_text = re.sub(r'\*\*(.*?)\*\*', r'\1', cell_text) # Remove bold
cell_text = re.sub(r'\*(.*?)\*', r'\1', cell_text) # Remove italic
# Clean up any HTML entities or special characters
cell_text = self._escape_html(cell_text)
# If content still looks like a table, convert it to plain text
                    if '|' in cell_text and ('--' in cell_text or any(line.count('|') > 2 for line in cell_text.split('\n') if line.strip())):
# Convert to a simple text format
lines = [line.strip() for line in cell_text.split('\n') if line.strip()]
cell_text = ' | '.join(lines[:5]) # Join first 5 lines with pipe
if len(lines) > 5:
cell_text += '...'
# Process long content with better wrapping
max_chars_per_line = 100 # Reduced for better readability
max_paragraphs = 3 # Maximum number of paragraphs to show initially
# Split into paragraphs
paragraphs = [p for p in cell_text.split('\n\n') if p.strip()]
# If content is too long, truncate with "show more" indicator
if len(paragraphs) > max_paragraphs or any(len(p) > max_chars_per_line * 3 for p in paragraphs):
wrapped_paragraphs = []
for i, para in enumerate(paragraphs[:max_paragraphs]):
if len(para) > max_chars_per_line * 3:
# Split long paragraphs
words = para.split()
current_line = []
current_length = 0
for word in words:
if current_line and current_length + len(word) + 1 > max_chars_per_line:
wrapped_paragraphs.append(' '.join(current_line))
current_line = [word]
current_length = len(word)
else:
current_line.append(word)
current_length += len(word) + (1 if current_line else 0)
if current_line:
wrapped_paragraphs.append(' '.join(current_line))
else:
wrapped_paragraphs.append(para)
# Add "show more" indicator if there are more paragraphs
if len(paragraphs) > max_paragraphs:
wrapped_paragraphs.append(f"... and {len(paragraphs) - max_paragraphs} more paragraphs")
cell_text = '\n\n'.join(wrapped_paragraphs)
# Add label and content with clean formatting (no borders)
label_para = Paragraph(f"<b>{self._escape_html(headers[col_idx])}:</b>", label_style)
value_para = Paragraph(self._escape_html(cell_text), value_style)
# Add elements with proper spacing
row_elements.append(label_para)
row_elements.append(Spacer(1, 0.03 * 72)) # Tiny space between label and value
row_elements.append(value_para)
# Add spacing between rows
if row_elements and row_idx < len(data) - 1:
# Add a subtle horizontal line as separator
row_elements.append(Spacer(1, 0.1 * 72))
row_elements.append(self._create_horizontal_line(width=0.5, color='#e0e0e0'))
row_elements.append(Spacer(1, 0.15 * 72))
elements.extend(row_elements)
# Add some space after the table
if elements:
elements.append(Spacer(1, 0.3 * 72)) # 0.3 inches in points
except Exception as e:
# Fallback to simple text representation if something goes wrong
error_style = ParagraphStyle(
'ErrorStyle',
parent=styles['Normal'],
fontSize=base_font_size - 1,
textColor=colors.red,
backColor=colors.HexColor('#fff0f0'),
borderWidth=1,
borderColor=colors.red,
borderPadding=5
)
error_msg = [
Paragraph("<b>Error processing table:</b>", error_style),
Paragraph(str(e), error_style),
Spacer(1, 0.2 * 72)
]
# Add a simplified version of the table
try:
for row in data[:10]: # Limit to first 10 rows to avoid huge error output
error_msg.append(Paragraph(" | ".join(str(cell) for cell in row), body_style))
if len(data) > 10:
error_msg.append(Paragraph(f"... and {len(data) - 10} more rows", body_style))
except Exception:
pass
elements.extend(error_msg)
return elements
def _create_table(self, table_lines: list[str]) -> Optional[list]:
"""Create a table from markdown table syntax with robust error handling.
This method handles simple tables and falls back to a list format for complex cases.
Returns:
A list of flowables (could be a table or alternative representation)
Returns None if the table cannot be created.
"""
if not table_lines or len(table_lines) < 2:
return None
try:
# Parse table data
data = []
max_columns = 0
for line in table_lines:
# Skip separator lines (e.g., |---|---|)
if re.match(r'^\|[\s\-:]+\|$', line):
continue
# Handle empty lines within tables
if not line.strip():
continue
# Split by | and clean up cells
cells = []
in_quotes = False
current_cell = ""
                # Custom split to handle escaped pipes and quoted content
                escaped = False
                for char in (line[1:] if line.startswith('|') else line):  # skip the leading | only when present
                    if escaped:
                        # Previous char was a backslash: keep escape sequences such as \| intact; they are unescaped later
                        current_cell += char
                        escaped = False
                    elif char == '|' and not in_quotes:
                        cells.append(current_cell.strip())
                        current_cell = ""
                    elif char == '"':
                        in_quotes = not in_quotes
                        current_cell += char
                    elif char == '\\' and not in_quotes:
                        current_cell += char
                        escaped = True
                    else:
                        current_cell += char
# Add the last cell
if current_cell.strip() or len(cells) > 0:
cells.append(current_cell.strip())
# Remove empty first/last elements if they're empty (from leading/trailing |)
if cells and not cells[0]:
cells = cells[1:]
if cells and not cells[-1]:
cells = cells[:-1]
if cells:
data.append(cells)
max_columns = max(max_columns, len(cells))
if not data or max_columns == 0:
return None
# Ensure all rows have the same number of columns
for row in data:
while len(row) < max_columns:
row.append('')
            # Calculate available width for table (A4 is already imported at module level)
            page_width = A4[0] if self._param.orientation == 'portrait' else A4[1]
            available_width = page_width - (self._param.margin_left + self._param.margin_right) * inch
# Check if we should use definition list format
max_cell_length = max((len(str(cell)) for row in data for cell in row), default=0)
total_rows = len(data)
# Use definition list format if:
# - Any cell is too large (> 300 chars), OR
# - More than 6 columns, OR
# - More than 20 rows, OR
# - Contains nested tables or complex structures
has_nested_tables = any('|' in cell and '---' in cell for row in data for cell in row)
has_complex_cells = any(len(str(cell)) > 150 for row in data for cell in row)
should_use_list_format = (
max_cell_length > 300 or
max_columns > 6 or
total_rows > 20 or
has_nested_tables or
has_complex_cells
)
if should_use_list_format:
return self._convert_table_to_definition_list(data)
# Process cells for normal table
processed_data = []
for row_idx, row in enumerate(data):
processed_row = []
for cell_idx, cell in enumerate(row):
cell_text = str(cell).strip() if cell is not None else ""
# Handle empty cells
if not cell_text:
processed_row.append("")
continue
# Clean up markdown table artifacts
cell_text = re.sub(r'\\\|', '|', cell_text) # Unescape pipes
cell_text = re.sub(r'\\n', '\n', cell_text) # Handle explicit newlines
# Check for nested tables
if '|' in cell_text and '---' in cell_text:
# This cell contains a nested table
nested_lines = [line.strip() for line in cell_text.split('\n') if line.strip()]
nested_table = self._create_table(nested_lines)
if nested_table:
processed_row.append(nested_table[0]) # Add the nested table
continue
# Process as regular text
font_size = self._param.font_size - 1 if row_idx > 0 else self._param.font_size
try:
style = self._get_cell_style(row_idx, is_header=(row_idx == 0), font_size=font_size)
escaped_text = self._escape_html(cell_text)
processed_row.append(Paragraph(escaped_text, style))
except Exception:
processed_row.append(self._escape_html(cell_text))
processed_data.append(processed_row)
# Calculate column widths
min_col_width = 0.5 * inch
max_cols = int(available_width / min_col_width)
if max_columns > max_cols:
return self._convert_table_to_definition_list(data)
col_width = max(min_col_width, available_width / max_columns)
col_widths = [col_width] * max_columns
# Create the table
try:
table = LongTable(processed_data, colWidths=col_widths, repeatRows=1)
# Define table style
table_style = [
('BACKGROUND', (0, 0), (-1, 0), colors.HexColor('#2c3e50')), # Darker header
('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
('ALIGN', (0, 0), (-1, 0), 'CENTER'),
('FONTNAME', (0, 0), (-1, 0), self._get_active_bold_font()),
('FONTSIZE', (0, 0), (-1, -1), self._param.font_size - 1),
('BOTTOMPADDING', (0, 0), (-1, 0), 12),
('BACKGROUND', (0, 1), (-1, -1), colors.HexColor('#f8f9fa')), # Lighter background
('GRID', (0, 0), (-1, -1), 0.5, colors.HexColor('#dee2e6')), # Lighter grid
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 8),
('BOTTOMPADDING', (0, 0), (-1, -1), 8),
('LEFTPADDING', (0, 0), (-1, -1), 8),
('RIGHTPADDING', (0, 0), (-1, -1), 8),
]
# Add zebra striping for better readability
for i in range(1, len(processed_data)):
if i % 2 == 0:
table_style.append(('BACKGROUND', (0, i), (-1, i), colors.HexColor('#f1f3f5')))
table.setStyle(TableStyle(table_style))
# Add a small spacer after the table
return [table, Spacer(1, 0.2 * inch)]
except Exception as table_error:
print(f"Error creating table: {table_error}")
return self._convert_table_to_definition_list(data)
except Exception as e:
print(f"Error processing table: {e}")
# Return a simple text representation of the table
try:
text_content = []
for row in data:
text_content.append(" | ".join(str(cell) for cell in row))
return [Paragraph("<br/>".join(text_content), self._get_cell_style(0))]
except Exception:
return None
def _create_horizontal_line(self, width: float = 1, color: str = None):
"""Create a horizontal line with customizable width and color
Args:
width: Line thickness in points (default: 1)
color: Hex color string (default: grey)
Returns:
HRFlowable: Horizontal line element
"""
from reportlab.platypus import HRFlowable
line_color = colors.HexColor(color) if color else colors.grey
return HRFlowable(width="100%", thickness=width, color=line_color, spaceBefore=0, spaceAfter=0)
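# Hedged usage sketch (file name and styles are illustrative assumptions): an
# HRFlowable like the one returned above can be placed directly into a
# standalone reportlab story.
# from reportlab.lib import colors as rl_colors
# from reportlab.lib.styles import getSampleStyleSheet
# from reportlab.platypus import HRFlowable, Paragraph, SimpleDocTemplate
# def _demo_horizontal_rule(path="rule_demo.pdf"):
#     styles = getSampleStyleSheet()
#     story = [
#         Paragraph("Above the rule", styles["Normal"]),
#         HRFlowable(width="100%", thickness=1, color=rl_colors.grey),
#         Paragraph("Below the rule", styles["Normal"]),
#     ]
#     SimpleDocTemplate(path).build(story)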
def _add_logo(self) -> Optional[Image]:
"""Add logo image to PDF"""
try:
# Check if it's base64 or file path
if self._param.logo_image.startswith('data:image'):
# Extract base64 data
base64_data = self._param.logo_image.split(',')[1]
image_data = base64.b64decode(base64_data)
img = Image(BytesIO(image_data))
elif os.path.exists(self._param.logo_image):
img = Image(self._param.logo_image)
else:
return None
# Set size
img.drawWidth = self._param.logo_width * inch
img.drawHeight = self._param.logo_height * inch
# Set alignment
if self._param.logo_position == 'center':
img.hAlign = 'CENTER'
elif self._param.logo_position == 'right':
img.hAlign = 'RIGHT'
else:
img.hAlign = 'LEFT'
return img
except Exception as e:
print(f"Error adding logo: {e}")
return None
def _add_page_decorations(self, canvas, doc):
"""Add header, footer, page numbers, watermark"""
canvas.saveState()
# Get active font for decorations
active_font = self._get_active_font()
# Add watermark
if self._param.watermark_text:
canvas.setFont(active_font, 60)
canvas.setFillColorRGB(0.9, 0.9, 0.9, alpha=0.3)
canvas.saveState()
canvas.translate(doc.pagesize[0] / 2, doc.pagesize[1] / 2)
canvas.rotate(45)
canvas.drawCentredString(0, 0, self._param.watermark_text)
canvas.restoreState()
# Add header
if self._param.header_text:
canvas.setFont(active_font, 9)
canvas.setFillColorRGB(0.5, 0.5, 0.5)
canvas.drawString(doc.leftMargin, doc.pagesize[1] - 0.5 * inch, self._param.header_text)
# Add footer
if self._param.footer_text:
canvas.setFont(active_font, 9)
canvas.setFillColorRGB(0.5, 0.5, 0.5)
canvas.drawString(doc.leftMargin, 0.5 * inch, self._param.footer_text)
# Add page numbers
if self._param.add_page_numbers:
page_num = canvas.getPageNumber()
text = f"Page {page_num}"
canvas.setFont(active_font, 9)
canvas.setFillColorRGB(0.5, 0.5, 0.5)
canvas.drawRightString(doc.pagesize[0] - doc.rightMargin, 0.5 * inch, text)
canvas.restoreState()
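# Hedged sketch (output file name illustrative): a decoration callback with this
# signature is normally attached through reportlab's page hooks when building
# the document, e.g.:
# from reportlab.lib.styles import getSampleStyleSheet
# from reportlab.platypus import Paragraph, SimpleDocTemplate
# def _demo_decorate(canvas, doc):
#     canvas.saveState()
#     canvas.setFont("Helvetica", 9)
#     canvas.drawString(doc.leftMargin, 36, "Demo footer")  # 36pt = 0.5 inch from the bottom
#     canvas.restoreState()
# SimpleDocTemplate("decorated.pdf").build(
#     [Paragraph("Body text", getSampleStyleSheet()["Normal"])],
#     onFirstPage=_demo_decorate, onLaterPages=_demo_decorate,
# )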
def thoughts(self) -> str:
return "Generating PDF document with formatted content..."
def _generate_docx(self, content: str, title: str = "", subtitle: str = "") -> tuple[str, str]:
"""Generate DOCX from markdown-style content"""
import uuid
from docx import Document
from docx.shared import Pt
from docx.enum.text import WD_ALIGN_PARAGRAPH
# Create output directory if it doesn't exist
os.makedirs(self._param.output_directory, exist_ok=True)
try:
# Generate filename
if self._param.filename:
base_name = os.path.splitext(self._param.filename)[0]
filename = f"{base_name}_{uuid.uuid4().hex[:8]}.docx"
else:
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"document_{timestamp}_{uuid.uuid4().hex[:8]}.docx"
file_path = os.path.join(self._param.output_directory, filename)
# Create document
doc = Document()
# Add title
if title:
title_para = doc.add_heading(title, level=0)
title_para.alignment = WD_ALIGN_PARAGRAPH.CENTER
# Add subtitle
if subtitle:
subtitle_para = doc.add_heading(subtitle, level=1)
subtitle_para.alignment = WD_ALIGN_PARAGRAPH.CENTER
# Add timestamp if enabled
if self._param.add_timestamp:
timestamp_text = f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
ts_para = doc.add_paragraph(timestamp_text)
ts_para.runs[0].italic = True
ts_para.runs[0].font.size = Pt(9)
# Parse and add content
lines = content.split('\n')
i = 0
while i < len(lines):
line = lines[i].strip()
if not line:
i += 1
continue
# Headings
if line.startswith('# ') and not line.startswith('## '):
doc.add_heading(line[2:].strip(), level=1)
elif line.startswith('## ') and not line.startswith('### '):
doc.add_heading(line[3:].strip(), level=2)
elif line.startswith('### '):
doc.add_heading(line[4:].strip(), level=3)
# Bullet list
elif line.startswith('- ') or line.startswith('* '):
doc.add_paragraph(line[2:].strip(), style='List Bullet')
# Numbered list
elif re.match(r'^\d+\.\s', line):
text = re.sub(r'^\d+\.\s', '', line)
doc.add_paragraph(text, style='List Number')
# Regular paragraph
else:
para = doc.add_paragraph(line)
para.runs[0].font.size = Pt(self._param.font_size)
i += 1
# Save document
doc.save(file_path)
# Read and encode to base64
with open(file_path, 'rb') as f:
doc_bytes = f.read()
doc_base64 = base64.b64encode(doc_bytes).decode('utf-8')
return file_path, doc_base64
except Exception as e:
raise Exception(f"DOCX generation failed: {str(e)}")
def _generate_txt(self, content: str, title: str = "", subtitle: str = "") -> tuple[str, str]:
"""Generate TXT from markdown-style content"""
import uuid
# Create output directory if it doesn't exist
os.makedirs(self._param.output_directory, exist_ok=True)
try:
# Generate filename
if self._param.filename:
base_name = os.path.splitext(self._param.filename)[0]
filename = f"{base_name}_{uuid.uuid4().hex[:8]}.txt"
else:
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"document_{timestamp}_{uuid.uuid4().hex[:8]}.txt"
file_path = os.path.join(self._param.output_directory, filename)
# Build text content
text_content = []
if title:
text_content.append(title.upper())
text_content.append("=" * len(title))
text_content.append("")
if subtitle:
text_content.append(subtitle)
text_content.append("-" * len(subtitle))
text_content.append("")
if self._param.add_timestamp:
timestamp_text = f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
text_content.append(timestamp_text)
text_content.append("")
# Add content (keep markdown formatting for readability)
text_content.append(content)
# Join and save
final_text = '\n'.join(text_content)
with open(file_path, 'w', encoding='utf-8') as f:
f.write(final_text)
# Encode to base64
txt_base64 = base64.b64encode(final_text.encode('utf-8')).decode('utf-8')
return file_path, txt_base64
except Exception as e:
raise Exception(f"TXT generation failed: {str(e)}")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/component/docs_generator.py",
"license": "Apache License 2.0",
"lines": 1310,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/box_connector.py | """Box connector"""
import logging
from datetime import datetime, timezone
from typing import Any
from box_sdk_gen import BoxClient
from common.data_source.config import DocumentSource, INDEX_BATCH_SIZE
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
)
from common.data_source.interfaces import LoadConnector, PollConnector, SecondsSinceUnixEpoch
from common.data_source.models import Document, GenerateDocumentsOutput
from common.data_source.utils import get_file_ext
class BoxConnector(LoadConnector, PollConnector):
def __init__(self, folder_id: str, batch_size: int = INDEX_BATCH_SIZE, use_marker: bool = True) -> None:
self.batch_size = batch_size
self.folder_id = "0" if not folder_id else folder_id
self.use_marker = use_marker
self.box_client: BoxClient | None = None
def load_credentials(self, auth: Any):
self.box_client = BoxClient(auth=auth)
return None
def validate_connector_settings(self):
if self.box_client is None:
raise ConnectorMissingCredentialError("Box")
try:
self.box_client.users.get_user_me()
except Exception as e:
logging.exception("[Box]: Failed to validate Box credentials")
raise ConnectorValidationError(f"Unexpected error during Box settings validation: {e}")
def _yield_files_recursive(
self,
folder_id: str,
start: SecondsSinceUnixEpoch | None,
end: SecondsSinceUnixEpoch | None,
relative_folder_path: str = "",
) -> GenerateDocumentsOutput:
if self.box_client is None:
raise ConnectorMissingCredentialError("Box")
result = self.box_client.folders.get_folder_items(
folder_id=folder_id,
limit=self.batch_size,
usemarker=self.use_marker
)
while True:
batch: list[Document] = []
for entry in result.entries:
if entry.type == 'file':
file = self.box_client.files.get_file_by_id(
entry.id
)
modified_time: SecondsSinceUnixEpoch | None = None
raw_time = (
getattr(file, "created_at", None)
or getattr(file, "content_created_at", None)
)
if raw_time:
modified_time = self._box_datetime_to_epoch_seconds(raw_time)
if start is not None and modified_time is not None and modified_time <= start:
continue
if end is not None and modified_time is not None and modified_time > end:
continue
content_bytes = self.box_client.downloads.download_file(file.id)
semantic_identifier = (
f"{relative_folder_path} / {file.name}"
if relative_folder_path
else file.name
)
batch.append(
Document(
id=f"box:{file.id}",
blob=content_bytes.read(),
source=DocumentSource.BOX,
semantic_identifier=semantic_identifier,
extension=get_file_ext(file.name),
doc_updated_at=modified_time,
size_bytes=file.size,
metadata=file.metadata
)
)
elif entry.type == 'folder':
child_relative_path = (
f"{relative_folder_path} / {entry.name}"
if relative_folder_path
else entry.name
)
yield from self._yield_files_recursive(
folder_id=entry.id,
start=start,
end=end,
relative_folder_path=child_relative_path
)
if batch:
yield batch
if not result.next_marker:
break
result = self.box_client.folders.get_folder_items(
folder_id=folder_id,
limit=self.batch_size,
marker=result.next_marker,
usemarker=True
)
def _box_datetime_to_epoch_seconds(self, dt: datetime) -> SecondsSinceUnixEpoch:
"""Convert a Box SDK datetime to Unix epoch seconds (UTC).
Only supports datetime; any non-datetime should be filtered out by caller.
"""
if not isinstance(dt, datetime):
raise TypeError(f"box_datetime_to_epoch_seconds expects datetime, got {type(dt)}")
if dt.tzinfo is None:
dt = dt.replace(tzinfo=timezone.utc)
else:
dt = dt.astimezone(timezone.utc)
return SecondsSinceUnixEpoch(int(dt.timestamp()))
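# Hedged sketch of the behaviour above (timestamps illustrative): a naive
# datetime is assumed to already be UTC, an aware one is converted to UTC,
# so these two values yield the same epoch seconds.
# from datetime import datetime, timedelta, timezone
# naive = datetime(2025, 1, 1, 12, 0, 0)
# aware = datetime(2025, 1, 1, 13, 0, 0, tzinfo=timezone(timedelta(hours=1)))
# assert int(naive.replace(tzinfo=timezone.utc).timestamp()) == int(aware.astimezone(timezone.utc).timestamp())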
def poll_source(self, start, end):
return self._yield_files_recursive(folder_id=self.folder_id, start=start, end=end)
def load_from_state(self):
return self._yield_files_recursive(folder_id=self.folder_id, start=None, end=None)
# from flask import Flask, request, redirect
# from box_sdk_gen import BoxClient, BoxOAuth, OAuthConfig, GetAuthorizeUrlOptions
# app = Flask(__name__)
# AUTH = BoxOAuth(
# OAuthConfig(client_id="8suvn9ik7qezsq2dub0ye6ubox61081z", client_secret="QScvhLgBcZrb2ck1QP1ovkutpRhI2QcN")
# )
# @app.route("/")
# def get_auth():
# auth_url = AUTH.get_authorize_url(
# options=GetAuthorizeUrlOptions(redirect_uri="http://localhost:4999/oauth2callback")
# )
# return redirect(auth_url, code=302)
# @app.route("/oauth2callback")
# def callback():
# AUTH.get_tokens_authorization_code_grant(request.args.get("code"))
# box = BoxConnector()
# box.load_credentials({"auth": AUTH})
# lst = []
# for file in box.load_from_state():
# for f in file:
# lst.append(f.semantic_identifier)
# return lst
if __name__ == "__main__":
pass
# app.run(port=4999)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/box_connector.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:api/db/services/memory_service.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List
from api.db.db_models import DB, Memory, User
from api.db.services import duplicate_name
from api.db.services.common_service import CommonService
from api.utils.memory_utils import calculate_memory_type
from api.constants import MEMORY_NAME_LIMIT
from common.misc_utils import get_uuid
from common.time_utils import get_format_time, current_timestamp
from memory.utils.prompt_util import PromptAssembler
class MemoryService(CommonService):
# Service class for managing memory operations
model = Memory
@classmethod
@DB.connection_context()
def get_by_memory_id(cls, memory_id: str):
return cls.model.select().where(cls.model.id == memory_id).first()
@classmethod
@DB.connection_context()
def get_by_tenant_id(cls, tenant_id: str):
return cls.model.select().where(cls.model.tenant_id == tenant_id)
@classmethod
@DB.connection_context()
def get_all_memory(cls):
memory_list = cls.model.select()
return list(memory_list)
@classmethod
@DB.connection_context()
def get_with_owner_name_by_id(cls, memory_id: str):
fields = [
cls.model.id,
cls.model.name,
cls.model.avatar,
cls.model.tenant_id,
User.nickname.alias("owner_name"),
cls.model.memory_type,
cls.model.storage_type,
cls.model.embd_id,
cls.model.llm_id,
cls.model.permissions,
cls.model.description,
cls.model.memory_size,
cls.model.forgetting_policy,
cls.model.temperature,
cls.model.system_prompt,
cls.model.user_prompt,
cls.model.create_date,
cls.model.create_time
]
memory = cls.model.select(*fields).join(User, on=(cls.model.tenant_id == User.id)).where(
cls.model.id == memory_id
).first()
return memory
@classmethod
@DB.connection_context()
def get_by_filter(cls, filter_dict: dict, keywords: str, page: int = 1, page_size: int = 50):
fields = [
cls.model.id,
cls.model.name,
cls.model.avatar,
cls.model.tenant_id,
User.nickname.alias("owner_name"),
cls.model.memory_type,
cls.model.storage_type,
cls.model.permissions,
cls.model.description,
cls.model.create_time,
cls.model.create_date
]
memories = cls.model.select(*fields).join(User, on=(cls.model.tenant_id == User.id))
if filter_dict.get("tenant_id"):
memories = memories.where(cls.model.tenant_id.in_(filter_dict["tenant_id"]))
if filter_dict.get("memory_type"):
memory_type_int = calculate_memory_type(filter_dict["memory_type"])
memories = memories.where(cls.model.memory_type.bin_and(memory_type_int) > 0)
if filter_dict.get("storage_type"):
memories = memories.where(cls.model.storage_type == filter_dict["storage_type"])
if keywords:
memories = memories.where(cls.model.name.contains(keywords))
count = memories.count()
memories = memories.order_by(cls.model.update_time.desc())
memories = memories.paginate(page, page_size)
return list(memories.dicts()), count
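# Hedged sketch of the bitmask match used above (bit values illustrative): a
# memory stored as raw|semantic matches a filter of ["semantic"] because the
# bitwise AND of the two masks is non-zero, which is what
# Memory.memory_type.bin_and(...) > 0 expresses in SQL.
# stored = calculate_memory_type(["raw", "semantic"])
# wanted = calculate_memory_type(["semantic"])
# assert (stored & wanted) > 0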
@classmethod
@DB.connection_context()
def create_memory(cls, tenant_id: str, name: str, memory_type: List[str], embd_id: str, llm_id: str):
# Deduplicate name within tenant
memory_name = duplicate_name(
cls.query,
name=name,
tenant_id=tenant_id
)
if len(memory_name) > MEMORY_NAME_LIMIT:
return False, f"Memory name {memory_name} exceeds limit of {MEMORY_NAME_LIMIT}."
timestamp = current_timestamp()
format_time = get_format_time()
# build create dict
memory_info = {
"id": get_uuid(),
"name": memory_name,
"memory_type": calculate_memory_type(memory_type),
"tenant_id": tenant_id,
"embd_id": embd_id,
"llm_id": llm_id,
"system_prompt": PromptAssembler.assemble_system_prompt({"memory_type": memory_type}),
"create_time": timestamp,
"create_date": format_time,
"update_time": timestamp,
"update_date": format_time,
}
obj = cls.model(**memory_info).save(force_insert=True)
if not obj:
return False, "Could not create new memory."
db_row = cls.model.select().where(cls.model.id == memory_info["id"]).first()
return obj, db_row
@classmethod
@DB.connection_context()
def update_memory(cls, tenant_id: str, memory_id: str, update_dict: dict):
if not update_dict:
return 0
if "temperature" in update_dict and isinstance(update_dict["temperature"], str):
update_dict["temperature"] = float(update_dict["temperature"])
if "memory_type" in update_dict and isinstance(update_dict["memory_type"], list):
update_dict["memory_type"] = calculate_memory_type(update_dict["memory_type"])
if "name" in update_dict:
update_dict["name"] = duplicate_name(
cls.query,
name=update_dict["name"],
tenant_id=tenant_id
)
update_dict.update({
"update_time": current_timestamp(),
"update_date": get_format_time()
})
return cls.model.update(update_dict).where(cls.model.id == memory_id).execute()
@classmethod
@DB.connection_context()
def delete_memory(cls, memory_id: str):
return cls.delete_by_id(memory_id)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/db/services/memory_service.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:api/utils/memory_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List
from common.constants import MemoryType
def format_ret_data_from_memory(memory):
return {
"id": memory.id,
"name": memory.name,
"avatar": memory.avatar,
"tenant_id": memory.tenant_id,
"owner_name": memory.owner_name if hasattr(memory, "owner_name") else None,
"memory_type": get_memory_type_human(memory.memory_type),
"storage_type": memory.storage_type,
"embd_id": memory.embd_id,
"llm_id": memory.llm_id,
"permissions": memory.permissions,
"description": memory.description,
"memory_size": memory.memory_size,
"forgetting_policy": memory.forgetting_policy,
"temperature": memory.temperature,
"system_prompt": memory.system_prompt,
"user_prompt": memory.user_prompt,
"create_time": memory.create_time,
"create_date": memory.create_date,
"update_time": memory.update_time,
"update_date": memory.update_date
}
def get_memory_type_human(memory_type: int) -> List[str]:
return [mem_type.name.lower() for mem_type in MemoryType if memory_type & mem_type.value]
def calculate_memory_type(memory_type_name_list: List[str]) -> int:
memory_type = 0
type_value_map = {mem_type.name.lower(): mem_type.value for mem_type in MemoryType}
for mem_type in memory_type_name_list:
if mem_type in type_value_map:
memory_type |= type_value_map[mem_type]
return memory_type
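# Hedged round-trip sketch (assumes MemoryType defines raw/semantic as distinct bit flags):
# mask = calculate_memory_type(["raw", "semantic"])
# assert set(get_memory_type_human(mask)) == {"raw", "semantic"}
# # unknown names are ignored rather than raising
# assert calculate_memory_type(["raw", "unknown"]) == calculate_memory_type(["raw"])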
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/utils/memory_utils.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:test/testcases/test_web_api/test_memory_app/test_create_memory.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import re
import pytest
from test_web_api.common import create_memory
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
ids=["empty_auth", "invalid_api_token"]
)
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
res = create_memory(invalid_auth)
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
class TestMemoryCreate:
@pytest.mark.p1
@pytest.mark.parametrize("name", ["test_memory_name", "d" * 128])
def test_name(self, WebApiAuth, name):
payload = {
"name": name,
"memory_type": ["raw"] + random.choices(["semantic", "episodic", "procedural"], k=random.randint(0, 3)),
"embd_id": "BAAI/bge-large-zh-v1.5@SILICONFLOW",
"llm_id": "glm-4-flash@ZHIPU-AI"
}
res = create_memory(WebApiAuth, payload)
assert res["code"] == 0, res
pattern = rf"^{re.escape(name)}(?:\(\d+\))?$"
assert re.match(pattern, res["data"]["name"]), res
@pytest.mark.p2
@pytest.mark.parametrize(
"name, expected_message",
[
("", "Memory name cannot be empty or whitespace."),
(" ", "Memory name cannot be empty or whitespace."),
("a" * 129, f"Memory name '{'a'*129}' exceeds limit of 128."),
],
ids=["empty_name", "space_name", "too_long_name"],
)
def test_name_invalid(self, WebApiAuth, name, expected_message):
payload = {
"name": name,
"memory_type": ["raw"] + random.choices(["semantic", "episodic", "procedural"], k=random.randint(0, 3)),
"embd_id": "BAAI/bge-large-zh-v1.5@SILICONFLOW",
"llm_id": "glm-4-flash@ZHIPU-AI"
}
res = create_memory(WebApiAuth, payload)
assert res["message"] == expected_message, res
@pytest.mark.p2
@pytest.mark.parametrize("name", ["invalid_type_name", "memory_alpha"])
def test_type_invalid(self, WebApiAuth, name):
payload = {
"name": name,
"memory_type": ["something"],
"embd_id": "BAAI/bge-large-zh-v1.5@SILICONFLOW",
"llm_id": "glm-4-flash@ZHIPU-AI"
}
res = create_memory(WebApiAuth, payload)
assert res["message"] == f"Memory type '{ {'something'} }' is not supported.", res
@pytest.mark.p3
def test_name_duplicated(self, WebApiAuth):
name = "duplicated_name_test"
payload = {
"name": name,
"memory_type": ["raw"] + random.choices(["semantic", "episodic", "procedural"], k=random.randint(0, 3)),
"embd_id": "BAAI/bge-large-zh-v1.5@SILICONFLOW",
"llm_id": "glm-4-flash@ZHIPU-AI"
}
res1 = create_memory(WebApiAuth, payload)
assert res1["code"] == 0, res1
res2 = create_memory(WebApiAuth, payload)
assert res2["code"] == 0, res2
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_memory_app/test_create_memory.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_memory_app/test_list_memory.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent.futures import ThreadPoolExecutor, as_completed
import pytest
from test_web_api.common import list_memory, get_memory_config
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
)
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
res = list_memory(invalid_auth)
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
class TestCapability:
@pytest.mark.p3
def test_capability(self, WebApiAuth):
count = 100
with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(list_memory, WebApiAuth) for _ in range(count)]
responses = list(as_completed(futures))
assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures)
@pytest.mark.usefixtures("add_memory_func")
class TestMemoryList:
@pytest.mark.p2
def test_params_unset(self, WebApiAuth):
res = list_memory(WebApiAuth, None)
assert res["code"] == 0, res
@pytest.mark.p2
def test_params_empty(self, WebApiAuth):
res = list_memory(WebApiAuth, {})
assert res["code"] == 0, res
@pytest.mark.p1
@pytest.mark.parametrize(
"params, expected_page_size",
[
({"page": 1, "page_size": 10}, 3),
({"page": 2, "page_size": 10}, 0),
({"page": 1, "page_size": 2}, 2),
({"page": 2, "page_size": 2}, 1),
({"page": 5, "page_size": 10}, 0),
],
ids=["normal_first_page", "beyond_max_page", "normal_last_partial_page" , "normal_middle_page",
"full_data_single_page"],
)
def test_page(self, WebApiAuth, params, expected_page_size):
# have added 3 memories in fixture
res = list_memory(WebApiAuth, params)
assert res["code"] == 0, res
assert len(res["data"]["memory_list"]) == expected_page_size, res
@pytest.mark.p2
def test_filter_memory_type(self, WebApiAuth):
res = list_memory(WebApiAuth, {"memory_type": ["semantic"]})
assert res["code"] == 0, res
for memory in res["data"]["memory_list"]:
assert "semantic" in memory["memory_type"], res
@pytest.mark.p2
def test_filter_multi_memory_type(self, WebApiAuth):
res = list_memory(WebApiAuth, {"memory_type": ["episodic", "procedural"]})
assert res["code"] == 0, res
for memory in res["data"]["memory_list"]:
assert "episodic" in memory["memory_type"] or "procedural" in memory["memory_type"], res
@pytest.mark.p2
def test_filter_storage_type(self, WebApiAuth):
res = list_memory(WebApiAuth, {"storage_type": "table"})
assert res["code"] == 0, res
for memory in res["data"]["memory_list"]:
assert memory["storage_type"] == "table", res
@pytest.mark.p2
def test_match_keyword(self, WebApiAuth):
res = list_memory(WebApiAuth, {"keywords": "s"})
assert res["code"] == 0, res
for memory in res["data"]["memory_list"]:
assert "s" in memory["name"], res
@pytest.mark.p1
def test_get_config(self, WebApiAuth):
memory_list = list_memory(WebApiAuth, {})
assert memory_list["code"] == 0, memory_list
memory_config = get_memory_config(WebApiAuth, memory_list["data"]["memory_list"][0]["id"])
assert memory_config["code"] == 0, memory_config
assert memory_config["data"]["id"] == memory_list["data"]["memory_list"][0]["id"], memory_config
for field in ["name", "avatar", "tenant_id", "owner_name", "memory_type", "storage_type",
"embd_id", "llm_id", "permissions", "description", "memory_size", "forgetting_policy",
"temperature", "system_prompt", "user_prompt"]:
assert field in memory_config["data"], memory_config
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_memory_app/test_list_memory.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_memory_app/test_rm_memory.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from test_web_api.common import (list_memory, delete_memory)
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
)
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
res = delete_memory(invalid_auth, "some_memory_id")
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
class TestMemoryDelete:
@pytest.mark.p1
def test_memory_id(self, WebApiAuth, add_memory_func):
memory_ids = add_memory_func
res = delete_memory(WebApiAuth, memory_ids[0])
assert res["code"] == 0, res
res = list_memory(WebApiAuth)
assert res["data"]["total_count"] == 2, res
@pytest.mark.p2
@pytest.mark.usefixtures("add_memory_func")
def test_id_wrong_uuid(self, WebApiAuth):
res = delete_memory(WebApiAuth, "d94a8dc02c9711f0930f7fbc369eab6d")
assert res["code"] == 404, res
res = list_memory(WebApiAuth)
assert len(res["data"]["memory_list"]) == 3, res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_memory_app/test_rm_memory.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_memory_app/test_update_memory.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import pytest
from test_web_api.common import update_memory
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth
from utils import encode_avatar
from utils.file_utils import create_image_file
class TestAuthorization:
@pytest.mark.p2
@pytest.mark.parametrize(
"invalid_auth, expected_code, expected_message",
[
(None, 401, "<Unauthorized '401: Unauthorized'>"),
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
],
ids=["empty_auth", "invalid_api_token"]
)
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
res = update_memory(invalid_auth, "memory_id")
assert res["code"] == expected_code, res
assert res["message"] == expected_message, res
class TestMemoryUpdate:
@pytest.mark.p1
@pytest.mark.parametrize("name", ["updated_memory", "f" * 128])
def test_name(self, WebApiAuth, add_memory_func, name):
memory_ids = add_memory_func
payload = {"name": name}
res = update_memory(WebApiAuth, memory_ids[0], payload)
assert res["code"] == 0, res
pattern = rf"^{re.escape(name)}(?:\(\d+\))?$"
assert re.match(pattern, res["data"]["name"]), res
@pytest.mark.p2
@pytest.mark.parametrize(
"name, expected_message",
[
("", "Memory name cannot be empty or whitespace."),
(" ", "Memory name cannot be empty or whitespace."),
("a" * 129, f"Memory name '{'a' * 129}' exceeds limit of 128."),
]
)
def test_name_invalid(self, WebApiAuth, add_memory_func, name, expected_message):
memory_ids = add_memory_func
payload = {"name": name}
res = update_memory(WebApiAuth, memory_ids[0], payload)
assert res["code"] == 101, res
assert res["message"] == expected_message, res
@pytest.mark.p2
def test_duplicate_name(self, WebApiAuth, add_memory_func):
memory_ids = add_memory_func
payload = {"name": "Test_Memory"}
res = update_memory(WebApiAuth, memory_ids[0], payload)
assert res["code"] == 0, res
payload = {"name": "Test_Memory"}
res = update_memory(WebApiAuth, memory_ids[1], payload)
assert res["code"] == 0, res
assert res["data"]["name"] == "Test_Memory(1)", res
@pytest.mark.p2
def test_avatar(self, WebApiAuth, add_memory_func, tmp_path):
memory_ids = add_memory_func
fn = create_image_file(tmp_path / "ragflow_test.png")
payload = {"avatar": f"data:image/png;base64,{encode_avatar(fn)}"}
res = update_memory(WebApiAuth, memory_ids[0], payload)
assert res["code"] == 0, res
assert res["data"]["avatar"] == f"data:image/png;base64,{encode_avatar(fn)}", res
@pytest.mark.p2
def test_description(self, WebApiAuth, add_memory_func):
memory_ids = add_memory_func
description = "This is a test description."
payload = {"description": description}
res = update_memory(WebApiAuth, memory_ids[0], payload)
assert res["code"] == 0, res
assert res["data"]["description"] == description, res
@pytest.mark.p1
def test_llm(self, WebApiAuth, add_memory_func):
memory_ids = add_memory_func
llm_id = "glm-4@ZHIPU-AI"
payload = {"llm_id": llm_id}
res = update_memory(WebApiAuth, memory_ids[0], payload)
assert res["code"] == 0, res
assert res["data"]["llm_id"] == llm_id, res
@pytest.mark.p2
@pytest.mark.parametrize(
"permission",
[
"me",
"team"
],
ids=["me", "team"]
)
def test_permission(self, WebApiAuth, add_memory_func, permission):
memory_ids = add_memory_func
payload = {"permissions": permission}
res = update_memory(WebApiAuth, memory_ids[0], payload)
assert res["code"] == 0, res
assert res["data"]["permissions"] == permission.lower().strip(), res
@pytest.mark.p1
def test_memory_size(self, WebApiAuth, add_memory_func):
memory_ids = add_memory_func
memory_size = 1048576 # 1 MB
payload = {"memory_size": memory_size}
res = update_memory(WebApiAuth, memory_ids[0], payload)
assert res["code"] == 0, res
assert res["data"]["memory_size"] == memory_size, res
@pytest.mark.p1
def test_temperature(self, WebApiAuth, add_memory_func):
memory_ids = add_memory_func
temperature = 0.7
payload = {"temperature": temperature}
res = update_memory(WebApiAuth, memory_ids[0], payload)
assert res["code"] == 0, res
assert res["data"]["temperature"] == temperature, res
@pytest.mark.p1
def test_system_prompt(self, WebApiAuth, add_memory_func):
memory_ids = add_memory_func
system_prompt = "This is a system prompt."
payload = {"system_prompt": system_prompt}
res = update_memory(WebApiAuth, memory_ids[0], payload)
assert res["code"] == 0, res
assert res["data"]["system_prompt"] == system_prompt, res
@pytest.mark.p1
def test_user_prompt(self, WebApiAuth, add_memory_func):
memory_ids = add_memory_func
user_prompt = "This is a user prompt."
payload = {"user_prompt": user_prompt}
res = update_memory(WebApiAuth, memory_ids[0], payload)
assert res["code"] == 0, res
assert res["data"]["user_prompt"] == user_prompt, res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_memory_app/test_update_memory.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:rag/llm/ocr_model.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
from typing import Any, Optional
from deepdoc.parser.mineru_parser import MinerUParser
from deepdoc.parser.paddleocr_parser import PaddleOCRParser
class Base:
def __init__(self, key: str | dict, model_name: str, **kwargs):
self.model_name = model_name
def parse_pdf(self, filepath: str, binary=None, **kwargs) -> tuple[Any, Any]:
raise NotImplementedError("Please implement parse_pdf!")
class MinerUOcrModel(Base, MinerUParser):
_FACTORY_NAME = "MinerU"
def __init__(self, key: str | dict, model_name: str, **kwargs):
Base.__init__(self, key, model_name, **kwargs)
raw_config = {}
if key:
try:
raw_config = json.loads(key)
except Exception:
raw_config = {}
# nested {"api_key": {...}} from UI
# flat {"MINERU_*": "..."} payload auto-provisioned from env vars
config = raw_config.get("api_key", raw_config)
if not isinstance(config, dict):
config = {}
def _resolve_config(key: str, env_key: str, default=""):
# lower-case keys (UI), upper-case MINERU_* (env auto-provision), env vars
return config.get(key, config.get(env_key, os.environ.get(env_key, default)))
self.mineru_api = _resolve_config("mineru_apiserver", "MINERU_APISERVER", "")
self.mineru_output_dir = _resolve_config("mineru_output_dir", "MINERU_OUTPUT_DIR", "")
self.mineru_backend = _resolve_config("mineru_backend", "MINERU_BACKEND", "pipeline")
self.mineru_server_url = _resolve_config("mineru_server_url", "MINERU_SERVER_URL", "")
self.mineru_delete_output = bool(int(_resolve_config("mineru_delete_output", "MINERU_DELETE_OUTPUT", 1)))
# Redact sensitive config keys before logging
redacted_config = {}
for k, v in config.items():
if any(sensitive_word in k.lower() for sensitive_word in ("key", "password", "token", "secret")):
redacted_config[k] = "[REDACTED]"
else:
redacted_config[k] = v
logging.info(f"Parsed MinerU config (sensitive fields redacted): {redacted_config}")
MinerUParser.__init__(self, mineru_api=self.mineru_api, mineru_server_url=self.mineru_server_url)
def check_available(self, backend: Optional[str] = None, server_url: Optional[str] = None) -> tuple[bool, str]:
backend = backend or self.mineru_backend
server_url = server_url or self.mineru_server_url
return self.check_installation(backend=backend, server_url=server_url)
def parse_pdf(self, filepath: str, binary=None, callback=None, parse_method: str = "raw", **kwargs):
ok, reason = self.check_available()
if not ok:
raise RuntimeError(f"MinerU server not accessible: {reason}")
sections, tables = MinerUParser.parse_pdf(
self,
filepath=filepath,
binary=binary,
callback=callback,
output_dir=self.mineru_output_dir,
backend=self.mineru_backend,
server_url=self.mineru_server_url,
delete_output=self.mineru_delete_output,
parse_method=parse_method,
**kwargs,
)
return sections, tables
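# Hedged sketch of the lookup order implemented by _resolve_config above (key
# names real, values illustrative): a lower-case UI key wins over an upper-case
# env-style key in the payload, which in turn wins over the process environment.
# import os
# os.environ["MINERU_BACKEND"] = "pipeline"
# config = {"MINERU_BACKEND": "vlm-transformers", "mineru_backend": "vlm-sglang-client"}
# resolved = config.get("mineru_backend", config.get("MINERU_BACKEND", os.environ.get("MINERU_BACKEND", "pipeline")))
# assert resolved == "vlm-sglang-client"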
class PaddleOCROcrModel(Base, PaddleOCRParser):
_FACTORY_NAME = "PaddleOCR"
def __init__(self, key: str | dict, model_name: str, **kwargs):
Base.__init__(self, key, model_name, **kwargs)
raw_config = {}
if key:
try:
raw_config = json.loads(key)
except Exception:
raw_config = {}
# nested {"api_key": {...}} from UI
# flat {"PADDLEOCR_*": "..."} payload auto-provisioned from env vars
config = raw_config.get("api_key", raw_config)
if not isinstance(config, dict):
config = {}
def _resolve_config(key: str, env_key: str, default=""):
# lower-case keys (UI), upper-case PADDLEOCR_* (env auto-provision), env vars
return config.get(key, config.get(env_key, os.environ.get(env_key, default)))
self.paddleocr_api_url = _resolve_config("paddleocr_api_url", "PADDLEOCR_API_URL", "")
self.paddleocr_algorithm = _resolve_config("paddleocr_algorithm", "PADDLEOCR_ALGORITHM", "PaddleOCR-VL")
self.paddleocr_access_token = _resolve_config("paddleocr_access_token", "PADDLEOCR_ACCESS_TOKEN", None)
# Redact sensitive config keys before logging
redacted_config = {}
for k, v in config.items():
if any(sensitive_word in k.lower() for sensitive_word in ("key", "password", "token", "secret")):
redacted_config[k] = "[REDACTED]"
else:
redacted_config[k] = v
logging.info(f"Parsed PaddleOCR config (sensitive fields redacted): {redacted_config}")
PaddleOCRParser.__init__(
self,
api_url=self.paddleocr_api_url,
access_token=self.paddleocr_access_token,
algorithm=self.paddleocr_algorithm,
)
def check_available(self) -> tuple[bool, str]:
return self.check_installation()
def parse_pdf(self, filepath: str, binary=None, callback=None, parse_method: str = "raw", **kwargs):
ok, reason = self.check_available()
if not ok:
raise RuntimeError(f"PaddleOCR server not accessible: {reason}")
sections, tables = PaddleOCRParser.parse_pdf(self, filepath=filepath, binary=binary, callback=callback, parse_method=parse_method, **kwargs)
return sections, tables
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/llm/ocr_model.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:run_tests.py | #!/usr/bin/env python3
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import argparse
import subprocess
from pathlib import Path
from typing import List
class Colors:
"""ANSI color codes for terminal output"""
RED = '\033[0;31m'
GREEN = '\033[0;32m'
YELLOW = '\033[1;33m'
BLUE = '\033[0;34m'
NC = '\033[0m' # No Color
class TestRunner:
"""RAGFlow Unit Test Runner"""
def __init__(self):
self.project_root = Path(__file__).parent.resolve()
self.ut_dir = Path(self.project_root / 'test' / 'unit_test')
# Default options
self.coverage = False
self.parallel = False
self.verbose = False
self.markers = ""
self.test = ""
# Python interpreter path
self.python = sys.executable
@staticmethod
def print_info(message: str) -> None:
"""Print informational message"""
print(f"{Colors.BLUE}[INFO]{Colors.NC} {message}")
@staticmethod
def print_error(message: str) -> None:
"""Print error message"""
print(f"{Colors.RED}[ERROR]{Colors.NC} {message}")
@staticmethod
def show_usage() -> None:
"""Display usage information"""
usage = """
RAGFlow Unit Test Runner
Usage: python run_tests.py [OPTIONS]
OPTIONS:
-h, --help Show this help message
-c, --coverage Run tests with coverage report
-p, --parallel Run tests in parallel (requires pytest-xdist)
-v, --verbose Verbose output
-t, --test FILE Run specific test file or directory
-m, --markers MARKERS Run tests with specific markers (e.g., "unit", "integration")
EXAMPLES:
# Run all tests
python run_tests.py
# Run with coverage
python run_tests.py --coverage
# Run in parallel
python run_tests.py --parallel
# Run specific test file
python run_tests.py --test services/test_dialog_service.py
# Run only unit tests
python run_tests.py --markers "unit"
# Run tests with coverage and parallel execution
python run_tests.py --coverage --parallel
"""
print(usage)
def build_pytest_command(self) -> List[str]:
"""Build the pytest command arguments"""
# Add test path: a specific file/directory when --test was given, otherwise the whole unit test directory
test_path = self.ut_dir / self.test if self.test else self.ut_dir
cmd = ["pytest", str(test_path)]
# Add markers
if self.markers:
cmd.extend(["-m", self.markers])
# Add verbose flag
if self.verbose:
cmd.extend(["-vv"])
else:
cmd.append("-v")
# Add coverage
if self.coverage:
# Relative path from test directory to source code
source_path = str(self.project_root / "common")
cmd.extend([
"--cov", source_path,
"--cov-report", "html",
"--cov-report", "term"
])
# Add parallel execution
if self.parallel:
# Try to get number of CPU cores
try:
import multiprocessing
cpu_count = multiprocessing.cpu_count()
cmd.extend(["-n", str(cpu_count)])
except ImportError:
# Fallback to auto if multiprocessing not available
cmd.extend(["-n", "auto"])
# Add default options from pyproject.toml if it exists
pyproject_path = self.project_root / "pyproject.toml"
if pyproject_path.exists():
cmd.extend(["--config-file", str(pyproject_path)])
return cmd
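# Hedged usage sketch (the exact command depends on the machine and repo layout):
# runner = TestRunner()
# runner.coverage, runner.parallel = True, True
# print(" ".join(runner.build_pytest_command()))
# # roughly: pytest <repo>/test/unit_test -v --cov <repo>/common --cov-report html --cov-report term -n <cpus>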
def run_tests(self) -> bool:
"""Execute the pytest command"""
# Change to test directory
os.chdir(self.project_root)
# Build command
cmd = self.build_pytest_command()
# Print test configuration
self.print_info("Running RAGFlow Unit Tests")
self.print_info("=" * 40)
self.print_info(f"Test Directory: {self.ut_dir}")
self.print_info(f"Coverage: {self.coverage}")
self.print_info(f"Parallel: {self.parallel}")
self.print_info(f"Verbose: {self.verbose}")
if self.markers:
self.print_info(f"Markers: {self.markers}")
print(f"\n{Colors.BLUE}[EXECUTING]{Colors.NC} {' '.join(cmd)}\n")
# Run pytest
try:
result = subprocess.run(cmd, check=False)
if result.returncode == 0:
print(f"\n{Colors.GREEN}[SUCCESS]{Colors.NC} All tests passed!")
if self.coverage:
coverage_dir = self.ut_dir / "htmlcov"
if coverage_dir.exists():
index_file = coverage_dir / "index.html"
print(f"\n{Colors.BLUE}[INFO]{Colors.NC} Coverage report generated:")
print(f" {index_file}")
print("\nOpen with:")
print(f" - Windows: start {index_file}")
print(f" - macOS: open {index_file}")
print(f" - Linux: xdg-open {index_file}")
return True
else:
print(f"\n{Colors.RED}[FAILURE]{Colors.NC} Some tests failed!")
return False
except KeyboardInterrupt:
print(f"\n{Colors.YELLOW}[INTERRUPTED]{Colors.NC} Test execution interrupted by user")
return False
except Exception as e:
self.print_error(f"Failed to execute tests: {e}")
return False
def parse_arguments(self) -> bool:
"""Parse command line arguments"""
parser = argparse.ArgumentParser(
description="RAGFlow Unit Test Runner",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
python run_tests.py # Run all tests
python run_tests.py --coverage # Run with coverage
python run_tests.py --parallel # Run in parallel
python run_tests.py --test services/test_dialog_service.py # Run specific test
python run_tests.py --markers "unit" # Run only unit tests
"""
)
parser.add_argument(
"-c", "--coverage",
action="store_true",
help="Run tests with coverage report"
)
parser.add_argument(
"-p", "--parallel",
action="store_true",
help="Run tests in parallel (requires pytest-xdist)"
)
parser.add_argument(
"-v", "--verbose",
action="store_true",
help="Verbose output"
)
parser.add_argument(
"-t", "--test",
type=str,
default="",
help="Run specific test file or directory"
)
parser.add_argument(
"-m", "--markers",
type=str,
default="",
help="Run tests with specific markers (e.g., 'unit', 'integration')"
)
try:
args = parser.parse_args()
# Set options
self.coverage = args.coverage
self.parallel = args.parallel
self.verbose = args.verbose
self.markers = args.markers
self.test = args.test
return True
except SystemExit:
# argparse already printed help, just exit
return False
except Exception as e:
self.print_error(f"Error parsing arguments: {e}")
return False
def run(self) -> int:
"""Main execution method"""
# Parse command line arguments
if not self.parse_arguments():
return 1
# Run tests
success = self.run_tests()
return 0 if success else 1
def main():
"""Entry point"""
runner = TestRunner()
return runner.run()
if __name__ == "__main__":
sys.exit(main()) | {
"repo_id": "infiniflow/ragflow",
"file_path": "run_tests.py",
"license": "Apache License 2.0",
"lines": 222,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:rag/utils/gcs_conn.py | # Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import time
import datetime
from io import BytesIO
from google.cloud import storage
from google.api_core.exceptions import NotFound
from common.decorator import singleton
from common import settings
@singleton
class RAGFlowGCS:
def __init__(self):
self.client = None
self.bucket_name = None
self.__open__()
def __open__(self):
try:
if self.client:
self.client = None
except Exception:
pass
try:
self.client = storage.Client()
self.bucket_name = settings.GCS["bucket"]
except Exception:
logging.exception("Fail to connect to GCS")
def _get_blob_path(self, folder, filename):
"""Helper to construct the path: folder/filename"""
if not folder:
return filename
return f"{folder}/{filename}"
def health(self):
folder, fnm, binary = "ragflow-health", "health_check", b"_t@@@1"
try:
bucket_obj = self.client.bucket(self.bucket_name)
if not bucket_obj.exists():
logging.error(f"Health check failed: Main bucket '{self.bucket_name}' does not exist.")
return False
blob_path = self._get_blob_path(folder, fnm)
blob = bucket_obj.blob(blob_path)
blob.upload_from_file(BytesIO(binary), content_type='application/octet-stream')
return True
except Exception as e:
logging.exception(f"Health check failed: {e}")
return False
def put(self, bucket, fnm, binary, tenant_id=None):
# RENAMED PARAMETER: bucket_name -> bucket (to match interface)
for _ in range(3):
try:
bucket_obj = self.client.bucket(self.bucket_name)
blob_path = self._get_blob_path(bucket, fnm)
blob = bucket_obj.blob(blob_path)
blob.upload_from_file(BytesIO(binary), content_type='application/octet-stream')
return True
except NotFound:
logging.error(f"Fail to put: Main bucket {self.bucket_name} does not exist.")
return False
except Exception:
logging.exception(f"Fail to put {bucket}/{fnm}:")
self.__open__()
time.sleep(1)
return False
def rm(self, bucket, fnm, tenant_id=None):
# RENAMED PARAMETER: bucket_name -> bucket
try:
bucket_obj = self.client.bucket(self.bucket_name)
blob_path = self._get_blob_path(bucket, fnm)
blob = bucket_obj.blob(blob_path)
blob.delete()
except NotFound:
pass
except Exception:
logging.exception(f"Fail to remove {bucket}/{fnm}:")
def get(self, bucket, filename, tenant_id=None):
# RENAMED PARAMETER: bucket_name -> bucket
for _ in range(1):
try:
bucket_obj = self.client.bucket(self.bucket_name)
blob_path = self._get_blob_path(bucket, filename)
blob = bucket_obj.blob(blob_path)
return blob.download_as_bytes()
except NotFound:
logging.warning(f"File not found {bucket}/{filename} in {self.bucket_name}")
return None
except Exception:
logging.exception(f"Fail to get {bucket}/{filename}")
self.__open__()
time.sleep(1)
return None
def obj_exist(self, bucket, filename, tenant_id=None):
# RENAMED PARAMETER: bucket_name -> bucket
try:
bucket_obj = self.client.bucket(self.bucket_name)
blob_path = self._get_blob_path(bucket, filename)
blob = bucket_obj.blob(blob_path)
return blob.exists()
except Exception:
logging.exception(f"obj_exist {bucket}/{filename} got exception")
return False
def bucket_exists(self, bucket):
# RENAMED PARAMETER: bucket_name -> bucket
try:
bucket_obj = self.client.bucket(self.bucket_name)
return bucket_obj.exists()
except Exception:
logging.exception(f"bucket_exist check for {self.bucket_name} got exception")
return False
def get_presigned_url(self, bucket, fnm, expires, tenant_id=None):
# RENAMED PARAMETER: bucket_name -> bucket
for _ in range(10):
try:
bucket_obj = self.client.bucket(self.bucket_name)
blob_path = self._get_blob_path(bucket, fnm)
blob = bucket_obj.blob(blob_path)
expiration = expires
if isinstance(expires, int):
expiration = datetime.timedelta(seconds=expires)
url = blob.generate_signed_url(
version="v4",
expiration=expiration,
method="GET"
)
return url
except Exception:
logging.exception(f"Fail to get_presigned {bucket}/{fnm}:")
self.__open__()
time.sleep(1)
return None
def remove_bucket(self, bucket):
# RENAMED PARAMETER: bucket_name -> bucket
try:
bucket_obj = self.client.bucket(self.bucket_name)
prefix = f"{bucket}/"
blobs = list(self.client.list_blobs(self.bucket_name, prefix=prefix))
if blobs:
bucket_obj.delete_blobs(blobs)
except Exception:
logging.exception(f"Fail to remove virtual bucket (folder) {bucket}")
def copy(self, src_bucket, src_path, dest_bucket, dest_path):
# RENAMED PARAMETERS to match original interface
try:
bucket_obj = self.client.bucket(self.bucket_name)
src_blob_path = self._get_blob_path(src_bucket, src_path)
dest_blob_path = self._get_blob_path(dest_bucket, dest_path)
src_blob = bucket_obj.blob(src_blob_path)
if not src_blob.exists():
logging.error(f"Source object not found: {src_blob_path}")
return False
bucket_obj.copy_blob(src_blob, bucket_obj, dest_blob_path)
return True
except NotFound:
logging.error(f"Copy failed: Main bucket {self.bucket_name} does not exist.")
return False
except Exception:
logging.exception(f"Fail to copy {src_bucket}/{src_path} -> {dest_bucket}/{dest_path}")
return False
def move(self, src_bucket, src_path, dest_bucket, dest_path):
try:
if self.copy(src_bucket, src_path, dest_bucket, dest_path):
self.rm(src_bucket, src_path)
return True
else:
logging.error(f"Copy failed, move aborted: {src_bucket}/{src_path}")
return False
except Exception:
logging.exception(f"Fail to move {src_bucket}/{src_path} -> {dest_bucket}/{dest_path}")
return False
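# Hedged usage sketch (virtual bucket and object names illustrative; requires
# Google credentials and settings.GCS["bucket"] to be configured):
# gcs = RAGFlowGCS()
# if gcs.health():
#     gcs.put("kb-123", "doc.pdf", b"%PDF-1.4 ...")
#     assert gcs.obj_exist("kb-123", "doc.pdf")
#     url = gcs.get_presigned_url("kb-123", "doc.pdf", expires=3600)
#     gcs.rm("kb-123", "doc.pdf")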
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/utils/gcs_conn.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:test/unit_test/utils/test_raptor_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for Raptor utility functions.
"""
import pytest
from rag.utils.raptor_utils import (
is_structured_file_type,
is_tabular_pdf,
should_skip_raptor,
get_skip_reason,
EXCEL_EXTENSIONS,
CSV_EXTENSIONS,
STRUCTURED_EXTENSIONS
)
class TestIsStructuredFileType:
"""Test file type detection for structured data"""
@pytest.mark.parametrize("file_type,expected", [
(".xlsx", True),
(".xls", True),
(".xlsm", True),
(".xlsb", True),
(".csv", True),
(".tsv", True),
("xlsx", True), # Without leading dot
("XLSX", True), # Uppercase
(".pdf", False),
(".docx", False),
(".txt", False),
("", False),
(None, False),
])
def test_file_type_detection(self, file_type, expected):
"""Test detection of various file types"""
assert is_structured_file_type(file_type) == expected
def test_excel_extensions_defined(self):
"""Test that Excel extensions are properly defined"""
assert ".xlsx" in EXCEL_EXTENSIONS
assert ".xls" in EXCEL_EXTENSIONS
assert len(EXCEL_EXTENSIONS) >= 4
def test_csv_extensions_defined(self):
"""Test that CSV extensions are properly defined"""
assert ".csv" in CSV_EXTENSIONS
assert ".tsv" in CSV_EXTENSIONS
def test_structured_extensions_combined(self):
"""Test that structured extensions include both Excel and CSV"""
assert EXCEL_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)
assert CSV_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)
class TestIsTabularPDF:
"""Test tabular PDF detection"""
def test_table_parser_detected(self):
"""Test that table parser is detected as tabular"""
assert is_tabular_pdf("table", {}) is True
assert is_tabular_pdf("TABLE", {}) is True
def test_html4excel_detected(self):
"""Test that html4excel config is detected as tabular"""
assert is_tabular_pdf("naive", {"html4excel": True}) is True
assert is_tabular_pdf("", {"html4excel": True}) is True
def test_non_tabular_pdf(self):
"""Test that non-tabular PDFs are not detected"""
assert is_tabular_pdf("naive", {}) is False
assert is_tabular_pdf("naive", {"html4excel": False}) is False
assert is_tabular_pdf("", {}) is False
def test_combined_conditions(self):
"""Test combined table parser and html4excel"""
assert is_tabular_pdf("table", {"html4excel": True}) is True
assert is_tabular_pdf("table", {"html4excel": False}) is True
class TestShouldSkipRaptor:
"""Test Raptor skip logic"""
def test_skip_excel_files(self):
"""Test that Excel files skip Raptor"""
assert should_skip_raptor(".xlsx") is True
assert should_skip_raptor(".xls") is True
assert should_skip_raptor(".xlsm") is True
def test_skip_csv_files(self):
"""Test that CSV files skip Raptor"""
assert should_skip_raptor(".csv") is True
assert should_skip_raptor(".tsv") is True
def test_skip_tabular_pdf_with_table_parser(self):
"""Test that tabular PDFs skip Raptor"""
assert should_skip_raptor(".pdf", parser_id="table") is True
assert should_skip_raptor("pdf", parser_id="TABLE") is True
def test_skip_tabular_pdf_with_html4excel(self):
"""Test that PDFs with html4excel skip Raptor"""
assert should_skip_raptor(".pdf", parser_config={"html4excel": True}) is True
def test_dont_skip_regular_pdf(self):
"""Test that regular PDFs don't skip Raptor"""
assert should_skip_raptor(".pdf", parser_id="naive") is False
assert should_skip_raptor(".pdf", parser_config={}) is False
def test_dont_skip_text_files(self):
"""Test that text files don't skip Raptor"""
assert should_skip_raptor(".txt") is False
assert should_skip_raptor(".docx") is False
assert should_skip_raptor(".md") is False
def test_override_with_config(self):
"""Test that auto-disable can be overridden"""
raptor_config = {"auto_disable_for_structured_data": False}
# Should not skip even for Excel files
assert should_skip_raptor(".xlsx", raptor_config=raptor_config) is False
assert should_skip_raptor(".csv", raptor_config=raptor_config) is False
assert should_skip_raptor(".pdf", parser_id="table", raptor_config=raptor_config) is False
def test_default_auto_disable_enabled(self):
"""Test that auto-disable is enabled by default"""
# Empty raptor_config should default to auto_disable=True
assert should_skip_raptor(".xlsx", raptor_config={}) is True
assert should_skip_raptor(".xlsx", raptor_config=None) is True
def test_explicit_auto_disable_enabled(self):
"""Test explicit auto-disable enabled"""
raptor_config = {"auto_disable_for_structured_data": True}
assert should_skip_raptor(".xlsx", raptor_config=raptor_config) is True
class TestGetSkipReason:
"""Test skip reason generation"""
def test_excel_skip_reason(self):
"""Test skip reason for Excel files"""
reason = get_skip_reason(".xlsx")
assert "Structured data file" in reason
assert ".xlsx" in reason
assert "auto-disabled" in reason.lower()
def test_csv_skip_reason(self):
"""Test skip reason for CSV files"""
reason = get_skip_reason(".csv")
assert "Structured data file" in reason
assert ".csv" in reason
def test_tabular_pdf_skip_reason(self):
"""Test skip reason for tabular PDFs"""
reason = get_skip_reason(".pdf", parser_id="table")
assert "Tabular PDF" in reason
assert "table" in reason.lower()
assert "auto-disabled" in reason.lower()
def test_html4excel_skip_reason(self):
"""Test skip reason for html4excel PDFs"""
reason = get_skip_reason(".pdf", parser_config={"html4excel": True})
assert "Tabular PDF" in reason
def test_no_skip_reason_for_regular_files(self):
"""Test that regular files have no skip reason"""
assert get_skip_reason(".txt") == ""
assert get_skip_reason(".docx") == ""
assert get_skip_reason(".pdf", parser_id="naive") == ""
class TestEdgeCases:
"""Test edge cases and error handling"""
def test_none_values(self):
"""Test handling of None values"""
assert should_skip_raptor(None) is False
assert should_skip_raptor("") is False
assert get_skip_reason(None) == ""
def test_empty_strings(self):
"""Test handling of empty strings"""
assert should_skip_raptor("") is False
assert get_skip_reason("") == ""
def test_case_insensitivity(self):
"""Test case insensitive handling"""
assert is_structured_file_type("XLSX") is True
assert is_structured_file_type("XlSx") is True
assert is_tabular_pdf("TABLE", {}) is True
assert is_tabular_pdf("TaBlE", {}) is True
def test_with_and_without_dot(self):
"""Test file extensions with and without leading dot"""
assert should_skip_raptor(".xlsx") is True
assert should_skip_raptor("xlsx") is True
assert should_skip_raptor(".CSV") is True
assert should_skip_raptor("csv") is True
class TestIntegrationScenarios:
"""Test real-world integration scenarios"""
def test_financial_excel_report(self):
"""Test scenario: Financial quarterly Excel report"""
file_type = ".xlsx"
parser_id = "naive"
parser_config = {}
raptor_config = {"use_raptor": True}
# Should skip Raptor
assert should_skip_raptor(file_type, parser_id, parser_config, raptor_config) is True
reason = get_skip_reason(file_type, parser_id, parser_config)
assert "Structured data file" in reason
def test_scientific_csv_data(self):
"""Test scenario: Scientific experimental CSV results"""
file_type = ".csv"
# Should skip Raptor
assert should_skip_raptor(file_type) is True
reason = get_skip_reason(file_type)
assert ".csv" in reason
def test_legal_contract_with_tables(self):
"""Test scenario: Legal contract PDF with tables"""
file_type = ".pdf"
parser_id = "table"
parser_config = {}
# Should skip Raptor
assert should_skip_raptor(file_type, parser_id, parser_config) is True
reason = get_skip_reason(file_type, parser_id, parser_config)
assert "Tabular PDF" in reason
def test_text_heavy_pdf_document(self):
"""Test scenario: Text-heavy PDF document"""
file_type = ".pdf"
parser_id = "naive"
parser_config = {}
# Should NOT skip Raptor
assert should_skip_raptor(file_type, parser_id, parser_config) is False
reason = get_skip_reason(file_type, parser_id, parser_config)
assert reason == ""
def test_mixed_dataset_processing(self):
"""Test scenario: Mixed dataset with various file types"""
files = [
(".xlsx", "naive", {}, True), # Excel - skip
(".csv", "naive", {}, True), # CSV - skip
(".pdf", "table", {}, True), # Tabular PDF - skip
(".pdf", "naive", {}, False), # Regular PDF - don't skip
(".docx", "naive", {}, False), # Word doc - don't skip
(".txt", "naive", {}, False), # Text file - don't skip
]
for file_type, parser_id, parser_config, expected_skip in files:
result = should_skip_raptor(file_type, parser_id, parser_config)
assert result == expected_skip, f"Failed for {file_type}"
def test_override_for_special_excel(self):
"""Test scenario: Override auto-disable for special Excel processing"""
file_type = ".xlsx"
raptor_config = {"auto_disable_for_structured_data": False}
# Should NOT skip when explicitly disabled
assert should_skip_raptor(file_type, raptor_config=raptor_config) is False
if __name__ == "__main__":
pytest.main([__file__, "-v"])
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/unit_test/utils/test_raptor_utils.py",
"license": "Apache License 2.0",
"lines": 232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:api/apps/evaluation_app.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
RAG Evaluation API Endpoints
Provides REST API for RAG evaluation functionality including:
- Dataset management
- Test case management
- Evaluation execution
- Results retrieval
- Configuration recommendations
"""
from quart import request
from api.apps import login_required, current_user
from api.db.services.evaluation_service import EvaluationService
from api.utils.api_utils import (
get_data_error_result,
get_json_result,
get_request_json,
server_error_response,
validate_request
)
from common.constants import RetCode
# ==================== Dataset Management ====================
@manager.route('/dataset/create', methods=['POST']) # noqa: F821
@login_required
@validate_request("name", "kb_ids")
async def create_dataset():
"""
Create a new evaluation dataset.
Request body:
{
"name": "Dataset name",
"description": "Optional description",
"kb_ids": ["kb_id1", "kb_id2"]
}
"""
try:
req = await get_request_json()
name = req.get("name", "").strip()
description = req.get("description", "")
kb_ids = req.get("kb_ids", [])
if not name:
return get_data_error_result(message="Dataset name cannot be empty")
if not kb_ids or not isinstance(kb_ids, list):
return get_data_error_result(message="kb_ids must be a non-empty list")
success, result = EvaluationService.create_dataset(
name=name,
description=description,
kb_ids=kb_ids,
tenant_id=current_user.id,
user_id=current_user.id
)
if not success:
return get_data_error_result(message=result)
return get_json_result(data={"dataset_id": result})
except Exception as e:
return server_error_response(e)
@manager.route('/dataset/list', methods=['GET']) # noqa: F821
@login_required
async def list_datasets():
"""
List evaluation datasets for current tenant.
Query params:
- page: Page number (default: 1)
- page_size: Items per page (default: 20)
"""
try:
page = int(request.args.get("page", 1))
page_size = int(request.args.get("page_size", 20))
result = EvaluationService.list_datasets(
tenant_id=current_user.id,
user_id=current_user.id,
page=page,
page_size=page_size
)
return get_json_result(data=result)
except Exception as e:
return server_error_response(e)
@manager.route('/dataset/<dataset_id>', methods=['GET']) # noqa: F821
@login_required
async def get_dataset(dataset_id):
"""Get dataset details by ID"""
try:
dataset = EvaluationService.get_dataset(dataset_id)
if not dataset:
return get_data_error_result(
message="Dataset not found",
code=RetCode.DATA_ERROR
)
return get_json_result(data=dataset)
except Exception as e:
return server_error_response(e)
@manager.route('/dataset/<dataset_id>', methods=['PUT']) # noqa: F821
@login_required
async def update_dataset(dataset_id):
"""
Update dataset.
Request body:
{
"name": "New name",
"description": "New description",
"kb_ids": ["kb_id1", "kb_id2"]
}
"""
try:
req = await get_request_json()
# Remove fields that shouldn't be updated
req.pop("id", None)
req.pop("tenant_id", None)
req.pop("created_by", None)
req.pop("create_time", None)
success = EvaluationService.update_dataset(dataset_id, **req)
if not success:
return get_data_error_result(message="Failed to update dataset")
return get_json_result(data={"dataset_id": dataset_id})
except Exception as e:
return server_error_response(e)
@manager.route('/dataset/<dataset_id>', methods=['DELETE']) # noqa: F821
@login_required
async def delete_dataset(dataset_id):
"""Delete dataset (soft delete)"""
try:
success = EvaluationService.delete_dataset(dataset_id)
if not success:
return get_data_error_result(message="Failed to delete dataset")
return get_json_result(data={"dataset_id": dataset_id})
except Exception as e:
return server_error_response(e)
# ==================== Test Case Management ====================
@manager.route('/dataset/<dataset_id>/case/add', methods=['POST']) # noqa: F821
@login_required
@validate_request("question")
async def add_test_case(dataset_id):
"""
Add a test case to a dataset.
Request body:
{
"question": "Test question",
"reference_answer": "Optional ground truth answer",
"relevant_doc_ids": ["doc_id1", "doc_id2"],
"relevant_chunk_ids": ["chunk_id1", "chunk_id2"],
"metadata": {"key": "value"}
}
"""
try:
req = await get_request_json()
question = req.get("question", "").strip()
if not question:
return get_data_error_result(message="Question cannot be empty")
success, result = EvaluationService.add_test_case(
dataset_id=dataset_id,
question=question,
reference_answer=req.get("reference_answer"),
relevant_doc_ids=req.get("relevant_doc_ids"),
relevant_chunk_ids=req.get("relevant_chunk_ids"),
metadata=req.get("metadata")
)
if not success:
return get_data_error_result(message=result)
return get_json_result(data={"case_id": result})
except Exception as e:
return server_error_response(e)
@manager.route('/dataset/<dataset_id>/case/import', methods=['POST']) # noqa: F821
@login_required
@validate_request("cases")
async def import_test_cases(dataset_id):
"""
Bulk import test cases.
Request body:
{
"cases": [
{
"question": "Question 1",
"reference_answer": "Answer 1",
...
},
{
"question": "Question 2",
...
}
]
}
"""
try:
req = await get_request_json()
cases = req.get("cases", [])
if not cases or not isinstance(cases, list):
return get_data_error_result(message="cases must be a non-empty list")
success_count, failure_count = EvaluationService.import_test_cases(
dataset_id=dataset_id,
cases=cases
)
return get_json_result(data={
"success_count": success_count,
"failure_count": failure_count,
"total": len(cases)
})
except Exception as e:
return server_error_response(e)
@manager.route('/dataset/<dataset_id>/cases', methods=['GET']) # noqa: F821
@login_required
async def get_test_cases(dataset_id):
"""Get all test cases for a dataset"""
try:
cases = EvaluationService.get_test_cases(dataset_id)
return get_json_result(data={"cases": cases, "total": len(cases)})
except Exception as e:
return server_error_response(e)
@manager.route('/case/<case_id>', methods=['DELETE']) # noqa: F821
@login_required
async def delete_test_case(case_id):
"""Delete a test case"""
try:
success = EvaluationService.delete_test_case(case_id)
if not success:
return get_data_error_result(message="Failed to delete test case")
return get_json_result(data={"case_id": case_id})
except Exception as e:
return server_error_response(e)
# ==================== Evaluation Execution ====================
@manager.route('/run/start', methods=['POST']) # noqa: F821
@login_required
@validate_request("dataset_id", "dialog_id")
async def start_evaluation():
"""
Start an evaluation run.
Request body:
{
"dataset_id": "dataset_id",
"dialog_id": "dialog_id",
"name": "Optional run name"
}
"""
try:
req = await get_request_json()
dataset_id = req.get("dataset_id")
dialog_id = req.get("dialog_id")
name = req.get("name")
success, result = EvaluationService.start_evaluation(
dataset_id=dataset_id,
dialog_id=dialog_id,
user_id=current_user.id,
name=name
)
if not success:
return get_data_error_result(message=result)
return get_json_result(data={"run_id": result})
except Exception as e:
return server_error_response(e)
@manager.route('/run/<run_id>', methods=['GET']) # noqa: F821
@login_required
async def get_evaluation_run(run_id):
"""Get evaluation run details"""
try:
result = EvaluationService.get_run_results(run_id)
if not result:
return get_data_error_result(
message="Evaluation run not found",
code=RetCode.DATA_ERROR
)
return get_json_result(data=result)
except Exception as e:
return server_error_response(e)
@manager.route('/run/<run_id>/results', methods=['GET']) # noqa: F821
@login_required
async def get_run_results(run_id):
"""Get detailed results for an evaluation run"""
try:
result = EvaluationService.get_run_results(run_id)
if not result:
return get_data_error_result(
message="Evaluation run not found",
code=RetCode.DATA_ERROR
)
return get_json_result(data=result)
except Exception as e:
return server_error_response(e)
@manager.route('/run/list', methods=['GET']) # noqa: F821
@login_required
async def list_evaluation_runs():
"""
List evaluation runs.
Query params:
- dataset_id: Filter by dataset (optional)
- dialog_id: Filter by dialog (optional)
- page: Page number (default: 1)
- page_size: Items per page (default: 20)
"""
try:
# TODO: Implement list_runs in EvaluationService
return get_json_result(data={"runs": [], "total": 0})
except Exception as e:
return server_error_response(e)
@manager.route('/run/<run_id>', methods=['DELETE']) # noqa: F821
@login_required
async def delete_evaluation_run(run_id):
"""Delete an evaluation run"""
try:
# TODO: Implement delete_run in EvaluationService
return get_json_result(data={"run_id": run_id})
except Exception as e:
return server_error_response(e)
# ==================== Analysis & Recommendations ====================
@manager.route('/run/<run_id>/recommendations', methods=['GET']) # noqa: F821
@login_required
async def get_recommendations(run_id):
"""Get configuration recommendations based on evaluation results"""
try:
recommendations = EvaluationService.get_recommendations(run_id)
return get_json_result(data={"recommendations": recommendations})
except Exception as e:
return server_error_response(e)
@manager.route('/compare', methods=['POST']) # noqa: F821
@login_required
@validate_request("run_ids")
async def compare_runs():
"""
Compare multiple evaluation runs.
Request body:
{
"run_ids": ["run_id1", "run_id2", "run_id3"]
}
"""
try:
req = await get_request_json()
run_ids = req.get("run_ids", [])
if not run_ids or not isinstance(run_ids, list) or len(run_ids) < 2:
return get_data_error_result(
message="run_ids must be a list with at least 2 run IDs"
)
# TODO: Implement compare_runs in EvaluationService
return get_json_result(data={"comparison": {}})
except Exception as e:
return server_error_response(e)
@manager.route('/run/<run_id>/export', methods=['GET']) # noqa: F821
@login_required
async def export_results(run_id):
"""Export evaluation results as JSON/CSV"""
try:
# format_type = request.args.get("format", "json") # TODO: Use for CSV export
result = EvaluationService.get_run_results(run_id)
if not result:
return get_data_error_result(
message="Evaluation run not found",
code=RetCode.DATA_ERROR
)
# TODO: Implement CSV export
return get_json_result(data=result)
except Exception as e:
return server_error_response(e)
# ==================== Real-time Evaluation ====================
@manager.route('/evaluate_single', methods=['POST']) # noqa: F821
@login_required
@validate_request("question", "dialog_id")
async def evaluate_single():
"""
Evaluate a single question-answer pair in real-time.
Request body:
{
"question": "Test question",
"dialog_id": "dialog_id",
"reference_answer": "Optional ground truth",
"relevant_chunk_ids": ["chunk_id1", "chunk_id2"]
}
"""
try:
# req = await get_request_json() # TODO: Use for single evaluation implementation
# TODO: Implement single evaluation
# This would execute the RAG pipeline and return metrics immediately
return get_json_result(data={
"answer": "",
"metrics": {},
"retrieved_chunks": []
})
except Exception as e:
return server_error_response(e)
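# --- Illustrative client sketch (not part of the original module) ---
# A minimal, hedged example of how a client might call the dataset-creation
# endpoint above. The host, the URL prefix under which this blueprint is
# mounted, and the API token are all placeholders/assumptions.
#
#     import httpx
#
#     resp = httpx.post(
#         "http://localhost:9380/v1/evaluation/dataset/create",   # assumed host and prefix
#         headers={"Authorization": "Bearer <token>"},             # placeholder credential
#         json={"name": "smoke-test", "kb_ids": ["<kb_id>"]},
#     )
#     resp.raise_for_status()
#     print(resp.json())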
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/apps/evaluation_app.py",
"license": "Apache License 2.0",
"lines": 389,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:api/db/services/evaluation_service.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
RAG Evaluation Service
Provides functionality for evaluating RAG system performance including:
- Dataset management
- Test case management
- Evaluation execution
- Metrics computation
- Configuration recommendations
"""
import asyncio
import logging
import queue
import threading
from typing import List, Dict, Any, Optional, Tuple
from datetime import datetime
from timeit import default_timer as timer
from api.db.db_models import EvaluationDataset, EvaluationCase, EvaluationRun, EvaluationResult
from api.db.services.common_service import CommonService
from api.db.services.dialog_service import DialogService
from common.misc_utils import get_uuid
from common.time_utils import current_timestamp
from common.constants import StatusEnum
class EvaluationService(CommonService):
"""Service for managing RAG evaluations"""
model = EvaluationDataset
# ==================== Dataset Management ====================
@classmethod
def create_dataset(cls, name: str, description: str, kb_ids: List[str],
tenant_id: str, user_id: str) -> Tuple[bool, str]:
"""
Create a new evaluation dataset.
Args:
name: Dataset name
description: Dataset description
kb_ids: List of knowledge base IDs to evaluate against
tenant_id: Tenant ID
user_id: User ID who creates the dataset
Returns:
(success, dataset_id or error_message)
"""
try:
timestamp = current_timestamp()
dataset_id = get_uuid()
dataset = {
"id": dataset_id,
"tenant_id": tenant_id,
"name": name,
"description": description,
"kb_ids": kb_ids,
"created_by": user_id,
"create_time": timestamp,
"update_time": timestamp,
"status": StatusEnum.VALID.value
}
if not EvaluationDataset.create(**dataset):
return False, "Failed to create dataset"
return True, dataset_id
except Exception as e:
logging.error(f"Error creating evaluation dataset: {e}")
return False, str(e)
@classmethod
def get_dataset(cls, dataset_id: str) -> Optional[Dict[str, Any]]:
"""Get dataset by ID"""
try:
dataset = EvaluationDataset.get_by_id(dataset_id)
if dataset:
return dataset.to_dict()
return None
except Exception as e:
logging.error(f"Error getting dataset {dataset_id}: {e}")
return None
@classmethod
def list_datasets(cls, tenant_id: str, user_id: str,
page: int = 1, page_size: int = 20) -> Dict[str, Any]:
"""List datasets for a tenant"""
try:
query = EvaluationDataset.select().where(
(EvaluationDataset.tenant_id == tenant_id) &
(EvaluationDataset.status == StatusEnum.VALID.value)
).order_by(EvaluationDataset.create_time.desc())
total = query.count()
datasets = query.paginate(page, page_size)
return {
"total": total,
"datasets": [d.to_dict() for d in datasets]
}
except Exception as e:
logging.error(f"Error listing datasets: {e}")
return {"total": 0, "datasets": []}
@classmethod
def update_dataset(cls, dataset_id: str, **kwargs) -> bool:
"""Update dataset"""
try:
kwargs["update_time"] = current_timestamp()
return EvaluationDataset.update(**kwargs).where(
EvaluationDataset.id == dataset_id
).execute() > 0
except Exception as e:
logging.error(f"Error updating dataset {dataset_id}: {e}")
return False
@classmethod
def delete_dataset(cls, dataset_id: str) -> bool:
"""Soft delete dataset"""
try:
return EvaluationDataset.update(
status=StatusEnum.INVALID.value,
update_time=current_timestamp()
).where(EvaluationDataset.id == dataset_id).execute() > 0
except Exception as e:
logging.error(f"Error deleting dataset {dataset_id}: {e}")
return False
# ==================== Test Case Management ====================
@classmethod
def add_test_case(cls, dataset_id: str, question: str,
reference_answer: Optional[str] = None,
relevant_doc_ids: Optional[List[str]] = None,
relevant_chunk_ids: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None) -> Tuple[bool, str]:
"""
Add a test case to a dataset.
Args:
dataset_id: Dataset ID
question: Test question
reference_answer: Optional ground truth answer
relevant_doc_ids: Optional list of relevant document IDs
relevant_chunk_ids: Optional list of relevant chunk IDs
metadata: Optional additional metadata
Returns:
(success, case_id or error_message)
"""
try:
case_id = get_uuid()
case = {
"id": case_id,
"dataset_id": dataset_id,
"question": question,
"reference_answer": reference_answer,
"relevant_doc_ids": relevant_doc_ids,
"relevant_chunk_ids": relevant_chunk_ids,
"metadata": metadata,
"create_time": current_timestamp()
}
if not EvaluationCase.create(**case):
return False, "Failed to create test case"
return True, case_id
except Exception as e:
logging.error(f"Error adding test case: {e}")
return False, str(e)
@classmethod
def get_test_cases(cls, dataset_id: str) -> List[Dict[str, Any]]:
"""Get all test cases for a dataset"""
try:
cases = EvaluationCase.select().where(
EvaluationCase.dataset_id == dataset_id
).order_by(EvaluationCase.create_time)
return [c.to_dict() for c in cases]
except Exception as e:
logging.error(f"Error getting test cases for dataset {dataset_id}: {e}")
return []
@classmethod
def delete_test_case(cls, case_id: str) -> bool:
"""Delete a test case"""
try:
return EvaluationCase.delete().where(
EvaluationCase.id == case_id
).execute() > 0
except Exception as e:
logging.error(f"Error deleting test case {case_id}: {e}")
return False
@classmethod
def import_test_cases(cls, dataset_id: str, cases: List[Dict[str, Any]]) -> Tuple[int, int]:
"""
Bulk import test cases from a list.
Args:
dataset_id: Dataset ID
cases: List of test case dictionaries
Returns:
(success_count, failure_count)
"""
success_count = 0
failure_count = 0
case_instances = []
if not cases:
return success_count, failure_count
cur_timestamp = current_timestamp()
try:
for case_data in cases:
case_id = get_uuid()
case_info = {
"id": case_id,
"dataset_id": dataset_id,
"question": case_data.get("question", ""),
"reference_answer": case_data.get("reference_answer"),
"relevant_doc_ids": case_data.get("relevant_doc_ids"),
"relevant_chunk_ids": case_data.get("relevant_chunk_ids"),
"metadata": case_data.get("metadata"),
"create_time": cur_timestamp
}
case_instances.append(EvaluationCase(**case_info))
EvaluationCase.bulk_create(case_instances, batch_size=300)
success_count = len(case_instances)
failure_count = 0
except Exception as e:
logging.error(f"Error bulk importing test cases: {str(e)}")
failure_count = len(cases)
success_count = 0
return success_count, failure_count
# ==================== Evaluation Execution ====================
@classmethod
def start_evaluation(cls, dataset_id: str, dialog_id: str,
user_id: str, name: Optional[str] = None) -> Tuple[bool, str]:
"""
Start an evaluation run.
Args:
dataset_id: Dataset ID
dialog_id: Dialog configuration to evaluate
user_id: User ID who starts the run
name: Optional run name
Returns:
(success, run_id or error_message)
"""
try:
# Get dialog configuration
success, dialog = DialogService.get_by_id(dialog_id)
if not success:
return False, "Dialog not found"
# Create evaluation run
run_id = get_uuid()
if not name:
name = f"Evaluation Run {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
run = {
"id": run_id,
"dataset_id": dataset_id,
"dialog_id": dialog_id,
"name": name,
"config_snapshot": dialog.to_dict(),
"metrics_summary": None,
"status": "RUNNING",
"created_by": user_id,
"create_time": current_timestamp(),
"complete_time": None
}
if not EvaluationRun.create(**run):
return False, "Failed to create evaluation run"
# Execute evaluation asynchronously (in production, use task queue)
# For now, we'll execute synchronously
cls._execute_evaluation(run_id, dataset_id, dialog)
return True, run_id
except Exception as e:
logging.error(f"Error starting evaluation: {e}")
return False, str(e)
@classmethod
def _execute_evaluation(cls, run_id: str, dataset_id: str, dialog: Any):
"""
Execute evaluation for all test cases.
This method runs the RAG pipeline for each test case and computes metrics.
"""
try:
# Get all test cases
test_cases = cls.get_test_cases(dataset_id)
if not test_cases:
EvaluationRun.update(
status="FAILED",
complete_time=current_timestamp()
).where(EvaluationRun.id == run_id).execute()
return
# Execute each test case
results = []
for case in test_cases:
result = cls._evaluate_single_case(run_id, case, dialog)
if result:
results.append(result)
# Compute summary metrics
metrics_summary = cls._compute_summary_metrics(results)
# Update run status
EvaluationRun.update(
status="COMPLETED",
metrics_summary=metrics_summary,
complete_time=current_timestamp()
).where(EvaluationRun.id == run_id).execute()
except Exception as e:
logging.error(f"Error executing evaluation {run_id}: {e}")
EvaluationRun.update(
status="FAILED",
complete_time=current_timestamp()
).where(EvaluationRun.id == run_id).execute()
@classmethod
def _evaluate_single_case(cls, run_id: str, case: Dict[str, Any],
dialog: Any) -> Optional[Dict[str, Any]]:
"""
Evaluate a single test case.
Args:
run_id: Evaluation run ID
case: Test case dictionary
dialog: Dialog configuration
Returns:
Result dictionary or None if failed
"""
try:
# Prepare messages
messages = [{"role": "user", "content": case["question"]}]
# Execute RAG pipeline
start_time = timer()
answer = ""
retrieved_chunks = []
def _sync_from_async_gen(async_gen):
result_queue: queue.Queue = queue.Queue()
def runner():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
async def consume():
try:
async for item in async_gen:
result_queue.put(item)
except Exception as e:
result_queue.put(e)
finally:
result_queue.put(StopIteration)
loop.run_until_complete(consume())
loop.close()
threading.Thread(target=runner, daemon=True).start()
while True:
item = result_queue.get()
if item is StopIteration:
break
if isinstance(item, Exception):
raise item
yield item
def chat(dialog, messages, stream=True, **kwargs):
from api.db.services.dialog_service import async_chat
return _sync_from_async_gen(async_chat(dialog, messages, stream=stream, **kwargs))
for ans in chat(dialog, messages, stream=False):
if isinstance(ans, dict):
answer = ans.get("answer", "")
retrieved_chunks = ans.get("reference", {}).get("chunks", [])
break
execution_time = timer() - start_time
# Compute metrics
metrics = cls._compute_metrics(
question=case["question"],
generated_answer=answer,
reference_answer=case.get("reference_answer"),
retrieved_chunks=retrieved_chunks,
relevant_chunk_ids=case.get("relevant_chunk_ids"),
dialog=dialog
)
# Save result
result_id = get_uuid()
result = {
"id": result_id,
"run_id": run_id,
"case_id": case["id"],
"generated_answer": answer,
"retrieved_chunks": retrieved_chunks,
"metrics": metrics,
"execution_time": execution_time,
"token_usage": None, # TODO: Track token usage
"create_time": current_timestamp()
}
EvaluationResult.create(**result)
return result
except Exception as e:
logging.error(f"Error evaluating case {case.get('id')}: {e}")
return None
@classmethod
def _compute_metrics(cls, question: str, generated_answer: str,
reference_answer: Optional[str],
retrieved_chunks: List[Dict[str, Any]],
relevant_chunk_ids: Optional[List[str]],
dialog: Any) -> Dict[str, float]:
"""
Compute evaluation metrics for a single test case.
Returns:
Dictionary of metric names to values
"""
metrics = {}
# Retrieval metrics (if ground truth chunks provided)
if relevant_chunk_ids:
retrieved_ids = [c.get("chunk_id") for c in retrieved_chunks]
metrics.update(cls._compute_retrieval_metrics(retrieved_ids, relevant_chunk_ids))
# Generation metrics
if generated_answer:
# Basic metrics
metrics["answer_length"] = len(generated_answer)
metrics["has_answer"] = 1.0 if generated_answer.strip() else 0.0
# TODO: Implement advanced metrics using LLM-as-judge
# - Faithfulness (hallucination detection)
# - Answer relevance
# - Context relevance
# - Semantic similarity (if reference answer provided)
return metrics
@classmethod
def _compute_retrieval_metrics(cls, retrieved_ids: List[str],
relevant_ids: List[str]) -> Dict[str, float]:
"""
Compute retrieval metrics.
Args:
retrieved_ids: List of retrieved chunk IDs
relevant_ids: List of relevant chunk IDs (ground truth)
Returns:
Dictionary of retrieval metrics
"""
if not relevant_ids:
return {}
retrieved_set = set(retrieved_ids)
relevant_set = set(relevant_ids)
# Precision: proportion of retrieved that are relevant
precision = len(retrieved_set & relevant_set) / len(retrieved_set) if retrieved_set else 0.0
# Recall: proportion of relevant that were retrieved
recall = len(retrieved_set & relevant_set) / len(relevant_set) if relevant_set else 0.0
# F1 score
f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0.0
# Hit rate: whether any relevant chunk was retrieved
hit_rate = 1.0 if (retrieved_set & relevant_set) else 0.0
# MRR (Mean Reciprocal Rank): position of first relevant chunk
mrr = 0.0
for i, chunk_id in enumerate(retrieved_ids, 1):
if chunk_id in relevant_set:
mrr = 1.0 / i
break
return {
"precision": precision,
"recall": recall,
"f1_score": f1,
"hit_rate": hit_rate,
"mrr": mrr
}
@classmethod
def _compute_summary_metrics(cls, results: List[Dict[str, Any]]) -> Dict[str, Any]:
"""
Compute summary metrics across all test cases.
Args:
results: List of result dictionaries
Returns:
Summary metrics dictionary
"""
if not results:
return {}
# Aggregate metrics
metric_sums = {}
metric_counts = {}
for result in results:
metrics = result.get("metrics", {})
for key, value in metrics.items():
if isinstance(value, (int, float)):
metric_sums[key] = metric_sums.get(key, 0) + value
metric_counts[key] = metric_counts.get(key, 0) + 1
# Compute averages
summary = {
"total_cases": len(results),
"avg_execution_time": sum(r.get("execution_time", 0) for r in results) / len(results)
}
for key in metric_sums:
summary[f"avg_{key}"] = metric_sums[key] / metric_counts[key]
return summary
# ==================== Results & Analysis ====================
@classmethod
def get_run_results(cls, run_id: str) -> Dict[str, Any]:
"""Get results for an evaluation run"""
try:
run = EvaluationRun.get_by_id(run_id)
if not run:
return {}
results = EvaluationResult.select().where(
EvaluationResult.run_id == run_id
).order_by(EvaluationResult.create_time)
return {
"run": run.to_dict(),
"results": [r.to_dict() for r in results]
}
except Exception as e:
logging.error(f"Error getting run results {run_id}: {e}")
return {}
@classmethod
def get_recommendations(cls, run_id: str) -> List[Dict[str, Any]]:
"""
Analyze evaluation results and provide configuration recommendations.
Args:
run_id: Evaluation run ID
Returns:
List of recommendation dictionaries
"""
try:
run = EvaluationRun.get_by_id(run_id)
if not run or not run.metrics_summary:
return []
metrics = run.metrics_summary
recommendations = []
# Low precision: retrieving irrelevant chunks
if metrics.get("avg_precision", 1.0) < 0.7:
recommendations.append({
"issue": "Low Precision",
"severity": "high",
"description": "System is retrieving many irrelevant chunks",
"suggestions": [
"Increase similarity_threshold to filter out less relevant chunks",
"Enable reranking to improve chunk ordering",
"Reduce top_k to return fewer chunks"
]
})
# Low recall: missing relevant chunks
if metrics.get("avg_recall", 1.0) < 0.7:
recommendations.append({
"issue": "Low Recall",
"severity": "high",
"description": "System is missing relevant chunks",
"suggestions": [
"Increase top_k to retrieve more chunks",
"Lower similarity_threshold to be more inclusive",
"Enable hybrid search (keyword + semantic)",
"Check chunk size - may be too large or too small"
]
})
# Slow response time
if metrics.get("avg_execution_time", 0) > 5.0:
recommendations.append({
"issue": "Slow Response Time",
"severity": "medium",
"description": f"Average response time is {metrics['avg_execution_time']:.2f}s",
"suggestions": [
"Reduce top_k to retrieve fewer chunks",
"Optimize embedding model selection",
"Consider caching frequently asked questions"
]
})
return recommendations
except Exception as e:
logging.error(f"Error generating recommendations for run {run_id}: {e}")
return []
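# --- Worked example (illustrative only, not part of the original service) ---
# Assuming retrieved_ids = ["c1", "c2", "c3"] and relevant_ids = ["c2", "c4"],
# _compute_retrieval_metrics would yield approximately:
#   precision = |{"c2"}| / |{"c1", "c2", "c3"}| = 1/3 ≈ 0.333
#   recall    = |{"c2"}| / |{"c2", "c4"}|       = 1/2 = 0.5
#   f1_score  = 2 * (1/3 * 1/2) / (1/3 + 1/2)   = 0.4
#   hit_rate  = 1.0   (at least one relevant chunk was retrieved)
#   mrr       = 0.5   (the first relevant chunk appears at rank 2)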
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/db/services/evaluation_service.py",
"license": "Apache License 2.0",
"lines": 544,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/http_client.py | # Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
import time
from typing import Any, Dict, Optional
from urllib.parse import urlparse, urlunparse
from common import settings
import httpx
logger = logging.getLogger(__name__)
# Default knobs; keep conservative to avoid unexpected behavioural changes.
DEFAULT_TIMEOUT = float(os.environ.get("HTTP_CLIENT_TIMEOUT", "15"))
# Align with requests default: follow redirects with a max of 30 unless overridden.
DEFAULT_FOLLOW_REDIRECTS = bool(
int(os.environ.get("HTTP_CLIENT_FOLLOW_REDIRECTS", "1"))
)
DEFAULT_MAX_REDIRECTS = int(os.environ.get("HTTP_CLIENT_MAX_REDIRECTS", "30"))
DEFAULT_MAX_RETRIES = int(os.environ.get("HTTP_CLIENT_MAX_RETRIES", "2"))
DEFAULT_BACKOFF_FACTOR = float(os.environ.get("HTTP_CLIENT_BACKOFF_FACTOR", "0.5"))
DEFAULT_PROXY = os.environ.get("HTTP_CLIENT_PROXY")
DEFAULT_USER_AGENT = os.environ.get("HTTP_CLIENT_USER_AGENT", "ragflow-http-client")
def _clean_headers(
headers: Optional[Dict[str, str]], auth_token: Optional[str] = None
) -> Optional[Dict[str, str]]:
merged_headers: Dict[str, str] = {}
if DEFAULT_USER_AGENT:
merged_headers["User-Agent"] = DEFAULT_USER_AGENT
if auth_token:
merged_headers["Authorization"] = auth_token
if headers is None:
return merged_headers or None
merged_headers.update({str(k): str(v) for k, v in headers.items() if v is not None})
return merged_headers or None
def _get_delay(backoff_factor: float, attempt: int) -> float:
return backoff_factor * (2**attempt)
# List of sensitive parameters to redact from URLs before logging
_SENSITIVE_QUERY_KEYS = {"client_secret", "secret", "code", "access_token", "refresh_token", "password", "token", "app_secret"}
def _redact_sensitive_url_params(url: str) -> str:
"""
Return a version of the URL that is safe to log.
We intentionally drop query parameters and userinfo to avoid leaking
credentials or tokens via logs. Only scheme, host, port and path
are preserved.
"""
try:
parsed = urlparse(url)
# Remove any potential userinfo (username:password@)
netloc = parsed.hostname or ""
if parsed.port:
netloc = f"{netloc}:{parsed.port}"
# Reconstruct URL without query, params, fragment, or userinfo.
safe_url = urlunparse(
(
parsed.scheme,
netloc,
parsed.path,
"", # params
"", # query
"", # fragment
)
)
return safe_url
except Exception:
# If parsing fails, fall back to omitting the URL entirely.
return "<redacted-url>"
def _is_sensitive_url(url: str) -> bool:
"""Return True if URL is one of the configured OAuth endpoints."""
# Collect known sensitive endpoint URLs from settings
oauth_urls = set()
# GitHub OAuth endpoints
try:
if settings.GITHUB_OAUTH is not None:
url_val = settings.GITHUB_OAUTH.get("url")
if url_val:
oauth_urls.add(url_val)
except Exception:
pass
# Feishu OAuth endpoints
try:
if settings.FEISHU_OAUTH is not None:
for k in ("app_access_token_url", "user_access_token_url"):
url_val = settings.FEISHU_OAUTH.get(k)
if url_val:
oauth_urls.add(url_val)
except Exception:
pass
# Defensive normalization: compare only scheme+netloc+path
url_obj = urlparse(url)
for sensitive_url in oauth_urls:
sensitive_obj = urlparse(sensitive_url)
if (url_obj.scheme, url_obj.netloc, url_obj.path) == (sensitive_obj.scheme, sensitive_obj.netloc, sensitive_obj.path):
return True
return False
async def async_request(
method: str,
url: str,
*,
request_timeout: float | httpx.Timeout | None = None,
follow_redirects: bool | None = None,
max_redirects: Optional[int] = None,
headers: Optional[Dict[str, str]] = None,
auth_token: Optional[str] = None,
retries: Optional[int] = None,
backoff_factor: Optional[float] = None,
proxy: Any = None,
**kwargs: Any,
) -> httpx.Response:
"""Lightweight async HTTP wrapper using httpx.AsyncClient with safe defaults."""
timeout = request_timeout if request_timeout is not None else DEFAULT_TIMEOUT
follow_redirects = (
DEFAULT_FOLLOW_REDIRECTS if follow_redirects is None else follow_redirects
)
max_redirects = DEFAULT_MAX_REDIRECTS if max_redirects is None else max_redirects
retries = DEFAULT_MAX_RETRIES if retries is None else max(retries, 0)
backoff_factor = (
DEFAULT_BACKOFF_FACTOR if backoff_factor is None else backoff_factor
)
headers = _clean_headers(headers, auth_token=auth_token)
proxy = DEFAULT_PROXY if proxy is None else proxy
async with httpx.AsyncClient(
timeout=timeout,
follow_redirects=follow_redirects,
max_redirects=max_redirects,
proxy=proxy,
) as client:
last_exc: Exception | None = None
for attempt in range(retries + 1):
try:
start = time.monotonic()
response = await client.request(
method=method, url=url, headers=headers, **kwargs
)
duration = time.monotonic() - start
if not _is_sensitive_url(url):
log_url = _redact_sensitive_url_params(url)
logger.debug(f"async_request {method} {log_url} -> {response.status_code} in {duration:.3f}s")
return response
except httpx.RequestError as exc:
last_exc = exc
if attempt >= retries:
if not _is_sensitive_url(url):
log_url = _redact_sensitive_url_params(url)
logger.warning(f"async_request exhausted retries for {method} {log_url}")
raise
delay = _get_delay(backoff_factor, attempt)
if not _is_sensitive_url(url):
log_url = _redact_sensitive_url_params(url)
logger.warning(
f"async_request attempt {attempt + 1}/{retries + 1} failed for {method} {log_url}; retrying in {delay:.2f}s"
)
await asyncio.sleep(delay)
raise last_exc # pragma: no cover
def sync_request(
method: str,
url: str,
*,
timeout: float | httpx.Timeout | None = None,
follow_redirects: bool | None = None,
max_redirects: Optional[int] = None,
headers: Optional[Dict[str, str]] = None,
auth_token: Optional[str] = None,
retries: Optional[int] = None,
backoff_factor: Optional[float] = None,
proxy: Any = None,
**kwargs: Any,
) -> httpx.Response:
"""Synchronous counterpart to async_request, for CLI/tests or sync contexts."""
timeout = timeout if timeout is not None else DEFAULT_TIMEOUT
follow_redirects = (
DEFAULT_FOLLOW_REDIRECTS if follow_redirects is None else follow_redirects
)
max_redirects = DEFAULT_MAX_REDIRECTS if max_redirects is None else max_redirects
retries = DEFAULT_MAX_RETRIES if retries is None else max(retries, 0)
backoff_factor = (
DEFAULT_BACKOFF_FACTOR if backoff_factor is None else backoff_factor
)
headers = _clean_headers(headers, auth_token=auth_token)
proxy = DEFAULT_PROXY if proxy is None else proxy
with httpx.Client(
timeout=timeout,
follow_redirects=follow_redirects,
max_redirects=max_redirects,
proxy=proxy,
) as client:
last_exc: Exception | None = None
for attempt in range(retries + 1):
try:
start = time.monotonic()
response = client.request(
method=method, url=url, headers=headers, **kwargs
)
duration = time.monotonic() - start
logger.debug(
f"sync_request {method} {url} -> {response.status_code} in {duration:.3f}s"
)
return response
except httpx.RequestError as exc:
last_exc = exc
if attempt >= retries:
logger.warning(
f"sync_request exhausted retries for {method} {url}: {exc}"
)
raise
delay = _get_delay(backoff_factor, attempt)
logger.warning(
f"sync_request attempt {attempt + 1}/{retries + 1} failed for {method} {url}: {exc}; retrying in {delay:.2f}s"
)
time.sleep(delay)
raise last_exc # pragma: no cover
__all__ = [
"async_request",
"sync_request",
"DEFAULT_TIMEOUT",
"DEFAULT_FOLLOW_REDIRECTS",
"DEFAULT_MAX_REDIRECTS",
"DEFAULT_MAX_RETRIES",
"DEFAULT_BACKOFF_FACTOR",
"DEFAULT_PROXY",
"DEFAULT_USER_AGENT",
]
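# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of calling the synchronous wrapper; the URL is a
# placeholder and the overrides simply demonstrate the retry/backoff knobs.
#
#     from common.http_client import sync_request
#
#     resp = sync_request(
#         "GET",
#         "https://example.com/health",           # placeholder URL
#         timeout=5.0,                            # overrides DEFAULT_TIMEOUT
#         retries=1,                              # one retry; delay = backoff_factor * 2**attempt
#         headers={"Accept": "application/json"},
#     )
#     resp.raise_for_status()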
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/http_client.py",
"license": "Apache License 2.0",
"lines": 232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
infiniflow/ragflow:agent/component/exit_loop.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from agent.component.base import ComponentBase, ComponentParamBase
class ExitLoopParam(ComponentParamBase, ABC):
def check(self):
return True
class ExitLoop(ComponentBase, ABC):
component_name = "ExitLoop"
def _invoke(self, **kwargs):
pass
def thoughts(self) -> str:
return "" | {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/component/exit_loop.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:agent/component/loop.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from agent.component.base import ComponentBase, ComponentParamBase
class LoopParam(ComponentParamBase):
"""
Define the Loop component parameters.
"""
def __init__(self):
super().__init__()
self.loop_variables = []
self.loop_termination_condition = []
self.maximum_loop_count = 0
def get_input_form(self) -> dict[str, dict]:
return {
"items": {
"type": "json",
"name": "Items"
}
}
def check(self):
return True
class Loop(ComponentBase, ABC):
component_name = "Loop"
def get_start(self):
for cid in self._canvas.components.keys():
if self._canvas.get_component(cid)["obj"].component_name.lower() != "loopitem":
continue
if self._canvas.get_component(cid)["parent_id"] == self._id:
return cid
def _invoke(self, **kwargs):
if self.check_if_canceled("Loop processing"):
return
for item in self._param.loop_variables:
# Every loop variable must define its name, input mode, value and type.
if any([not item.get("variable"), not item.get("input_mode"), not item.get("value"), not item.get("type")]):
raise ValueError("Loop variable is not complete.")
if item["input_mode"] == "variable":
self.set_output(item["variable"], self._canvas.get_variable_value(item["value"]))
elif item["input_mode"] == "constant":
self.set_output(item["variable"], item["value"])
else:
if item["type"] == "number":
self.set_output(item["variable"], 0)
elif item["type"] == "string":
self.set_output(item["variable"], "")
elif item["type"] == "boolean":
self.set_output(item["variable"], False)
elif item["type"].startswith("object"):
self.set_output(item["variable"], {})
elif item["type"].startswith("array"):
self.set_output(item["variable"], [])
else:
self.set_output(item["variable"], "")
def thoughts(self) -> str:
return "Loop from canvas." | {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/component/loop.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/component/loopitem.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from agent.component.base import ComponentBase, ComponentParamBase
class LoopItemParam(ComponentParamBase):
"""
Define the LoopItem component parameters.
"""
def check(self):
return True
class LoopItem(ComponentBase, ABC):
component_name = "LoopItem"
def __init__(self, canvas, id, param: ComponentParamBase):
super().__init__(canvas, id, param)
self._idx = 0
def _invoke(self, **kwargs):
if self.check_if_canceled("LoopItem processing"):
return
parent = self.get_parent()
maximum_loop_count = parent._param.maximum_loop_count
if self._idx >= maximum_loop_count:
self._idx = -1
return
if self._idx > 0:
if self.check_if_canceled("LoopItem processing"):
return
self._idx += 1
def evaluate_condition(self,var, operator, value):
if isinstance(var, str):
if operator == "contains":
return value in var
elif operator == "not contains":
return value not in var
elif operator == "start with":
return var.startswith(value)
elif operator == "end with":
return var.endswith(value)
elif operator == "is":
return var == value
elif operator == "is not":
return var != value
elif operator == "empty":
return var == ""
elif operator == "not empty":
return var != ""
elif isinstance(var, (int, float)):
if operator == "=":
return var == value
elif operator == "≠":
return var != value
elif operator == ">":
return var > value
elif operator == "<":
return var < value
elif operator == "≥":
return var >= value
elif operator == "≤":
return var <= value
elif operator == "empty":
return var is None
elif operator == "not empty":
return var is not None
elif isinstance(var, bool):
if operator == "is":
return var is value
elif operator == "is not":
return var is not value
elif operator == "empty":
return var is None
elif operator == "not empty":
return var is not None
elif isinstance(var, dict):
if operator == "empty":
return len(var) == 0
elif operator == "not empty":
return len(var) > 0
elif isinstance(var, list):
if operator == "contains":
return value in var
elif operator == "not contains":
return value not in var
elif operator == "is":
return var == value
elif operator == "is not":
return var != value
elif operator == "empty":
return len(var) == 0
elif operator == "not empty":
return len(var) > 0
elif var is None:
if operator == "empty":
return True
return False
raise Exception(f"Invalid operator: {operator}")
def end(self):
if self._idx == -1:
return True
parent = self.get_parent()
logical_operator = parent._param.logical_operator if hasattr(parent._param, "logical_operator") else "and"
conditions = []
for item in parent._param.loop_termination_condition:
if not item.get("variable") or not item.get("operator"):
raise ValueError("Loop condition is incomplete.")
var = self._canvas.get_variable_value(item["variable"])
operator = item["operator"]
input_mode = item.get("input_mode", "constant")
if input_mode == "variable":
value = self._canvas.get_variable_value(item.get("value", ""))
elif input_mode == "constant":
value = item.get("value", "")
else:
raise ValueError("Invalid input mode.")
conditions.append(self.evaluate_condition(var, operator, value))
should_end = (
all(conditions) if logical_operator == "and"
else any(conditions) if logical_operator == "or"
else None
)
if should_end is None:
raise ValueError("Invalid logical operator,should be 'and' or 'or'.")
if should_end:
self._idx = -1
return True
return False
def next(self):
if self._idx == -1:
self._idx = 0
else:
self._idx += 1
if self._idx >= len(self._items):
self._idx = -1
return False
def thoughts(self) -> str:
return "Next turn..." | {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/component/loopitem.py",
"license": "Apache License 2.0",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/webdav_connector.py | """WebDAV connector"""
import logging
import os
from datetime import datetime, timezone
from typing import Any, Optional
from webdav4.client import Client as WebDAVClient
from common.data_source.utils import (
get_file_ext,
)
from common.data_source.config import DocumentSource, INDEX_BATCH_SIZE, BLOB_STORAGE_SIZE_THRESHOLD
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
CredentialExpiredError,
InsufficientPermissionsError
)
from common.data_source.interfaces import LoadConnector, PollConnector
from common.data_source.models import Document, SecondsSinceUnixEpoch, GenerateDocumentsOutput
class WebDAVConnector(LoadConnector, PollConnector):
"""WebDAV connector for syncing files from WebDAV servers"""
def __init__(
self,
base_url: str,
remote_path: str = "/",
batch_size: int = INDEX_BATCH_SIZE,
) -> None:
"""Initialize WebDAV connector
Args:
base_url: Base URL of the WebDAV server (e.g., "https://webdav.example.com")
remote_path: Remote path to sync from (default: "/")
batch_size: Number of documents per batch
"""
self.base_url = base_url.rstrip("/")
if not remote_path:
remote_path = "/"
if not remote_path.startswith("/"):
remote_path = f"/{remote_path}"
if remote_path.endswith("/") and remote_path != "/":
remote_path = remote_path.rstrip("/")
self.remote_path = remote_path
self.batch_size = batch_size
self.client: Optional[WebDAVClient] = None
self._allow_images: bool | None = None
self.size_threshold: int | None = BLOB_STORAGE_SIZE_THRESHOLD
def set_allow_images(self, allow_images: bool) -> None:
"""Set whether to process images"""
logging.info(f"Setting allow_images to {allow_images}.")
self._allow_images = allow_images
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Load credentials and initialize WebDAV client
Args:
credentials: Dictionary containing 'username' and 'password'
Returns:
None
Raises:
ConnectorMissingCredentialError: If required credentials are missing
"""
logging.debug(f"Loading credentials for WebDAV server {self.base_url}")
username = credentials.get("username")
password = credentials.get("password")
if not username or not password:
raise ConnectorMissingCredentialError(
"WebDAV requires 'username' and 'password' credentials"
)
try:
# Initialize WebDAV client
self.client = WebDAVClient(
base_url=self.base_url,
auth=(username, password)
)
except Exception as e:
logging.error(f"Failed to connect to WebDAV server: {e}")
raise ConnectorMissingCredentialError(
f"Failed to authenticate with WebDAV server: {e}"
)
return None
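# --- Illustrative usage sketch (not part of the original connector) ---
# A hedged example of wiring the connector together; the server URL, path and
# credentials are placeholders.
#
#     connector = WebDAVConnector(base_url="https://webdav.example.com", remote_path="/docs")
#     connector.load_credentials({"username": "<user>", "password": "<password>"})
#     for batch in connector.load_from_state():
#         print(f"fetched {len(batch)} documents")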
def _list_files_recursive(
self,
path: str,
start: datetime,
end: datetime,
) -> list[tuple[str, dict]]:
"""Recursively list all files in the given path
Args:
path: Path to list files from
start: Start datetime for filtering
end: End datetime for filtering
Returns:
List of tuples containing (file_path, file_info)
"""
if self.client is None:
raise ConnectorMissingCredentialError("WebDAV client not initialized")
files = []
try:
logging.debug(f"Listing directory: {path}")
for item in self.client.ls(path, detail=True):
item_path = item['name']
if item_path == path or item_path == path + '/':
continue
logging.debug(f"Found item: {item_path}, type: {item.get('type')}")
if item.get('type') == 'directory':
try:
files.extend(self._list_files_recursive(item_path, start, end))
except Exception as e:
logging.error(f"Error recursing into directory {item_path}: {e}")
continue
else:
try:
modified_time = item.get('modified')
if modified_time:
if isinstance(modified_time, datetime):
modified = modified_time
if modified.tzinfo is None:
modified = modified.replace(tzinfo=timezone.utc)
elif isinstance(modified_time, str):
try:
modified = datetime.strptime(modified_time, '%a, %d %b %Y %H:%M:%S %Z')
modified = modified.replace(tzinfo=timezone.utc)
except (ValueError, TypeError):
try:
modified = datetime.fromisoformat(modified_time.replace('Z', '+00:00'))
except (ValueError, TypeError):
logging.warning(f"Could not parse modified time for {item_path}: {modified_time}")
modified = datetime.now(timezone.utc)
else:
modified = datetime.now(timezone.utc)
else:
modified = datetime.now(timezone.utc)
logging.debug(f"File {item_path}: modified={modified}, start={start}, end={end}, include={start < modified <= end}")
if start < modified <= end:
files.append((item_path, item))
else:
logging.debug(f"File {item_path} filtered out by time range")
except Exception as e:
logging.error(f"Error processing file {item_path}: {e}")
continue
except Exception as e:
logging.error(f"Error listing directory {path}: {e}")
return files
def _yield_webdav_documents(
self,
start: datetime,
end: datetime,
) -> GenerateDocumentsOutput:
"""Generate documents from WebDAV server
Args:
start: Start datetime for filtering
end: End datetime for filtering
Yields:
Batches of documents
"""
if self.client is None:
raise ConnectorMissingCredentialError("WebDAV client not initialized")
logging.info(f"Searching for files in {self.remote_path} between {start} and {end}")
files = self._list_files_recursive(self.remote_path, start, end)
logging.info(f"Found {len(files)} files matching time criteria")
filename_counts: dict[str, int] = {}
for file_path, _ in files:
file_name = os.path.basename(file_path)
filename_counts[file_name] = filename_counts.get(file_name, 0) + 1
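# Pre-count basenames so that files sharing the same name in different directories can be
# given path-based semantic identifiers below.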
from io import BytesIO  # used to buffer each downloaded file in memory
batch: list[Document] = []
for file_path, file_info in files:
file_name = os.path.basename(file_path)
size_bytes = file_info.get('size', 0)
if (
self.size_threshold is not None
and isinstance(size_bytes, int)
and size_bytes > self.size_threshold
):
logging.warning(
f"{file_name} exceeds size threshold of {self.size_threshold}. Skipping."
)
continue
try:
logging.debug(f"Downloading file: {file_path}")
buffer = BytesIO()
self.client.download_fileobj(file_path, buffer)
blob = buffer.getvalue()
if blob is None or len(blob) == 0:
logging.warning(f"Downloaded content is empty for {file_path}")
continue
modified_time = file_info.get('modified')
if modified_time:
if isinstance(modified_time, datetime):
modified = modified_time
if modified.tzinfo is None:
modified = modified.replace(tzinfo=timezone.utc)
elif isinstance(modified_time, str):
try:
modified = datetime.strptime(modified_time, '%a, %d %b %Y %H:%M:%S %Z')
modified = modified.replace(tzinfo=timezone.utc)
except (ValueError, TypeError):
try:
modified = datetime.fromisoformat(modified_time.replace('Z', '+00:00'))
except (ValueError, TypeError):
logging.warning(f"Could not parse modified time for {file_path}: {modified_time}")
modified = datetime.now(timezone.utc)
else:
modified = datetime.now(timezone.utc)
else:
modified = datetime.now(timezone.utc)
if filename_counts.get(file_name, 0) > 1:
relative_path = file_path
if file_path.startswith(self.remote_path):
relative_path = file_path[len(self.remote_path):]
if relative_path.startswith('/'):
relative_path = relative_path[1:]
semantic_id = relative_path.replace('/', ' / ') if relative_path else file_name
else:
semantic_id = file_name
batch.append(
Document(
id=f"webdav:{self.base_url}:{file_path}",
blob=blob,
source=DocumentSource.WEBDAV,
semantic_identifier=semantic_id,
extension=get_file_ext(file_name),
doc_updated_at=modified,
size_bytes=size_bytes if size_bytes else 0
)
)
if len(batch) == self.batch_size:
yield batch
batch = []
except Exception as e:
logging.exception(f"Error downloading file {file_path}: {e}")
if batch:
yield batch
def load_from_state(self) -> GenerateDocumentsOutput:
"""Load all documents from WebDAV server
Yields:
Batches of documents
"""
logging.debug(f"Loading documents from WebDAV server {self.base_url}")
return self._yield_webdav_documents(
start=datetime(1970, 1, 1, tzinfo=timezone.utc),
end=datetime.now(timezone.utc),
)
def poll_source(
self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch
) -> GenerateDocumentsOutput:
"""Poll WebDAV server for updated documents
Args:
start: Start timestamp (seconds since Unix epoch)
end: End timestamp (seconds since Unix epoch)
Yields:
Batches of documents
"""
if self.client is None:
raise ConnectorMissingCredentialError("WebDAV client not initialized")
start_datetime = datetime.fromtimestamp(start, tz=timezone.utc)
end_datetime = datetime.fromtimestamp(end, tz=timezone.utc)
for batch in self._yield_webdav_documents(start_datetime, end_datetime):
yield batch
def validate_connector_settings(self) -> None:
"""Validate WebDAV connector settings.
Validation should exercise the same code-paths used by the connector
(directory listing / PROPFIND), avoiding exists() which may probe with
methods that differ across servers.
"""
if self.client is None:
raise ConnectorMissingCredentialError("WebDAV credentials not loaded.")
if not self.base_url:
raise ConnectorValidationError("No base URL was provided in connector settings.")
# Normalize directory path: for collections, many servers behave better with trailing '/'
test_path = self.remote_path or "/"
if not test_path.startswith("/"):
test_path = f"/{test_path}"
if test_path != "/" and not test_path.endswith("/"):
test_path = f"{test_path}/"
try:
# Use the same behavior as real sync: list directory with details (PROPFIND)
self.client.ls(test_path, detail=True)
except Exception as e:
# Prefer structured status codes if present on the exception/response
status = None
for attr in ("status_code", "code"):
v = getattr(e, attr, None)
if isinstance(v, int):
status = v
break
if status is None:
resp = getattr(e, "response", None)
v = getattr(resp, "status_code", None)
if isinstance(v, int):
status = v
# If we can classify by status code, do it
if status == 401:
raise CredentialExpiredError("WebDAV credentials appear invalid or expired.")
if status == 403:
raise InsufficientPermissionsError(
f"Insufficient permissions to access path '{self.remote_path}' on WebDAV server."
)
if status == 404:
raise ConnectorValidationError(
f"Remote path '{self.remote_path}' does not exist on WebDAV server."
)
# Fallback: avoid brittle substring matching that caused false positives.
# Provide the original exception for diagnosis.
raise ConnectorValidationError(
f"WebDAV validation failed for path '{test_path}': {repr(e)}"
)
if __name__ == "__main__":
# Credentials are read from the environment; set WEBDAV_USERNAME and WEBDAV_PASSWORD before running.
credentials_dict = {
"username": os.environ.get("WEBDAV_USERNAME"),
"password": os.environ.get("WEBDAV_PASSWORD"),
}
connector = WebDAVConnector(
base_url="http://172.17.0.1:8080/",
remote_path="/",
)
try:
connector.load_credentials(credentials_dict)
connector.validate_connector_settings()
document_batch_generator = connector.load_from_state()
for document_batch in document_batch_generator:
print("First batch of documents:")
for doc in document_batch:
print(f"Document ID: {doc.id}")
print(f"Semantic Identifier: {doc.semantic_identifier}")
print(f"Source: {doc.source}")
print(f"Updated At: {doc.doc_updated_at}")
print("---")
break
except ConnectorMissingCredentialError as e:
print(f"Error: {e}")
except Exception as e:
print(f"An unexpected error occurred: {e}")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/webdav_connector.py",
"license": "Apache License 2.0",
"lines": 333,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/moodle_connector.py | from __future__ import annotations
import logging
import os
from collections.abc import Generator
from datetime import datetime, timezone
from retry import retry
from typing import Any, Optional
from markdownify import markdownify as md
from moodle import Moodle as MoodleClient, MoodleException
from common.data_source.config import INDEX_BATCH_SIZE
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
CredentialExpiredError,
InsufficientPermissionsError,
ConnectorValidationError,
)
from common.data_source.interfaces import (
LoadConnector,
PollConnector,
SecondsSinceUnixEpoch,
)
from common.data_source.models import Document
from common.data_source.utils import batch_generator, rl_requests
logger = logging.getLogger(__name__)
class MoodleConnector(LoadConnector, PollConnector):
"""Moodle LMS connector for accessing course content"""
def __init__(self, moodle_url: str, batch_size: int = INDEX_BATCH_SIZE) -> None:
self.moodle_url = moodle_url.rstrip("/")
self.batch_size = batch_size
self.moodle_client: Optional[MoodleClient] = None
def _add_token_to_url(self, file_url: str) -> str:
"""Append Moodle token to URL if missing"""
if not self.moodle_client:
return file_url
token = getattr(self.moodle_client, "token", "")
if "token=" in file_url.lower():
return file_url
delimiter = "&" if "?" in file_url else "?"
return f"{file_url}{delimiter}token={token}"
def _log_error(
self, context: str, error: Exception, level: str = "warning"
) -> None:
"""Simplified logging wrapper"""
msg = f"{context}: {error}"
if level == "error":
logger.error(msg)
else:
logger.warning(msg)
def _get_latest_timestamp(self, *timestamps: int) -> int:
"""Return latest valid timestamp"""
return max((t for t in timestamps if t and t > 0), default=0)
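# e.g. _get_latest_timestamp(0, 1700000000, None, 1690000000) -> 1700000000,
# and _get_latest_timestamp(0, None) -> 0 (the default when no valid timestamp is present).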
def _yield_in_batches(
self, generator: Generator[Document, None, None]
) -> Generator[list[Document], None, None]:
for batch in batch_generator(generator, self.batch_size):
yield batch
def load_credentials(self, credentials: dict[str, Any]) -> None:
token = credentials.get("moodle_token")
if not token:
raise ConnectorMissingCredentialError("Moodle API token is required")
try:
self.moodle_client = MoodleClient(
self.moodle_url + "/webservice/rest/server.php", token
)
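# get_site_info() is a lightweight web-service call; it is used here only to verify
# that the token works and the Moodle web services are reachable.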
self.moodle_client.core.webservice.get_site_info()
except MoodleException as e:
if "invalidtoken" in str(e).lower():
raise CredentialExpiredError("Moodle token is invalid or expired")
raise ConnectorMissingCredentialError(
f"Failed to initialize Moodle client: {e}"
)
def validate_connector_settings(self) -> None:
if not self.moodle_client:
raise ConnectorMissingCredentialError("Moodle client not initialized")
try:
site_info = self.moodle_client.core.webservice.get_site_info()
if not site_info.sitename:
raise InsufficientPermissionsError("Invalid Moodle API response")
except MoodleException as e:
msg = str(e).lower()
if "invalidtoken" in msg:
raise CredentialExpiredError("Moodle token is invalid or expired")
if "accessexception" in msg:
raise InsufficientPermissionsError(
"Insufficient permissions. Ensure web services are enabled and permissions are correct."
)
raise ConnectorValidationError(f"Moodle validation error: {e}")
except Exception as e:
raise ConnectorValidationError(f"Unexpected validation error: {e}")
# -------------------------------------------------------------------------
# Data loading & polling
# -------------------------------------------------------------------------
def load_from_state(self) -> Generator[list[Document], None, None]:
if not self.moodle_client:
raise ConnectorMissingCredentialError("Moodle client not initialized")
logger.info("Starting full load from Moodle workspace")
courses = self._get_enrolled_courses()
if not courses:
logger.warning("No courses found to process")
return
yield from self._yield_in_batches(self._process_courses(courses))
def poll_source(
self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch
) -> Generator[list[Document], None, None]:
if not self.moodle_client:
raise ConnectorMissingCredentialError("Moodle client not initialized")
logger.info(
f"Polling Moodle updates between {datetime.fromtimestamp(start)} and {datetime.fromtimestamp(end)}"
)
courses = self._get_enrolled_courses()
if not courses:
logger.warning("No courses found to poll")
return
yield from self._yield_in_batches(
self._get_updated_content(courses, start, end)
)
@retry(tries=3, delay=1, backoff=2)
def _get_enrolled_courses(self) -> list:
if not self.moodle_client:
raise ConnectorMissingCredentialError("Moodle client not initialized")
try:
return self.moodle_client.core.course.get_courses()
except MoodleException as e:
self._log_error("fetching courses", e, "error")
raise ConnectorValidationError(f"Failed to fetch courses: {e}")
@retry(tries=3, delay=1, backoff=2)
def _get_course_contents(self, course_id: int):
if not self.moodle_client:
raise ConnectorMissingCredentialError("Moodle client not initialized")
try:
return self.moodle_client.core.course.get_contents(courseid=course_id)
except MoodleException as e:
self._log_error(f"fetching course contents for {course_id}", e)
return []
def _process_courses(self, courses) -> Generator[Document, None, None]:
for course in courses:
try:
contents = self._get_course_contents(course.id)
for section in contents:
for module in section.modules:
doc = self._process_module(course, section, module)
if doc:
yield doc
except Exception as e:
self._log_error(f"processing course {course.fullname}", e)
def _get_updated_content(
self, courses, start: float, end: float
) -> Generator[Document, None, None]:
for course in courses:
try:
contents = self._get_course_contents(course.id)
for section in contents:
for module in section.modules:
times = [
getattr(module, "timecreated", 0),
getattr(module, "timemodified", 0),
]
if hasattr(module, "contents"):
times.extend(
getattr(c, "timemodified", 0)
for c in module.contents
if c and getattr(c, "timemodified", 0)
)
last_mod = self._get_latest_timestamp(*times)
if start < last_mod <= end:
doc = self._process_module(course, section, module)
if doc:
yield doc
except Exception as e:
self._log_error(f"polling course {course.fullname}", e)
def _process_module(self, course, section, module) -> Optional[Document]:
try:
mtype = module.modname
if mtype in ["label", "url"]:
return None
if mtype == "resource":
return self._process_resource(course, section, module)
if mtype == "forum":
return self._process_forum(course, section, module)
if mtype == "page":
return self._process_page(course, section, module)
if mtype in ["assign", "quiz"]:
return self._process_activity(course, section, module)
if mtype == "book":
return self._process_book(course, section, module)
except Exception as e:
self._log_error(f"processing module {getattr(module, 'name', '?')}", e)
return None
def _process_resource(self, course, section, module) -> Optional[Document]:
if not getattr(module, "contents", None):
return None
file_info = module.contents[0]
if not getattr(file_info, "fileurl", None):
return None
file_name = os.path.basename(file_info.filename)
ts = self._get_latest_timestamp(
getattr(module, "timecreated", 0),
getattr(module, "timemodified", 0),
getattr(file_info, "timemodified", 0),
)
try:
resp = rl_requests.get(
self._add_token_to_url(file_info.fileurl), timeout=60
)
resp.raise_for_status()
blob = resp.content
ext = os.path.splitext(file_name)[1] or ".bin"
semantic_id = f"{course.fullname} / {section.name} / {file_name}"
# Create metadata dictionary with relevant information
metadata = {
"moodle_url": self.moodle_url,
"course_id": getattr(course, "id", None),
"course_name": getattr(course, "fullname", None),
"course_shortname": getattr(course, "shortname", None),
"section_id": getattr(section, "id", None),
"section_name": getattr(section, "name", None),
"section_number": getattr(section, "section", None),
"module_id": getattr(module, "id", None),
"module_name": getattr(module, "name", None),
"module_type": getattr(module, "modname", None),
"module_instance": getattr(module, "instance", None),
"file_url": getattr(file_info, "fileurl", None),
"file_name": file_name,
"file_size": getattr(file_info, "filesize", len(blob)),
"file_type": getattr(file_info, "mimetype", None),
"time_created": getattr(module, "timecreated", None),
"time_modified": getattr(module, "timemodified", None),
"visible": getattr(module, "visible", None),
"groupmode": getattr(module, "groupmode", None),
}
return Document(
id=f"moodle_resource_{module.id}",
source="moodle",
semantic_identifier=semantic_id,
extension=ext,
blob=blob,
doc_updated_at=datetime.fromtimestamp(ts or 0, tz=timezone.utc),
size_bytes=len(blob),
metadata=metadata,
)
except Exception as e:
self._log_error(f"downloading resource {file_name}", e, "error")
return None
def _process_forum(self, course, section, module) -> Optional[Document]:
if not self.moodle_client or not getattr(module, "instance", None):
return None
try:
result = self.moodle_client.mod.forum.get_forum_discussions(
forumid=module.instance
)
disc_list = getattr(result, "discussions", [])
if not disc_list:
return None
markdown = [f"# {module.name}\n"]
latest_ts = self._get_latest_timestamp(
getattr(module, "timecreated", 0),
getattr(module, "timemodified", 0),
)
for d in disc_list:
markdown.append(f"## {d.name}\n\n{md(d.message or '')}\n\n---\n")
latest_ts = max(latest_ts, getattr(d, "timemodified", 0))
blob = "\n".join(markdown).encode("utf-8")
semantic_id = f"{course.fullname} / {section.name} / {module.name}"
# Create metadata dictionary with relevant information
metadata = {
"moodle_url": self.moodle_url,
"course_id": getattr(course, "id", None),
"course_name": getattr(course, "fullname", None),
"course_shortname": getattr(course, "shortname", None),
"section_id": getattr(section, "id", None),
"section_name": getattr(section, "name", None),
"section_number": getattr(section, "section", None),
"module_id": getattr(module, "id", None),
"module_name": getattr(module, "name", None),
"module_type": getattr(module, "modname", None),
"forum_id": getattr(module, "instance", None),
"discussion_count": len(disc_list),
"time_created": getattr(module, "timecreated", None),
"time_modified": getattr(module, "timemodified", None),
"visible": getattr(module, "visible", None),
"groupmode": getattr(module, "groupmode", None),
"discussions": [
{
"id": getattr(d, "id", None),
"name": getattr(d, "name", None),
"user_id": getattr(d, "userid", None),
"user_fullname": getattr(d, "userfullname", None),
"time_created": getattr(d, "timecreated", None),
"time_modified": getattr(d, "timemodified", None),
}
for d in disc_list
],
}
return Document(
id=f"moodle_forum_{module.id}",
source="moodle",
semantic_identifier=semantic_id,
extension=".md",
blob=blob,
doc_updated_at=datetime.fromtimestamp(latest_ts or 0, tz=timezone.utc),
size_bytes=len(blob),
metadata=metadata,
)
except Exception as e:
self._log_error(f"processing forum {module.name}", e)
return None
def _process_page(self, course, section, module) -> Optional[Document]:
if not getattr(module, "contents", None):
return None
file_info = module.contents[0]
if not getattr(file_info, "fileurl", None):
return None
file_name = os.path.basename(file_info.filename)
ts = self._get_latest_timestamp(
getattr(module, "timecreated", 0),
getattr(module, "timemodified", 0),
getattr(file_info, "timemodified", 0),
)
try:
resp = rl_requests.get(
self._add_token_to_url(file_info.fileurl), timeout=60
)
resp.raise_for_status()
blob = resp.content
ext = os.path.splitext(file_name)[1] or ".html"
semantic_id = f"{course.fullname} / {section.name} / {module.name}"
# Create metadata dictionary with relevant information
metadata = {
"moodle_url": self.moodle_url,
"course_id": getattr(course, "id", None),
"course_name": getattr(course, "fullname", None),
"course_shortname": getattr(course, "shortname", None),
"section_id": getattr(section, "id", None),
"section_name": getattr(section, "name", None),
"section_number": getattr(section, "section", None),
"module_id": getattr(module, "id", None),
"module_name": getattr(module, "name", None),
"module_type": getattr(module, "modname", None),
"module_instance": getattr(module, "instance", None),
"page_url": getattr(file_info, "fileurl", None),
"file_name": file_name,
"file_size": getattr(file_info, "filesize", len(blob)),
"file_type": getattr(file_info, "mimetype", None),
"time_created": getattr(module, "timecreated", None),
"time_modified": getattr(module, "timemodified", None),
"visible": getattr(module, "visible", None),
"groupmode": getattr(module, "groupmode", None),
}
return Document(
id=f"moodle_page_{module.id}",
source="moodle",
semantic_identifier=semantic_id,
extension=ext,
blob=blob,
doc_updated_at=datetime.fromtimestamp(ts or 0, tz=timezone.utc),
size_bytes=len(blob),
metadata=metadata,
)
except Exception as e:
self._log_error(f"processing page {file_name}", e, "error")
return None
def _process_activity(self, course, section, module) -> Optional[Document]:
desc = getattr(module, "description", "")
if not desc:
return None
mtype, mname = module.modname, module.name
markdown = f"# {mname}\n\n**Type:** {mtype.capitalize()}\n\n{md(desc)}"
ts = self._get_latest_timestamp(
getattr(module, "timecreated", 0),
getattr(module, "timemodified", 0),
getattr(module, "added", 0),
)
semantic_id = f"{course.fullname} / {section.name} / {mname}"
blob = markdown.encode("utf-8")
# Create metadata dictionary with relevant information
metadata = {
"moodle_url": self.moodle_url,
"course_id": getattr(course, "id", None),
"course_name": getattr(course, "fullname", None),
"course_shortname": getattr(course, "shortname", None),
"section_id": getattr(section, "id", None),
"section_name": getattr(section, "name", None),
"section_number": getattr(section, "section", None),
"module_id": getattr(module, "id", None),
"module_name": getattr(module, "name", None),
"module_type": getattr(module, "modname", None),
"activity_type": mtype,
"activity_instance": getattr(module, "instance", None),
"description": desc,
"time_created": getattr(module, "timecreated", None),
"time_modified": getattr(module, "timemodified", None),
"added": getattr(module, "added", None),
"visible": getattr(module, "visible", None),
"groupmode": getattr(module, "groupmode", None),
}
return Document(
id=f"moodle_{mtype}_{module.id}",
source="moodle",
semantic_identifier=semantic_id,
extension=".md",
blob=blob,
doc_updated_at=datetime.fromtimestamp(ts or 0, tz=timezone.utc),
size_bytes=len(blob),
metadata=metadata,
)
def _process_book(self, course, section, module) -> Optional[Document]:
if not getattr(module, "contents", None):
return None
contents = module.contents
chapters = [
c
for c in contents
if getattr(c, "fileurl", None)
and os.path.basename(c.filename) == "index.html"
]
if not chapters:
return None
latest_ts = self._get_latest_timestamp(
getattr(module, "timecreated", 0),
getattr(module, "timemodified", 0),
*[getattr(c, "timecreated", 0) for c in contents],
*[getattr(c, "timemodified", 0) for c in contents],
)
markdown_parts = [f"# {module.name}\n"]
chapter_info = []
for ch in chapters:
try:
resp = rl_requests.get(self._add_token_to_url(ch.fileurl), timeout=60)
resp.raise_for_status()
html = resp.content.decode("utf-8", errors="ignore")
markdown_parts.append(md(html) + "\n\n---\n")
# Collect chapter information for metadata
chapter_info.append(
{
"chapter_id": getattr(ch, "chapterid", None),
"title": getattr(ch, "title", None),
"filename": getattr(ch, "filename", None),
"fileurl": getattr(ch, "fileurl", None),
"time_created": getattr(ch, "timecreated", None),
"time_modified": getattr(ch, "timemodified", None),
"size": getattr(ch, "filesize", None),
}
)
except Exception as e:
self._log_error(f"processing book chapter {ch.filename}", e)
blob = "\n".join(markdown_parts).encode("utf-8")
semantic_id = f"{course.fullname} / {section.name} / {module.name}"
# Create metadata dictionary with relevant information
metadata = {
"moodle_url": self.moodle_url,
"course_id": getattr(course, "id", None),
"course_name": getattr(course, "fullname", None),
"course_shortname": getattr(course, "shortname", None),
"section_id": getattr(section, "id", None),
"section_name": getattr(section, "name", None),
"section_number": getattr(section, "section", None),
"module_id": getattr(module, "id", None),
"module_name": getattr(module, "name", None),
"module_type": getattr(module, "modname", None),
"book_id": getattr(module, "instance", None),
"chapter_count": len(chapters),
"chapters": chapter_info,
"time_created": getattr(module, "timecreated", None),
"time_modified": getattr(module, "timemodified", None),
"visible": getattr(module, "visible", None),
"groupmode": getattr(module, "groupmode", None),
}
return Document(
id=f"moodle_book_{module.id}",
source="moodle",
semantic_identifier=semantic_id,
extension=".md",
blob=blob,
doc_updated_at=datetime.fromtimestamp(latest_ts or 0, tz=timezone.utc),
size_bytes=len(blob),
metadata=metadata,
)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/moodle_connector.py",
"license": "Apache License 2.0",
"lines": 477,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:rag/utils/ob_conn.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import re
import time
from typing import Any, Optional
import numpy as np
from elasticsearch_dsl import Q, Search
from pydantic import BaseModel
from pymysql.converters import escape_string
from pyobvector import ARRAY
from sqlalchemy import Column, String, Integer, JSON, Double, Row
from sqlalchemy.dialects.mysql import LONGTEXT, TEXT
from sqlalchemy.sql.type_api import TypeEngine
from common.constants import PAGERANK_FLD, TAG_FLD
from common.decorator import singleton
from common.doc_store.doc_store_base import MatchExpr, OrderByExpr, FusionExpr, MatchTextExpr, MatchDenseExpr
from common.doc_store.ob_conn_base import (
OBConnectionBase, get_value_str,
vector_search_template, vector_column_pattern,
fulltext_index_name_template, doc_meta_column_names,
doc_meta_column_types,
)
from common.float_utils import get_float
from rag.nlp import rag_tokenizer
logger = logging.getLogger('ragflow.ob_conn')
column_order_id = Column("_order_id", Integer, nullable=True, comment="chunk order id for maintaining sequence")
column_group_id = Column("group_id", String(256), nullable=True, comment="group id for external retrieval")
column_mom_id = Column("mom_id", String(256), nullable=True, comment="parent chunk id")
column_chunk_data = Column("chunk_data", JSON, nullable=True, comment="table parser row data")
column_definitions: list[Column] = [
Column("id", String(256), primary_key=True, comment="chunk id"),
Column("kb_id", String(256), nullable=False, index=True, comment="knowledge base id"),
Column("doc_id", String(256), nullable=True, index=True, comment="document id"),
Column("docnm_kwd", String(256), nullable=True, comment="document name"),
Column("doc_type_kwd", String(256), nullable=True, comment="document type"),
Column("title_tks", String(256), nullable=True, comment="title tokens"),
Column("title_sm_tks", String(256), nullable=True, comment="fine-grained (small) title tokens"),
Column("content_with_weight", LONGTEXT, nullable=True, comment="the original content"),
Column("content_ltks", LONGTEXT, nullable=True, comment="long text tokens derived from content_with_weight"),
Column("content_sm_ltks", LONGTEXT, nullable=True, comment="fine-grained (small) tokens derived from content_ltks"),
Column("pagerank_fea", Integer, nullable=True, comment="page rank priority, usually set in kb level"),
Column("important_kwd", ARRAY(String(256)), nullable=True, comment="keywords"),
Column("important_tks", TEXT, nullable=True, comment="keyword tokens"),
Column("question_kwd", ARRAY(String(1024)), nullable=True, comment="questions"),
Column("question_tks", TEXT, nullable=True, comment="question tokens"),
Column("tag_kwd", ARRAY(String(256)), nullable=True, comment="tags"),
Column("tag_feas", JSON, nullable=True,
comment="tag features used for 'rank_feature', format: [tag -> relevance score]"),
Column("available_int", Integer, nullable=False, index=True, server_default="1",
comment="status of availability, 0 for unavailable, 1 for available"),
Column("create_time", String(19), nullable=True, comment="creation time in YYYY-MM-DD HH:MM:SS format"),
Column("create_timestamp_flt", Double, nullable=True, comment="creation timestamp in float format"),
Column("img_id", String(128), nullable=True, comment="image id"),
Column("position_int", ARRAY(ARRAY(Integer)), nullable=True, comment="position"),
Column("page_num_int", ARRAY(Integer), nullable=True, comment="page number"),
Column("top_int", ARRAY(Integer), nullable=True, comment="rank from the top"),
Column("knowledge_graph_kwd", String(256), nullable=True, index=True, comment="knowledge graph chunk type"),
Column("source_id", ARRAY(String(256)), nullable=True, comment="source document id"),
Column("entity_kwd", String(256), nullable=True, comment="entity name"),
Column("entity_type_kwd", String(256), nullable=True, index=True, comment="entity type"),
Column("from_entity_kwd", String(256), nullable=True, comment="the source entity of this edge"),
Column("to_entity_kwd", String(256), nullable=True, comment="the target entity of this edge"),
Column("weight_int", Integer, nullable=True, comment="the weight of this edge"),
Column("weight_flt", Double, nullable=True, comment="the weight of community report"),
Column("entities_kwd", ARRAY(String(256)), nullable=True, comment="node ids of entities"),
Column("rank_flt", Double, nullable=True, comment="rank of this entity"),
Column("removed_kwd", String(256), nullable=True, index=True, server_default="'N'",
comment="whether it has been deleted"),
column_chunk_data,
Column("metadata", JSON, nullable=True, comment="metadata for this chunk"),
Column("extra", JSON, nullable=True, comment="extra information of non-general chunk"),
column_order_id,
column_group_id,
column_mom_id,
]
column_names: list[str] = [col.name for col in column_definitions]
column_types: dict[str, TypeEngine] = {col.name: col.type for col in column_definitions}
array_columns: list[str] = [col.name for col in column_definitions if isinstance(col.type, ARRAY)]
# Index columns for RAG chunk table
INDEX_COLUMNS: list[str] = [
"kb_id",
"doc_id",
"available_int",
"knowledge_graph_kwd",
"entity_type_kwd",
"removed_kwd",
]
# Full-text search columns (with weight) - original content
FTS_COLUMNS_ORIGIN: list[str] = [
"docnm_kwd^10",
"content_with_weight",
"important_tks^20",
"question_tks^20",
]
# Full-text search columns (with weight) - tokenized content
FTS_COLUMNS_TKS: list[str] = [
"title_tks^10",
"title_sm_tks^5",
"important_tks^20",
"question_tks^20",
"content_ltks^2",
"content_sm_ltks",
]
# Extra columns to add after table creation (for migration)
EXTRA_COLUMNS: list[Column] = [column_order_id, column_group_id, column_mom_id, column_chunk_data]
class SearchResult(BaseModel):
total: int
chunks: list[dict]
def get_column_value(column_name: str, value: Any) -> Any:
# Check chunk table columns first, then doc_meta table columns
column_type = column_types.get(column_name) or doc_meta_column_types.get(column_name)
if column_type:
if isinstance(column_type, String):
return str(value)
elif isinstance(column_type, Integer):
return int(value)
elif isinstance(column_type, Double):
return float(value)
elif isinstance(column_type, ARRAY) or isinstance(column_type, JSON):
if isinstance(value, str):
try:
return json.loads(value)
except json.JSONDecodeError:
return value
else:
return value
else:
raise ValueError(f"Unsupported column type for column '{column_name}': {column_type}")
elif vector_column_pattern.match(column_name):
if isinstance(value, str):
try:
return json.loads(value)
except json.JSONDecodeError:
return value
else:
return value
elif column_name == "_score":
return float(value)
else:
raise ValueError(f"Unknown column '{column_name}' with value '{value}'.")
def get_default_value(column_name: str) -> Any:
if column_name == "available_int":
return 1
elif column_name == "removed_kwd":
return "N"
elif column_name == "_order_id":
return 0
else:
return None
def get_metadata_filter_expression(metadata_filtering_conditions: dict) -> str:
"""
Convert metadata filtering conditions to MySQL JSON path expression.
Args:
metadata_filtering_conditions: dict with 'conditions' and 'logical_operator' keys
Returns:
MySQL JSON path expression string
"""
if not metadata_filtering_conditions:
return ""
conditions = metadata_filtering_conditions.get("conditions", [])
logical_operator = metadata_filtering_conditions.get("logical_operator", "and").upper()
if not conditions:
return ""
if logical_operator not in ["AND", "OR"]:
raise ValueError(f"Unsupported logical operator: {logical_operator}. Only 'and' and 'or' are supported.")
metadata_filters = []
for condition in conditions:
name = condition.get("name")
comparison_operator = condition.get("comparison_operator")
value = condition.get("value")
if not all([name, comparison_operator]):
continue
expr = f"JSON_EXTRACT(metadata, '$.{name}')"
value_str = get_value_str(value)
# Convert comparison operator to MySQL JSON path syntax
if comparison_operator == "is":
# JSON_EXTRACT(metadata, '$.field_name') = 'value'
metadata_filters.append(f"{expr} = {value_str}")
elif comparison_operator == "is not":
metadata_filters.append(f"{expr} != {value_str}")
elif comparison_operator == "contains":
metadata_filters.append(f"JSON_CONTAINS({expr}, {value_str})")
elif comparison_operator == "not contains":
metadata_filters.append(f"NOT JSON_CONTAINS({expr}, {value_str})")
elif comparison_operator == "start with":
metadata_filters.append(f"{expr} LIKE CONCAT({value_str}, '%')")
elif comparison_operator == "end with":
metadata_filters.append(f"{expr} LIKE CONCAT('%', {value_str})")
elif comparison_operator == "empty":
metadata_filters.append(f"({expr} IS NULL OR {expr} = '' OR {expr} = '[]' OR {expr} = '{{}}')")
elif comparison_operator == "not empty":
metadata_filters.append(f"({expr} IS NOT NULL AND {expr} != '' AND {expr} != '[]' AND {expr} != '{{}}')")
# Number operators
elif comparison_operator == "=":
metadata_filters.append(f"CAST({expr} AS DECIMAL(20,10)) = {value_str}")
elif comparison_operator == "≠":
metadata_filters.append(f"CAST({expr} AS DECIMAL(20,10)) != {value_str}")
elif comparison_operator == ">":
metadata_filters.append(f"CAST({expr} AS DECIMAL(20,10)) > {value_str}")
elif comparison_operator == "<":
metadata_filters.append(f"CAST({expr} AS DECIMAL(20,10)) < {value_str}")
elif comparison_operator == "≥":
metadata_filters.append(f"CAST({expr} AS DECIMAL(20,10)) >= {value_str}")
elif comparison_operator == "≤":
metadata_filters.append(f"CAST({expr} AS DECIMAL(20,10)) <= {value_str}")
# Time operators
elif comparison_operator == "before":
metadata_filters.append(f"CAST({expr} AS DATETIME) < {value_str}")
elif comparison_operator == "after":
metadata_filters.append(f"CAST({expr} AS DATETIME) > {value_str}")
else:
logger.warning(f"Unsupported comparison operator: {comparison_operator}")
continue
if not metadata_filters:
return ""
return f"({f' {logical_operator} '.join(metadata_filters)})"
def get_filters(condition: dict) -> list[str]:
filters: list[str] = []
for k, v in condition.items():
if not v:
continue
if k == "exists":
filters.append(f"{v} IS NOT NULL")
elif k == "must_not" and isinstance(v, dict) and "exists" in v:
filters.append(f"{v.get('exists')} IS NULL")
elif k == "metadata_filtering_conditions":
# Handle metadata filtering conditions
metadata_filter = get_metadata_filter_expression(v)
if metadata_filter:
filters.append(metadata_filter)
elif k in array_columns:
if isinstance(v, list):
array_filters = []
for vv in v:
array_filters.append(f"array_contains({k}, {get_value_str(vv)})")
array_filter = " OR ".join(array_filters)
filters.append(f"({array_filter})")
else:
filters.append(f"array_contains({k}, {get_value_str(v)})")
elif isinstance(v, list):
values: list[str] = []
for item in v:
values.append(get_value_str(item))
value = ", ".join(values)
filters.append(f"{k} IN ({value})")
else:
filters.append(f"{k} = {get_value_str(v)}")
return filters
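# Illustrative result (assumes get_value_str() quotes strings with single quotes):
#   {"kb_id": ["kb1", "kb2"], "available_int": 1} -> ["kb_id IN ('kb1', 'kb2')", "available_int = 1"]
# Note that falsy values (0, "", None, []) are skipped entirely by the `if not v` guard.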
@singleton
class OBConnection(OBConnectionBase):
def __init__(self):
super().__init__(logger_name='ragflow.ob_conn')
# Determine which columns to use for full-text search dynamically
self._fulltext_search_columns = FTS_COLUMNS_ORIGIN if self.search_original_content else FTS_COLUMNS_TKS
"""
Template method implementations
"""
def get_index_columns(self) -> list[str]:
return INDEX_COLUMNS
def get_column_definitions(self) -> list[Column]:
return column_definitions
def get_extra_columns(self) -> list[Column]:
return EXTRA_COLUMNS
def get_lock_prefix(self) -> str:
return "ob_"
def _get_filters(self, condition: dict) -> list[str]:
return get_filters(condition)
def get_fulltext_columns(self) -> list[str]:
"""Return list of column names that need fulltext indexes (without weight suffix)."""
return [col.split("^")[0] for col in self._fulltext_search_columns]
def delete_idx(self, index_name: str, dataset_id: str):
if dataset_id:
# The index needs to stay alive after any single kb deletion, since all kbs under this tenant share one index.
return
super().delete_idx(index_name, dataset_id)
"""
Performance monitoring
"""
def get_performance_metrics(self) -> dict:
"""
Get comprehensive performance metrics for OceanBase.
Returns:
dict: Performance metrics including latency, storage, QPS, and slow queries
"""
metrics = {
"connection": "connected",
"latency_ms": 0.0,
"storage_used": "0B",
"storage_total": "0B",
"query_per_second": 0,
"slow_queries": 0,
"active_connections": 0,
"max_connections": 0
}
try:
# Measure connection latency
start_time = time.time()
self.client.perform_raw_text_sql("SELECT 1").fetchone()
metrics["latency_ms"] = round((time.time() - start_time) * 1000, 2)
# Get storage information
try:
storage_info = self._get_storage_info()
metrics.update(storage_info)
except Exception as e:
logger.warning(f"Failed to get storage info: {str(e)}")
# Get connection pool statistics
try:
pool_stats = self._get_connection_pool_stats()
metrics.update(pool_stats)
except Exception as e:
logger.warning(f"Failed to get connection pool stats: {str(e)}")
# Get slow query statistics
try:
slow_queries = self._get_slow_query_count()
metrics["slow_queries"] = slow_queries
except Exception as e:
logger.warning(f"Failed to get slow query count: {str(e)}")
# Get QPS (Queries Per Second) - approximate from processlist
try:
qps = self._estimate_qps()
metrics["query_per_second"] = qps
except Exception as e:
logger.warning(f"Failed to estimate QPS: {str(e)}")
except Exception as e:
metrics["connection"] = "disconnected"
metrics["error"] = str(e)
logger.error(f"Failed to get OceanBase performance metrics: {str(e)}")
return metrics
def _get_storage_info(self) -> dict:
"""
Get storage space usage information.
Returns:
dict: Storage information with used and total space
"""
try:
# Get database size
result = self.client.perform_raw_text_sql(
f"SELECT ROUND(SUM(data_length + index_length) / 1024 / 1024, 2) AS 'size_mb' "
f"FROM information_schema.tables WHERE table_schema = '{self.db_name}'"
).fetchone()
size_mb = float(result[0]) if result and result[0] else 0.0
# Try to get total available space (may not be available in all OceanBase versions)
try:
result = self.client.perform_raw_text_sql(
"SELECT ROUND(SUM(total_size) / 1024 / 1024 / 1024, 2) AS 'total_gb' "
"FROM oceanbase.__all_disk_stat"
).fetchone()
total_gb = float(result[0]) if result and result[0] else None
except Exception:
# Fallback: estimate total space (100GB default if not available)
total_gb = 100.0
return {
"storage_used": f"{size_mb:.2f}MB",
"storage_total": f"{total_gb:.2f}GB" if total_gb else "N/A"
}
except Exception as e:
logger.warning(f"Failed to get storage info: {str(e)}")
return {
"storage_used": "N/A",
"storage_total": "N/A"
}
def _get_connection_pool_stats(self) -> dict:
"""
Get connection pool statistics.
Returns:
dict: Connection pool statistics
"""
try:
# Get active connections from processlist
result = self.client.perform_raw_text_sql("SHOW PROCESSLIST")
active_connections = len(list(result.fetchall()))
# Get max_connections setting
max_conn_result = self.client.perform_raw_text_sql(
"SHOW VARIABLES LIKE 'max_connections'"
).fetchone()
max_connections = int(max_conn_result[1]) if max_conn_result and max_conn_result[1] else 0
# Get pool size from client if available
pool_size = getattr(self.client, 'pool_size', None) or 0
return {
"active_connections": active_connections,
"max_connections": max_connections if max_connections > 0 else pool_size,
"pool_size": pool_size
}
except Exception as e:
logger.warning(f"Failed to get connection pool stats: {str(e)}")
return {
"active_connections": 0,
"max_connections": 0,
"pool_size": 0
}
def _get_slow_query_count(self, threshold_seconds: int = 1) -> int:
"""
Get count of slow queries (queries taking longer than threshold).
Args:
threshold_seconds: Threshold in seconds for slow queries (default: 1)
Returns:
int: Number of slow queries
"""
try:
result = self.client.perform_raw_text_sql(
f"SELECT COUNT(*) FROM information_schema.processlist "
f"WHERE time > {threshold_seconds} AND command != 'Sleep'"
).fetchone()
return int(result[0]) if result and result[0] else 0
except Exception as e:
logger.warning(f"Failed to get slow query count: {str(e)}")
return 0
def _estimate_qps(self) -> int:
"""
Estimate queries per second from processlist.
Returns:
int: Estimated queries per second
"""
try:
# Count active queries (non-Sleep commands)
result = self.client.perform_raw_text_sql(
"SELECT COUNT(*) FROM information_schema.processlist WHERE command != 'Sleep'"
).fetchone()
active_queries = int(result[0]) if result and result[0] else 0
# Rough estimate: assume average query takes 0.1 seconds
# This is a simplified estimation
estimated_qps = max(0, active_queries * 10)
return estimated_qps
except Exception as e:
logger.warning(f"Failed to estimate QPS: {str(e)}")
return 0
"""
CRUD operations
"""
def search(
self,
select_fields: list[str],
highlight_fields: list[str],
condition: dict,
match_expressions: list[MatchExpr],
order_by: OrderByExpr,
offset: int,
limit: int,
index_names: str | list[str],
knowledgebase_ids: list[str],
agg_fields: list[str] = [],
rank_feature: dict | None = None,
**kwargs,
):
if isinstance(index_names, str):
index_names = index_names.split(",")
assert isinstance(index_names, list) and len(index_names) > 0
index_names = list(set(index_names))
if len(match_expressions) == 3:
if not self.enable_fulltext_search:
# full-text search is disabled, so fall back to vector-only search
match_expressions = [m for m in match_expressions if isinstance(m, MatchDenseExpr)]
else:
for m in match_expressions:
if isinstance(m, FusionExpr):
weights = m.fusion_params["weights"]
vector_similarity_weight = get_float(weights.split(",")[1])
# drop the leg whose weight is zero: vector weight 0 keeps full-text only, weight 1 keeps vector only
if vector_similarity_weight <= 0.0:
match_expressions = [m for m in match_expressions if isinstance(m, MatchTextExpr)]
elif vector_similarity_weight >= 1.0:
match_expressions = [m for m in match_expressions if isinstance(m, MatchDenseExpr)]
result: SearchResult = SearchResult(
total=0,
chunks=[],
)
# copied from es_conn.py
if len(match_expressions) == 3 and self.es:
bqry = Q("bool", must=[])
condition["kb_id"] = knowledgebase_ids
for k, v in condition.items():
if k == "available_int":
if v == 0:
bqry.filter.append(Q("range", available_int={"lt": 1}))
else:
bqry.filter.append(
Q("bool", must_not=Q("range", available_int={"lt": 1})))
continue
if not v:
continue
if isinstance(v, list):
bqry.filter.append(Q("terms", **{k: v}))
elif isinstance(v, str) or isinstance(v, int):
bqry.filter.append(Q("term", **{k: v}))
else:
raise Exception(
f"Condition `{str(k)}={str(v)}` value type is {str(type(v))}, expected to be int, str or list.")
s = Search()
vector_similarity_weight = 0.5
for m in match_expressions:
if isinstance(m, FusionExpr) and m.method == "weighted_sum" and "weights" in m.fusion_params:
assert len(match_expressions) == 3 and isinstance(match_expressions[0], MatchTextExpr) and isinstance(
match_expressions[1],
MatchDenseExpr) and isinstance(
match_expressions[2], FusionExpr)
weights = m.fusion_params["weights"]
vector_similarity_weight = get_float(weights.split(",")[1])
for m in match_expressions:
if isinstance(m, MatchTextExpr):
minimum_should_match = m.extra_options.get("minimum_should_match", 0.0)
if isinstance(minimum_should_match, float):
minimum_should_match = str(int(minimum_should_match * 100)) + "%"
bqry.must.append(Q("query_string", fields=FTS_COLUMNS_TKS,
type="best_fields", query=m.matching_text,
minimum_should_match=minimum_should_match,
boost=1))
bqry.boost = 1.0 - vector_similarity_weight
elif isinstance(m, MatchDenseExpr):
assert (bqry is not None)
similarity = 0.0
if "similarity" in m.extra_options:
similarity = m.extra_options["similarity"]
s = s.knn(m.vector_column_name,
m.topn,
m.topn * 2,
query_vector=list(m.embedding_data),
filter=bqry.to_dict(),
similarity=similarity,
)
if bqry and rank_feature:
for fld, sc in rank_feature.items():
if fld != PAGERANK_FLD:
fld = f"{TAG_FLD}.{fld}"
bqry.should.append(Q("rank_feature", field=fld, linear={}, boost=sc))
if bqry:
s = s.query(bqry)
# for field in highlightFields:
# s = s.highlight(field)
if order_by:
orders = list()
for field, order in order_by.fields:
order = "asc" if order == 0 else "desc"
if field in ["page_num_int", "top_int"]:
order_info = {"order": order, "unmapped_type": "float",
"mode": "avg", "numeric_type": "double"}
elif field.endswith("_int") or field.endswith("_flt"):
order_info = {"order": order, "unmapped_type": "float"}
else:
order_info = {"order": order, "unmapped_type": "text"}
orders.append({field: order_info})
s = s.sort(*orders)
for fld in agg_fields:
s.aggs.bucket(f'aggs_{fld}', 'terms', field=fld, size=1000000)
if limit > 0:
s = s[offset:offset + limit]
q = s.to_dict()
logger.debug(f"OBConnection.hybrid_search {str(index_names)} query: " + json.dumps(q))
for index_name in index_names:
start_time = time.time()
res = self.es.search(index=index_name,
body=q,
timeout="600s",
track_total_hits=True,
_source=True)
elapsed_time = time.time() - start_time
logger.info(
f"OBConnection.search table {index_name}, search type: hybrid, elapsed time: {elapsed_time:.3f} seconds,"
f" got count: {len(res)}"
)
for chunk in res:
result.chunks.append(self._es_row_to_entity(chunk))
result.total = result.total + 1
return result
output_fields = select_fields.copy()
if "*" in output_fields:
if index_names[0].startswith("ragflow_doc_meta_"):
output_fields = doc_meta_column_names.copy()
else:
output_fields = column_names.copy()
if "id" not in output_fields:
output_fields = ["id"] + output_fields
if "_score" in output_fields:
output_fields.remove("_score")
if highlight_fields:
for field in highlight_fields:
if field not in output_fields:
output_fields.append(field)
fields_expr = ", ".join(output_fields)
condition["kb_id"] = knowledgebase_ids
filters: list[str] = get_filters(condition)
filters_expr = " AND ".join(filters)
fulltext_query: Optional[str] = None
fulltext_topn: Optional[int] = None
fulltext_search_weight: dict[str, float] = {}
fulltext_search_expr: dict[str, str] = {}
fulltext_search_idx_list: list[str] = []
fulltext_search_score_expr: Optional[str] = None
fulltext_search_filter: Optional[str] = None
vector_column_name: Optional[str] = None
vector_data: Optional[list[float]] = None
vector_topn: Optional[int] = None
vector_similarity_threshold: Optional[float] = None
vector_similarity_weight: Optional[float] = None
vector_search_expr: Optional[str] = None
vector_search_score_expr: Optional[str] = None
vector_search_filter: Optional[str] = None
for m in match_expressions:
if isinstance(m, MatchTextExpr):
assert "original_query" in m.extra_options, "'original_query' is missing in extra_options."
fulltext_query = m.extra_options["original_query"]
fulltext_query = escape_string(fulltext_query.strip())
fulltext_topn = m.topn
fulltext_search_expr, fulltext_search_weight = self._parse_fulltext_columns(
fulltext_query, self._fulltext_search_columns
)
for column_name in fulltext_search_expr.keys():
fulltext_search_idx_list.append(fulltext_index_name_template % column_name)
elif isinstance(m, MatchDenseExpr):
assert m.embedding_data_type == "float", f"embedding data type '{m.embedding_data_type}' is not float."
vector_column_name = m.vector_column_name
vector_data = m.embedding_data
vector_topn = m.topn
vector_similarity_threshold = m.extra_options.get("similarity", 0.0)
elif isinstance(m, FusionExpr):
weights = m.fusion_params["weights"]
vector_similarity_weight = get_float(weights.split(",")[1])
if fulltext_query:
fulltext_search_filter = f"({' OR '.join([expr for expr in fulltext_search_expr.values()])})"
fulltext_search_score_expr = f"({' + '.join(f'{expr} * {fulltext_search_weight.get(col, 0)}' for col, expr in fulltext_search_expr.items())})"
if vector_data:
vector_data_str = "[" + ",".join([str(np.float32(v)) for v in vector_data]) + "]"
vector_search_expr = vector_search_template % (vector_column_name, vector_data_str)
# use (1 - cosine_distance) as score, which should be [-1, 1]
# https://www.oceanbase.com/docs/common-oceanbase-database-standalone-1000000003577323
vector_search_score_expr = f"(1 - {vector_search_expr})"
vector_search_filter = f"{vector_search_score_expr} >= {vector_similarity_threshold}"
pagerank_score_expr = f"(CAST(IFNULL({PAGERANK_FLD}, 0) AS DECIMAL(10, 2)) / 100)"
# TODO use tag rank_feature in sorting
# tag_rank_fea = {k: float(v) for k, v in (rank_feature or {}).items() if k != PAGERANK_FLD}
if fulltext_query and vector_data:
search_type = "fusion"
elif fulltext_query:
search_type = "fulltext"
elif vector_data:
search_type = "vector"
elif len(agg_fields) > 0:
search_type = "aggregation"
else:
search_type = "filter"
if search_type in ["fusion", "fulltext", "vector"] and "_score" not in output_fields:
output_fields.append("_score")
if limit:
if vector_topn is not None:
limit = min(vector_topn, limit)
if fulltext_topn is not None:
limit = min(fulltext_topn, limit)
for index_name in index_names:
if not self._check_table_exists_cached(index_name):
continue
fulltext_search_hint = f"/*+ UNION_MERGE({index_name} {' '.join(fulltext_search_idx_list)}) */" if self.use_fulltext_hint else ""
if search_type == "fusion":
# fusion search, usually for chat
num_candidates = vector_topn + fulltext_topn
if self.use_fulltext_first_fusion_search:
count_sql = (
f"WITH fulltext_results AS ("
f" SELECT {fulltext_search_hint} *, {fulltext_search_score_expr} AS relevance"
f" FROM {index_name}"
f" WHERE {filters_expr} AND {fulltext_search_filter}"
f" ORDER BY relevance DESC"
f" LIMIT {num_candidates}"
f")"
f" SELECT COUNT(*) FROM fulltext_results WHERE {vector_search_filter}"
)
else:
count_sql = (
f"WITH fulltext_results AS ("
f" SELECT {fulltext_search_hint} id FROM {index_name}"
f" WHERE {filters_expr} AND {fulltext_search_filter}"
f" ORDER BY {fulltext_search_score_expr}"
f" LIMIT {fulltext_topn}"
f"),"
f"vector_results AS ("
f" SELECT id FROM {index_name}"
f" WHERE {filters_expr} AND {vector_search_filter}"
f" ORDER BY {vector_search_expr}"
f" APPROXIMATE LIMIT {vector_topn}"
f")"
f" SELECT COUNT(*) FROM fulltext_results f FULL OUTER JOIN vector_results v ON f.id = v.id"
)
logger.debug("OBConnection.search with count sql: %s", count_sql)
rows, elapsed_time = self._execute_search_sql(count_sql)
total_count = rows[0][0] if rows else 0
result.total += total_count
logger.info(
f"OBConnection.search table {index_name}, search type: fusion, step: 1-count, elapsed time: {elapsed_time:.3f} seconds,"
f" vector column: '{vector_column_name}',"
f" query text: '{fulltext_query}',"
f" condition: '{condition}',"
f" vector_similarity_threshold: {vector_similarity_threshold},"
f" got count: {total_count}"
)
if total_count == 0:
continue
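# Fulltext-first fusion: take the top `num_candidates` chunks by weighted full-text relevance,
# then re-score them as relevance * (1 - w) + (1 - cosine_distance) * w + pagerank_fea / 100,
# where w is the vector similarity weight parsed from the FusionExpr.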
if self.use_fulltext_first_fusion_search:
score_expr = f"(relevance * {1 - vector_similarity_weight} + {vector_search_score_expr} * {vector_similarity_weight} + {pagerank_score_expr})"
fusion_sql = (
f"WITH fulltext_results AS ("
f" SELECT {fulltext_search_hint} *, {fulltext_search_score_expr} AS relevance"
f" FROM {index_name}"
f" WHERE {filters_expr} AND {fulltext_search_filter}"
f" ORDER BY relevance DESC"
f" LIMIT {num_candidates}"
f")"
f" SELECT {fields_expr}, {score_expr} AS _score"
f" FROM fulltext_results"
f" WHERE {vector_search_filter}"
f" ORDER BY _score DESC"
f" LIMIT {offset}, {limit}"
)
else:
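# Two-leg fusion: run full-text and approximate vector retrieval independently, FULL OUTER JOIN
# the two candidate sets on chunk id so chunks found by only one leg are kept, then blend
# relevance and similarity with the same weights plus the pagerank bonus.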
pagerank_score_expr = f"(CAST(IFNULL(f.{PAGERANK_FLD}, 0) AS DECIMAL(10, 2)) / 100)"
score_expr = f"(f.relevance * {1 - vector_similarity_weight} + v.similarity * {vector_similarity_weight} + {pagerank_score_expr})"
fields_expr = ", ".join([f"t.{f} as {f}" for f in output_fields if f != "_score"])
fusion_sql = (
f"WITH fulltext_results AS ("
f" SELECT {fulltext_search_hint} id, pagerank_fea, {fulltext_search_score_expr} AS relevance"
f" FROM {index_name}"
f" WHERE {filters_expr} AND {fulltext_search_filter}"
f" ORDER BY relevance DESC"
f" LIMIT {fulltext_topn}"
f"),"
f"vector_results AS ("
f" SELECT id, pagerank_fea, {vector_search_score_expr} AS similarity"
f" FROM {index_name}"
f" WHERE {filters_expr} AND {vector_search_filter}"
f" ORDER BY {vector_search_expr}"
f" APPROXIMATE LIMIT {vector_topn}"
f"),"
f"combined_results AS ("
f" SELECT COALESCE(f.id, v.id) AS id, {score_expr} AS score"
f" FROM fulltext_results f"
f" FULL OUTER JOIN vector_results v"
f" ON f.id = v.id"
f")"
f" SELECT {fields_expr}, c.score as _score"
f" FROM combined_results c"
f" JOIN {index_name} t"
f" ON c.id = t.id"
f" ORDER BY score DESC"
f" LIMIT {offset}, {limit}"
)
logger.debug("OBConnection.search with fusion sql: %s", fusion_sql)
rows, elapsed_time = self._execute_search_sql(fusion_sql)
logger.info(
f"OBConnection.search table {index_name}, search type: fusion, step: 2-query, elapsed time: {elapsed_time:.3f} seconds,"
f" select fields: '{output_fields}',"
f" vector column: '{vector_column_name}',"
f" query text: '{fulltext_query}',"
f" condition: '{condition}',"
f" vector_similarity_threshold: {vector_similarity_threshold},"
f" vector_similarity_weight: {vector_similarity_weight},"
f" return rows count: {len(rows)}"
)
for row in rows:
result.chunks.append(self._row_to_entity(row, output_fields))
elif search_type == "vector":
# vector search, usually used for graph search
count_sql = self._build_count_sql(index_name, filters_expr, vector_search_filter)
logger.debug("OBConnection.search with vector count sql: %s", count_sql)
rows, elapsed_time = self._execute_search_sql(count_sql)
total_count = rows[0][0] if rows else 0
result.total += total_count
logger.info(
f"OBConnection.search table {index_name}, search type: vector, step: 1-count, elapsed time: {elapsed_time:.3f} seconds,"
f" vector column: '{vector_column_name}',"
f" condition: '{condition}',"
f" vector_similarity_threshold: {vector_similarity_threshold},"
f" got count: {total_count}"
)
if total_count == 0:
continue
vector_sql = self._build_vector_search_sql(
index_name, fields_expr, vector_search_score_expr, filters_expr,
vector_search_filter, vector_search_expr, limit, vector_topn, offset
)
logger.debug("OBConnection.search with vector sql: %s", vector_sql)
rows, elapsed_time = self._execute_search_sql(vector_sql)
logger.info(
f"OBConnection.search table {index_name}, search type: vector, step: 2-query, elapsed time: {elapsed_time:.3f} seconds,"
f" select fields: '{output_fields}',"
f" vector column: '{vector_column_name}',"
f" condition: '{condition}',"
f" vector_similarity_threshold: {vector_similarity_threshold},"
f" return rows count: {len(rows)}"
)
for row in rows:
result.chunks.append(self._row_to_entity(row, output_fields))
elif search_type == "fulltext":
# fulltext search, usually used to search chunks in one dataset
count_sql = self._build_count_sql(index_name, filters_expr, fulltext_search_filter, fulltext_search_hint)
logger.debug("OBConnection.search with fulltext count sql: %s", count_sql)
rows, elapsed_time = self._execute_search_sql(count_sql)
total_count = rows[0][0] if rows else 0
result.total += total_count
logger.info(
f"OBConnection.search table {index_name}, search type: fulltext, step: 1-count, elapsed time: {elapsed_time:.3f} seconds,"
f" query text: '{fulltext_query}',"
f" condition: '{condition}',"
f" got count: {total_count}"
)
if total_count == 0:
continue
fulltext_sql = self._build_fulltext_search_sql(
index_name, fields_expr, fulltext_search_score_expr, filters_expr,
fulltext_search_filter, offset, limit, fulltext_topn, fulltext_search_hint
)
logger.debug("OBConnection.search with fulltext sql: %s", fulltext_sql)
rows, elapsed_time = self._execute_search_sql(fulltext_sql)
logger.info(
f"OBConnection.search table {index_name}, search type: fulltext, step: 2-query, elapsed time: {elapsed_time:.3f} seconds,"
f" select fields: '{output_fields}',"
f" query text: '{fulltext_query}',"
f" condition: '{condition}',"
f" return rows count: {len(rows)}"
)
for row in rows:
result.chunks.append(self._row_to_entity(row, output_fields))
elif search_type == "aggregation":
# aggregation search
assert len(agg_fields) == 1, "Only one aggregation field is supported in OceanBase."
agg_field = agg_fields[0]
if agg_field in array_columns:
res = self.client.perform_raw_text_sql(
f"SELECT {agg_field} FROM {index_name}"
f" WHERE {agg_field} IS NOT NULL AND {filters_expr}"
)
counts = {}
for row in res:
if row[0]:
if isinstance(row[0], str):
try:
arr = json.loads(row[0])
except json.JSONDecodeError:
logger.warning(f"Failed to parse JSON array: {row[0]}")
continue
else:
arr = row[0]
if isinstance(arr, list):
for v in arr:
if isinstance(v, str) and v.strip():
counts[v] = counts.get(v, 0) + 1
for v, count in counts.items():
result.chunks.append({
"value": v,
"count": count,
})
result.total += len(counts)
else:
res = self.client.perform_raw_text_sql(
f"SELECT {agg_field}, COUNT(*) as count FROM {index_name}"
f" WHERE {agg_field} IS NOT NULL AND {filters_expr}"
f" GROUP BY {agg_field}"
)
for row in res:
result.chunks.append({
"value": row[0],
"count": int(row[1]),
})
result.total += 1
else:
# only filter
orders: list[str] = []
if order_by:
for field, order in order_by.fields:
if isinstance(column_types[field], ARRAY):
f = field + "_sort"
fields_expr += f", array_avg({field}) AS {f}"
field = f
order = "ASC" if order == 0 else "DESC"
orders.append(f"{field} {order}")
count_sql = self._build_count_sql(index_name, filters_expr)
logger.debug("OBConnection.search with normal count sql: %s", count_sql)
rows, elapsed_time = self._execute_search_sql(count_sql)
total_count = rows[0][0] if rows else 0
result.total += total_count
logger.info(
f"OBConnection.search table {index_name}, search type: normal, step: 1-count, elapsed time: {elapsed_time:.3f} seconds,"
f" condition: '{condition}',"
f" got count: {total_count}"
)
if total_count == 0:
continue
order_by_expr = ("ORDER BY " + ", ".join(orders)) if len(orders) > 0 else ""
limit_expr = f"LIMIT {offset}, {limit}" if limit != 0 else ""
filter_sql = self._build_filter_search_sql(
index_name, fields_expr, filters_expr, order_by_expr, limit_expr
)
logger.debug("OBConnection.search with normal sql: %s", filter_sql)
rows, elapsed_time = self._execute_search_sql(filter_sql)
logger.info(
f"OBConnection.search table {index_name}, search type: normal, step: 2-query, elapsed time: {elapsed_time:.3f} seconds,"
f" select fields: '{output_fields}',"
f" condition: '{condition}',"
f" return rows count: {len(rows)}"
)
for row in rows:
result.chunks.append(self._row_to_entity(row, output_fields))
if result.total == 0:
result.total = len(result.chunks)
return result
def get(self, chunk_id: str, index_name: str, knowledgebase_ids: list[str]) -> dict | None:
try:
doc = super().get(chunk_id, index_name, knowledgebase_ids)
if doc is None:
return None
return doc
except json.JSONDecodeError as e:
logger.error(f"JSON decode error when getting chunk {chunk_id}: {str(e)}")
return {
"id": chunk_id,
"error": f"Failed to parse chunk data due to invalid JSON: {str(e)}"
}
except Exception:
logger.exception(f"OBConnection.get({chunk_id}) got exception")
raise
def insert(self, documents: list[dict], index_name: str, knowledgebase_id: str = None) -> list[str]:
if not documents:
return []
# For doc_meta tables, use simple insert without field transformation
if index_name.startswith("ragflow_doc_meta_"):
return self._insert_doc_meta(documents, index_name)
docs: list[dict] = []
ids: list[str] = []
for document in documents:
d: dict = {}
for k, v in document.items():
if vector_column_pattern.match(k):
d[k] = v
continue
if k not in column_names:
if "extra" not in d:
d["extra"] = {}
d["extra"][k] = v
continue
if v is None:
d[k] = get_default_value(k)
continue
if k == "kb_id" and isinstance(v, list):
d[k] = v[0]
elif k == "content_with_weight" and isinstance(v, dict):
d[k] = json.dumps(v, ensure_ascii=False)
elif k == "position_int":
d[k] = json.dumps([list(vv) for vv in v], ensure_ascii=False)
elif isinstance(v, list):
# strip string elements and escape backslashes and control characters ('\n', '\r', '\t') so they survive the JSON dump as visible escapes
cleaned_v = []
for vv in v:
if isinstance(vv, str):
cleaned_str = vv.strip()
cleaned_str = cleaned_str.replace('\\', '\\\\')
cleaned_str = cleaned_str.replace('\n', '\\n')
cleaned_str = cleaned_str.replace('\r', '\\r')
cleaned_str = cleaned_str.replace('\t', '\\t')
cleaned_v.append(cleaned_str)
else:
cleaned_v.append(vv)
d[k] = json.dumps(cleaned_v, ensure_ascii=False)
else:
d[k] = v
ids.append(d["id"])
# this is to fix https://github.com/sqlalchemy/sqlalchemy/issues/9703
for column_name in column_names:
if column_name not in d:
d[column_name] = get_default_value(column_name)
metadata = d.get("metadata", {})
if metadata is None:
metadata = {}
group_id = metadata.get("_group_id")
title = metadata.get("_title")
if d.get("doc_id"):
if group_id:
d["group_id"] = group_id
else:
d["group_id"] = d["doc_id"]
if title:
d["docnm_kwd"] = title
docs.append(d)
logger.debug("OBConnection.insert chunks: %s", docs)
res = []
try:
self.client.upsert(index_name, docs)
except Exception as e:
logger.error(f"OBConnection.insert error: {str(e)}")
res.append(str(e))
return res
def _insert_doc_meta(self, documents: list[dict], index_name: str) -> list[str]:
"""Insert documents into doc_meta table with simple field handling."""
docs: list[dict] = []
for document in documents:
d = {
"id": document.get("id"),
"kb_id": document.get("kb_id"),
}
# Handle meta_fields - store as JSON
meta_fields = document.get("meta_fields")
if meta_fields is not None:
if isinstance(meta_fields, dict):
d["meta_fields"] = json.dumps(meta_fields, ensure_ascii=False)
elif isinstance(meta_fields, str):
d["meta_fields"] = meta_fields
else:
d["meta_fields"] = "{}"
else:
d["meta_fields"] = "{}"
docs.append(d)
logger.debug("OBConnection._insert_doc_meta: %s", docs)
res = []
try:
self.client.upsert(index_name, docs)
except Exception as e:
logger.error(f"OBConnection._insert_doc_meta error: {str(e)}")
res.append(str(e))
return res
def update(self, condition: dict, new_value: dict, index_name: str, knowledgebase_id: str) -> bool:
if not self._check_table_exists_cached(index_name):
return True
# For doc_meta tables, don't force kb_id in condition
if not index_name.startswith("ragflow_doc_meta_"):
condition["kb_id"] = knowledgebase_id
filters = get_filters(condition)
set_values: list[str] = []
for k, v in new_value.items():
if k == "remove":
if isinstance(v, str):
set_values.append(f"{v} = NULL")
else:
assert isinstance(v, dict), f"Expected str or dict for 'remove', got {type(new_value[k])}."
for kk, vv in v.items():
assert kk in array_columns, f"Column '{kk}' is not an array column."
set_values.append(f"{kk} = array_remove({kk}, {get_value_str(vv)})")
elif k == "add":
assert isinstance(v, dict), f"Expected str or dict for 'add', got {type(new_value[k])}."
for kk, vv in v.items():
assert kk in array_columns, f"Column '{kk}' is not an array column."
set_values.append(f"{kk} = array_append({kk}, {get_value_str(vv)})")
elif k == "metadata":
assert isinstance(v, dict), f"Expected dict for 'metadata', got {type(new_value[k])}"
set_values.append(f"{k} = {get_value_str(v)}")
if v and "doc_id" in condition:
group_id = v.get("_group_id")
title = v.get("_title")
if group_id:
set_values.append(f"group_id = {get_value_str(group_id)}")
if title:
set_values.append(f"docnm_kwd = {get_value_str(title)}")
else:
set_values.append(f"{k} = {get_value_str(v)}")
if not set_values:
return True
update_sql = (
f"UPDATE {index_name}"
f" SET {', '.join(set_values)}"
f" WHERE {' AND '.join(filters)}"
)
logger.debug("OBConnection.update sql: %s", update_sql)
try:
self.client.perform_raw_text_sql(update_sql)
return True
except Exception as e:
logger.error(f"OBConnection.update error: {str(e)}")
return False
def _row_to_entity(self, data: Row, fields: list[str]) -> dict:
entity = {}
for i, field in enumerate(fields):
value = data[i]
if value is None:
continue
entity[field] = get_column_value(field, value)
return entity
@staticmethod
def _es_row_to_entity(data: dict) -> dict:
entity = {}
for k, v in data.items():
if v is None:
continue
entity[k] = get_column_value(k, v)
return entity
"""
Helper functions for search result
"""
def get_total(self, res) -> int:
return res.total
def get_doc_ids(self, res) -> list[str]:
return [row["id"] for row in res.chunks]
def get_fields(self, res, fields: list[str]) -> dict[str, dict]:
result = {}
for row in res.chunks:
data = {}
for field in fields:
v = row.get(field)
if v is not None:
data[field] = v
result[row["id"]] = data
return result
# copied from query.FulltextQueryer
def is_chinese(self, line):
arr = re.split(r"[ \t]+", line)
if len(arr) <= 3:
return True
e = 0
for t in arr:
if not re.match(r"[a-zA-Z]+$", t):
e += 1
return e * 1.0 / len(arr) >= 0.7
def highlight(self, txt: str, tks: str, question: str, keywords: list[str]) -> Optional[str]:
if not txt or not keywords:
return None
highlighted_txt = txt
if question and not self.is_chinese(question):
highlighted_txt = re.sub(
r"(^|\W)(%s)(\W|$)" % re.escape(question),
r"\1<em>\2</em>\3", highlighted_txt,
flags=re.IGNORECASE | re.MULTILINE,
)
if re.search(r"<em>[^<>]+</em>", highlighted_txt, flags=re.IGNORECASE | re.MULTILINE):
return highlighted_txt
for keyword in keywords:
highlighted_txt = re.sub(
r"(^|\W)(%s)(\W|$)" % re.escape(keyword),
r"\1<em>\2</em>\3", highlighted_txt,
flags=re.IGNORECASE | re.MULTILINE,
)
if re.search(r'</em>\s*<em>', highlighted_txt):
return highlighted_txt
else:
return None
if not tks:
tks = rag_tokenizer.tokenize(txt)
tokens = tks.split()
if not tokens:
return None
last_pos = len(txt)
for i in range(len(tokens) - 1, -1, -1):
token = tokens[i]
token_pos = highlighted_txt.rfind(token, 0, last_pos)
if token_pos != -1:
if token in keywords:
highlighted_txt = (
highlighted_txt[:token_pos] +
f'<em>{token}</em>' +
highlighted_txt[token_pos + len(token):]
)
last_pos = token_pos
return re.sub(r'</em><em>', '', highlighted_txt)
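# A small, hypothetical illustration of the fast path above: when the question is plain English
# and appears verbatim in the text, the whole match is wrapped in <em> tags, e.g.
#   highlight("How to deploy RAGFlow with OceanBase", "", "deploy RAGFlow with OceanBase", ["deploy"])
#   # -> "How to <em>deploy RAGFlow with OceanBase</em>"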
def get_highlight(self, res, keywords: list[str], fieldnm: str):
ans = {}
if len(res.chunks) == 0 or len(keywords) == 0:
return ans
for d in res.chunks:
txt = d.get(fieldnm)
if not txt:
continue
tks = d.get("content_ltks") if fieldnm == "content_with_weight" else ""
highlighted_txt = self.highlight(txt, tks, " ".join(keywords), keywords)
if highlighted_txt:
ans[d["id"]] = highlighted_txt
return ans
def get_aggregation(self, res, fieldnm: str):
if len(res.chunks) == 0:
return []
counts = {}
result = []
for d in res.chunks:
if "value" in d and "count" in d:
# directly use the aggregation result
result.append((d["value"], d["count"]))
elif fieldnm in d:
# aggregate the values of specific field
v = d[fieldnm]
if isinstance(v, list):
for vv in v:
if isinstance(vv, str) and vv.strip():
counts[vv] = counts.get(vv, 0) + 1
elif isinstance(v, str) and v.strip():
counts[v] = counts.get(v, 0) + 1
if len(counts) > 0:
for k, v in counts.items():
result.append((k, v))
return result
"""
SQL
"""
def sql(self, sql: str, fetch_size: int = 1024, format: str = "json"):
logger.debug("OBConnection.sql get sql: %s", sql)
def normalize_sql(sql_text: str) -> str:
cleaned = sql_text.strip().rstrip(";")
cleaned = re.sub(r"[`]+", "", cleaned)
cleaned = re.sub(
r"json_extract_string\s*\(\s*([^,]+?)\s*,\s*([^)]+?)\s*\)",
r"JSON_UNQUOTE(JSON_EXTRACT(\1, \2))",
cleaned,
flags=re.IGNORECASE,
)
cleaned = re.sub(
r"json_extract_isnull\s*\(\s*([^,]+?)\s*,\s*([^)]+?)\s*\)",
r"(JSON_EXTRACT(\1, \2) IS NULL)",
cleaned,
flags=re.IGNORECASE,
)
return cleaned
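# e.g. a hypothetical input "SELECT json_extract_string(meta, '$.title') FROM t;"
# is rewritten to "SELECT JSON_UNQUOTE(JSON_EXTRACT(meta, '$.title')) FROM t"
# (trailing semicolons and backticks are stripped as well)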
def coerce_value(value: Any) -> Any:
if isinstance(value, np.generic):
return value.item()
if isinstance(value, bytes):
return value.decode("utf-8", errors="ignore")
return value
sql_text = normalize_sql(sql)
if fetch_size and fetch_size > 0:
sql_lower = sql_text.lstrip().lower()
if re.match(r"^(select|with)\b", sql_lower) and not re.search(r"\blimit\b", sql_lower):
sql_text = f"{sql_text} LIMIT {int(fetch_size)}"
logger.debug("OBConnection.sql to ob: %s", sql_text)
try:
res = self.client.perform_raw_text_sql(sql_text)
except Exception:
logger.exception("OBConnection.sql got exception")
raise
if res is None:
return None
columns = list(res.keys()) if hasattr(res, "keys") else []
try:
rows = res.fetchmany(fetch_size) if fetch_size and fetch_size > 0 else res.fetchall()
except Exception:
rows = res.fetchall()
rows_list = [[coerce_value(v) for v in list(row)] for row in rows]
result = {
"columns": [{"name": col, "type": "text"} for col in columns],
"rows": rows_list,
}
if format == "markdown":
header = "|" + "|".join(columns) + "|" if columns else ""
separator = "|" + "|".join(["---" for _ in columns]) + "|" if columns else ""
body = "\n".join(["|" + "|".join([str(v) for v in row]) + "|" for row in rows_list])
result["markdown"] = "\n".join([line for line in [header, separator, body] if line])
return result
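# Illustrative markdown rendering of a two-column result (hypothetical data):
#   |id|name|
#   |---|---|
#   |1|alpha|
#   |2|beta|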
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/utils/ob_conn.py",
"license": "Apache License 2.0",
"lines": 1252,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/component/variable_assigner.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import os
import numbers
from agent.component.base import ComponentBase, ComponentParamBase
from api.utils.api_utils import timeout
class VariableAssignerParam(ComponentParamBase):
"""
Define the Variable Assigner component parameters.
"""
def __init__(self):
super().__init__()
self.variables=[]
def check(self):
return True
def get_input_form(self) -> dict[str, dict]:
return {
"items": {
"type": "json",
"name": "Items"
}
}
class VariableAssigner(ComponentBase,ABC):
component_name = "VariableAssigner"
@timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
def _invoke(self, **kwargs):
if not isinstance(self._param.variables,list):
return
else:
for item in self._param.variables:
if any([not item.get("variable"), not item.get("operator"), not item.get("parameter")]):
raise ValueError("Variable is not complete.")
variable=item["variable"]
operator=item["operator"]
parameter=item["parameter"]
variable_value=self._canvas.get_variable_value(variable)
new_variable=self._operate(variable_value,operator,parameter)
self._canvas.set_variable_value(variable, new_variable)
def _operate(self,variable,operator,parameter):
if operator == "overwrite":
return self._overwrite(parameter)
elif operator == "clear":
return self._clear(variable)
elif operator == "set":
return self._set(variable,parameter)
elif operator == "append":
return self._append(variable,parameter)
elif operator == "extend":
return self._extend(variable,parameter)
elif operator == "remove_first":
return self._remove_first(variable)
elif operator == "remove_last":
return self._remove_last(variable)
elif operator == "+=":
return self._add(variable,parameter)
elif operator == "-=":
return self._subtract(variable,parameter)
elif operator == "*=":
return self._multiply(variable,parameter)
elif operator == "/=":
return self._divide(variable,parameter)
else:
return
def _overwrite(self,parameter):
return self._canvas.get_variable_value(parameter)
def _clear(self,variable):
if isinstance(variable,list):
return []
elif isinstance(variable,str):
return ""
elif isinstance(variable,dict):
return {}
elif isinstance(variable,bool):
# bool must be checked before int because isinstance(True, int) is True
return False
elif isinstance(variable,int):
return 0
elif isinstance(variable,float):
return 0.0
else:
return None
def _set(self,variable,parameter):
if variable is None:
return self._canvas.get_value_with_variable(parameter)
elif isinstance(variable,str):
return self._canvas.get_value_with_variable(parameter)
elif isinstance(variable,bool):
return parameter
elif isinstance(variable,int):
return parameter
elif isinstance(variable,float):
return parameter
else:
return parameter
def _append(self,variable,parameter):
parameter=self._canvas.get_variable_value(parameter)
if variable is None:
variable=[]
if not isinstance(variable,list):
return "ERROR:VARIABLE_NOT_LIST"
elif len(variable)!=0 and not isinstance(parameter,type(variable[0])):
return "ERROR:PARAMETER_NOT_LIST_ELEMENT_TYPE"
else:
variable.append(parameter)
return variable
def _extend(self,variable,parameter):
parameter=self._canvas.get_variable_value(parameter)
if variable is None:
variable=[]
if not isinstance(variable,list):
return "ERROR:VARIABLE_NOT_LIST"
elif not isinstance(parameter,list):
return "ERROR:PARAMETER_NOT_LIST"
elif len(variable)!=0 and len(parameter)!=0 and not isinstance(parameter[0],type(variable[0])):
return "ERROR:PARAMETER_NOT_LIST_ELEMENT_TYPE"
else:
return variable + parameter
def _remove_first(self,variable):
# check the type before calling len(); non-sequence values would otherwise raise TypeError
if not isinstance(variable,list):
return "ERROR:VARIABLE_NOT_LIST"
if len(variable)==0:
return variable
return variable[1:]
def _remove_last(self,variable):
if not isinstance(variable,list):
return "ERROR:VARIABLE_NOT_LIST"
if len(variable)==0:
return variable
return variable[:-1]
def is_number(self, value):
if isinstance(value, bool):
return False
return isinstance(value, numbers.Number)
def _add(self,variable,parameter):
if self.is_number(variable) and self.is_number(parameter):
return variable + parameter
else:
return "ERROR:VARIABLE_NOT_NUMBER or PARAMETER_NOT_NUMBER"
def _subtract(self,variable,parameter):
if self.is_number(variable) and self.is_number(parameter):
return variable - parameter
else:
return "ERROR:VARIABLE_NOT_NUMBER or PARAMETER_NOT_NUMBER"
def _multiply(self,variable,parameter):
if self.is_number(variable) and self.is_number(parameter):
return variable * parameter
else:
return "ERROR:VARIABLE_NOT_NUMBER or PARAMETER_NOT_NUMBER"
def _divide(self,variable,parameter):
if self.is_number(variable) and self.is_number(parameter):
if parameter==0:
return "ERROR:DIVIDE_BY_ZERO"
else:
return variable/parameter
else:
return "ERROR:VARIABLE_NOT_NUMBER or PARAMETER_NOT_NUMBER"
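# A brief, hypothetical illustration of the arithmetic operators above:
#   _add(2, 3) -> 5, _divide(10, 4) -> 2.5, _divide(1, 0) -> "ERROR:DIVIDE_BY_ZERO",
#   and any non-numeric operand (including booleans) yields the NOT_NUMBER error string.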
def thoughts(self) -> str:
return "Assign variables from canvas." | {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/component/variable_assigner.py",
"license": "Apache License 2.0",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/jira/connector.py | """Checkpointed Jira connector that emits markdown blobs for each issue."""
from __future__ import annotations
import argparse
import copy
import logging
import os
import re
from collections.abc import Callable, Generator, Iterable, Iterator, Sequence
from datetime import datetime, timedelta, timezone
from typing import Any
from zoneinfo import ZoneInfo, ZoneInfoNotFoundError
from jira import JIRA
from jira.resources import Issue
from pydantic import Field
from common.data_source.config import (
INDEX_BATCH_SIZE,
JIRA_CONNECTOR_LABELS_TO_SKIP,
JIRA_CONNECTOR_MAX_TICKET_SIZE,
JIRA_TIMEZONE_OFFSET,
ONE_HOUR,
DocumentSource,
)
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
InsufficientPermissionsError,
UnexpectedValidationError,
)
from common.data_source.interfaces import (
CheckpointedConnectorWithPermSync,
CheckpointOutputWrapper,
SecondsSinceUnixEpoch,
SlimConnectorWithPermSync,
)
from common.data_source.jira.utils import (
JIRA_CLOUD_API_VERSION,
JIRA_SERVER_API_VERSION,
build_issue_url,
extract_body_text,
extract_named_value,
extract_user,
format_attachments,
format_comments,
parse_jira_datetime,
should_skip_issue,
)
from common.data_source.models import (
ConnectorCheckpoint,
ConnectorFailure,
Document,
DocumentFailure,
SlimDocument,
)
from common.data_source.utils import is_atlassian_cloud_url, is_atlassian_date_error, scoped_url
logger = logging.getLogger(__name__)
_DEFAULT_FIELDS = "summary,description,updated,created,status,priority,assignee,reporter,labels,issuetype,project,comment,attachment"
_SLIM_FIELDS = "key,project"
_MAX_RESULTS_FETCH_IDS = 5000
_JIRA_SLIM_PAGE_SIZE = 500
_JIRA_FULL_PAGE_SIZE = 50
_DEFAULT_ATTACHMENT_SIZE_LIMIT = 10 * 1024 * 1024 # 10MB
class JiraCheckpoint(ConnectorCheckpoint):
"""Checkpoint that tracks which slice of the current JQL result set was emitted."""
start_at: int = 0
cursor: str | None = None
ids_done: bool = False
all_issue_ids: list[list[str]] = Field(default_factory=list)
_TZ_OFFSET_PATTERN = re.compile(r"([+-])(\d{2})(:?)(\d{2})$")
class JiraConnector(CheckpointedConnectorWithPermSync, SlimConnectorWithPermSync):
"""Retrieve Jira issues and emit them as Markdown documents."""
def __init__(
self,
jira_base_url: str,
project_key: str | None = None,
jql_query: str | None = None,
batch_size: int = INDEX_BATCH_SIZE,
include_comments: bool = True,
include_attachments: bool = False,
labels_to_skip: Sequence[str] | None = None,
comment_email_blacklist: Sequence[str] | None = None,
scoped_token: bool = False,
attachment_size_limit: int | None = None,
timezone_offset: float | None = None,
) -> None:
if not jira_base_url:
raise ConnectorValidationError("Jira base URL must be provided.")
self.jira_base_url = jira_base_url.rstrip("/")
self.project_key = project_key
self.jql_query = jql_query
self.batch_size = batch_size
self.include_comments = include_comments
self.include_attachments = include_attachments
configured_labels = labels_to_skip or JIRA_CONNECTOR_LABELS_TO_SKIP
self.labels_to_skip = {label.lower() for label in configured_labels}
self.comment_email_blacklist = {email.lower() for email in comment_email_blacklist or []}
self.scoped_token = scoped_token
self.jira_client: JIRA | None = None
self.max_ticket_size = JIRA_CONNECTOR_MAX_TICKET_SIZE
self.attachment_size_limit = attachment_size_limit if attachment_size_limit and attachment_size_limit > 0 else _DEFAULT_ATTACHMENT_SIZE_LIMIT
self._fields_param = _DEFAULT_FIELDS
self._slim_fields = _SLIM_FIELDS
tz_offset_value = float(timezone_offset) if timezone_offset is not None else float(JIRA_TIMEZONE_OFFSET)
self.timezone_offset = tz_offset_value
self.timezone = timezone(offset=timedelta(hours=tz_offset_value))
self._timezone_overridden = timezone_offset is not None
# -------------------------------------------------------------------------
# Connector lifecycle helpers
# -------------------------------------------------------------------------
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Instantiate the Jira client using either an API token or username/password."""
jira_url_for_client = self.jira_base_url
if self.scoped_token:
if is_atlassian_cloud_url(self.jira_base_url):
try:
jira_url_for_client = scoped_url(self.jira_base_url, "jira")
except ValueError as exc:
raise ConnectorValidationError(str(exc)) from exc
else:
logger.warning("[Jira] Scoped token requested but Jira base URL does not appear to be an Atlassian Cloud domain; scoped token ignored.")
user_email = credentials.get("jira_user_email") or credentials.get("username")
api_token = credentials.get("jira_api_token") or credentials.get("token") or credentials.get("api_token")
password = credentials.get("jira_password") or credentials.get("password")
rest_api_version = credentials.get("rest_api_version")
if not rest_api_version:
rest_api_version = JIRA_CLOUD_API_VERSION if api_token else JIRA_SERVER_API_VERSION
options: dict[str, Any] = {"rest_api_version": rest_api_version}
try:
if user_email and api_token:
self.jira_client = JIRA(
server=jira_url_for_client,
basic_auth=(user_email, api_token),
options=options,
)
elif api_token:
self.jira_client = JIRA(
server=jira_url_for_client,
token_auth=api_token,
options=options,
)
elif user_email and password:
self.jira_client = JIRA(
server=jira_url_for_client,
basic_auth=(user_email, password),
options=options,
)
else:
raise ConnectorMissingCredentialError("Jira credentials must include either an API token or username/password.")
except Exception as exc: # pragma: no cover - jira lib raises many types
raise ConnectorMissingCredentialError(f"Jira: {exc}") from exc
self._sync_timezone_from_server()
return None
def validate_connector_settings(self) -> None:
"""Validate connectivity by fetching basic Jira info."""
if not self.jira_client:
raise ConnectorMissingCredentialError("Jira")
try:
if self.jql_query:
dummy_checkpoint = self.build_dummy_checkpoint()
checkpoint_callback = self._make_checkpoint_callback(dummy_checkpoint)
iterator = self._perform_jql_search(
jql=self.jql_query,
start=0,
max_results=1,
fields="key",
all_issue_ids=dummy_checkpoint.all_issue_ids,
checkpoint_callback=checkpoint_callback,
next_page_token=dummy_checkpoint.cursor,
ids_done=dummy_checkpoint.ids_done,
)
next(iter(iterator), None)
elif self.project_key:
self.jira_client.project(self.project_key)
else:
self.jira_client.projects()
except Exception as exc: # pragma: no cover - dependent on Jira responses
self._handle_validation_error(exc)
# -------------------------------------------------------------------------
# Checkpointed connector implementation
# -------------------------------------------------------------------------
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: JiraCheckpoint,
) -> Generator[Document | ConnectorFailure, None, JiraCheckpoint]:
"""Load Jira issues, emitting a Document per issue."""
try:
return (yield from self._load_with_retry(start, end, checkpoint))
except Exception as exc:
logger.exception(f"[Jira] Jira query ultimately failed: {exc}")
yield ConnectorFailure(
failure_message=f"Failed to query Jira: {exc}",
exception=exc,
)
return JiraCheckpoint(has_more=False, start_at=checkpoint.start_at)
def load_from_checkpoint_with_perm_sync(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: JiraCheckpoint,
) -> Generator[Document | ConnectorFailure, None, JiraCheckpoint]:
"""Permissions are not synced separately, so reuse the standard loader."""
return (yield from self.load_from_checkpoint(start=start, end=end, checkpoint=checkpoint))
def _load_with_retry(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: JiraCheckpoint,
) -> Generator[Document | ConnectorFailure, None, JiraCheckpoint]:
if not self.jira_client:
raise ConnectorMissingCredentialError("Jira")
attempt_start = start
retried_with_buffer = False
attempt = 0
while True:
attempt += 1
jql = self._build_jql(attempt_start, end)
logger.info(f"[Jira] Executing Jira JQL attempt {attempt} (buffered_retry={retried_with_buffer}) [start and end parameters redacted]")
try:
return (yield from self._load_from_checkpoint_internal(jql, checkpoint, start_filter=start))
except Exception as exc:
if attempt_start is not None and not retried_with_buffer and is_atlassian_date_error(exc):
attempt_start = attempt_start - ONE_HOUR
retried_with_buffer = True
logger.info(f"[Jira] Atlassian date error detected; retrying with start={attempt_start}.")
continue
raise
def _handle_validation_error(self, exc: Exception) -> None:
status_code = getattr(exc, "status_code", None)
if status_code == 401:
raise InsufficientPermissionsError("Jira credential appears to be invalid or expired (HTTP 401).") from exc
if status_code == 403:
raise InsufficientPermissionsError("Jira token does not have permission to access the requested resources (HTTP 403).") from exc
if status_code == 404:
raise ConnectorValidationError("Jira resource not found (HTTP 404).") from exc
if status_code == 429:
raise ConnectorValidationError("Jira rate limit exceeded during validation (HTTP 429).") from exc
message = getattr(exc, "text", str(exc))
if not message:
raise UnexpectedValidationError("Unexpected Jira validation error.") from exc
raise ConnectorValidationError(f"Jira validation failed: {message}") from exc
def _load_from_checkpoint_internal(
self,
jql: str,
checkpoint: JiraCheckpoint,
start_filter: SecondsSinceUnixEpoch | None = None,
) -> Generator[Document | ConnectorFailure, None, JiraCheckpoint]:
assert self.jira_client, "load_credentials must be called before loading issues."
page_size = self._full_page_size()
new_checkpoint = copy.deepcopy(checkpoint)
starting_offset = new_checkpoint.start_at or 0
current_offset = starting_offset
checkpoint_callback = self._make_checkpoint_callback(new_checkpoint)
issue_iter = self._perform_jql_search(
jql=jql,
start=current_offset,
max_results=page_size,
fields=self._fields_param,
all_issue_ids=new_checkpoint.all_issue_ids,
checkpoint_callback=checkpoint_callback,
next_page_token=new_checkpoint.cursor,
ids_done=new_checkpoint.ids_done,
)
start_cutoff = float(start_filter) if start_filter is not None else None
for issue in issue_iter:
current_offset += 1
issue_key = getattr(issue, "key", "unknown")
if should_skip_issue(issue, self.labels_to_skip):
continue
issue_updated = parse_jira_datetime(issue.raw.get("fields", {}).get("updated"))
if start_cutoff is not None and issue_updated is not None and issue_updated.timestamp() <= start_cutoff:
# Jira JQL only supports minute precision, so we discard already-processed
# issues here based on the original second-level cutoff.
continue
try:
document = self._issue_to_document(issue)
except Exception as exc: # pragma: no cover - defensive
logger.exception(f"[Jira] Failed to convert Jira issue {issue_key}: {exc}")
yield ConnectorFailure(
failure_message=f"Failed to convert Jira issue {issue_key}: {exc}",
failed_document=DocumentFailure(
document_id=issue_key,
document_link=build_issue_url(self.jira_base_url, issue_key),
),
exception=exc,
)
continue
if document is not None:
yield document
if self.include_attachments:
for attachment_document in self._attachment_documents(issue):
if attachment_document is not None:
yield attachment_document
self._update_checkpoint_for_next_run(
checkpoint=new_checkpoint,
current_offset=current_offset,
starting_offset=starting_offset,
page_size=page_size,
)
new_checkpoint.start_at = current_offset
return new_checkpoint
def build_dummy_checkpoint(self) -> JiraCheckpoint:
"""Create an empty checkpoint used to kick off ingestion."""
return JiraCheckpoint(has_more=True, start_at=0)
def validate_checkpoint_json(self, checkpoint_json: str) -> JiraCheckpoint:
"""Validate a serialized checkpoint."""
return JiraCheckpoint.model_validate_json(checkpoint_json)
# -------------------------------------------------------------------------
# Slim connector implementation
# -------------------------------------------------------------------------
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: Any = None, # noqa: ARG002 - maintained for interface compatibility
) -> Generator[list[SlimDocument], None, None]:
"""Return lightweight references to Jira issues (used for permission syncing)."""
if not self.jira_client:
raise ConnectorMissingCredentialError("Jira")
start_ts = start if start is not None else 0
end_ts = end if end is not None else datetime.now(timezone.utc).timestamp()
jql = self._build_jql(start_ts, end_ts)
checkpoint = self.build_dummy_checkpoint()
checkpoint_callback = self._make_checkpoint_callback(checkpoint)
prev_offset = 0
current_offset = 0
slim_batch: list[SlimDocument] = []
while checkpoint.has_more:
for issue in self._perform_jql_search(
jql=jql,
start=current_offset,
max_results=_JIRA_SLIM_PAGE_SIZE,
fields=self._slim_fields,
all_issue_ids=checkpoint.all_issue_ids,
checkpoint_callback=checkpoint_callback,
next_page_token=checkpoint.cursor,
ids_done=checkpoint.ids_done,
):
current_offset += 1
if should_skip_issue(issue, self.labels_to_skip):
continue
doc_id = build_issue_url(self.jira_base_url, issue.key)
slim_batch.append(SlimDocument(id=doc_id))
if len(slim_batch) >= _JIRA_SLIM_PAGE_SIZE:
yield slim_batch
slim_batch = []
self._update_checkpoint_for_next_run(
checkpoint=checkpoint,
current_offset=current_offset,
starting_offset=prev_offset,
page_size=_JIRA_SLIM_PAGE_SIZE,
)
prev_offset = current_offset
if slim_batch:
yield slim_batch
# -------------------------------------------------------------------------
# Internal helpers
# -------------------------------------------------------------------------
def _build_jql(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> str:
clauses: list[str] = []
if self.jql_query:
clauses.append(f"({self.jql_query})")
elif self.project_key:
clauses.append(f'project = "{self.project_key}"')
else:
raise ConnectorValidationError("Either project_key or jql_query must be provided for Jira connector.")
if self.labels_to_skip:
labels = ", ".join(f'"{label}"' for label in self.labels_to_skip)
clauses.append(f"labels NOT IN ({labels})")
if start is not None:
clauses.append(f'updated >= "{self._format_jql_time(start)}"')
if end is not None:
clauses.append(f'updated <= "{self._format_jql_time(end)}"')
if not clauses:
raise ConnectorValidationError("Unable to build Jira JQL query.")
jql = " AND ".join(clauses)
if "order by" not in jql.lower():
jql = f"{jql} ORDER BY updated ASC"
return jql
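# Illustrative JQL for a hypothetical project and time window (no skipped labels):
#   project = "ENG" AND updated >= "2024-01-01 00:00" AND updated <= "2024-01-02 00:00" ORDER BY updated ASC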
def _format_jql_time(self, timestamp: SecondsSinceUnixEpoch) -> str:
dt_utc = datetime.fromtimestamp(float(timestamp), tz=timezone.utc)
dt_local = dt_utc.astimezone(self.timezone)
# Jira only accepts minute-precision timestamps in JQL, so we format accordingly
# and rely on a post-query second-level filter to avoid duplicates.
return dt_local.strftime("%Y-%m-%d %H:%M")
def _issue_to_document(self, issue: Issue) -> Document | None:
fields = issue.raw.get("fields", {})
summary = fields.get("summary") or ""
description_text = extract_body_text(fields.get("description"))
comments_text = (
format_comments(
fields.get("comment"),
blacklist=self.comment_email_blacklist,
)
if self.include_comments
else ""
)
attachments_text = format_attachments(fields.get("attachment"))
reporter_name, reporter_email = extract_user(fields.get("reporter"))
assignee_name, assignee_email = extract_user(fields.get("assignee"))
status = extract_named_value(fields.get("status"))
priority = extract_named_value(fields.get("priority"))
issue_type = extract_named_value(fields.get("issuetype"))
project = fields.get("project") or {}
issue_url = build_issue_url(self.jira_base_url, issue.key)
metadata_lines = [
f"key: {issue.key}",
f"url: {issue_url}",
f"summary: {summary}",
f"status: {status or 'Unknown'}",
f"priority: {priority or 'Unspecified'}",
f"issue_type: {issue_type or 'Unknown'}",
f"project: {project.get('name') or ''}",
f"project_key: {project.get('key') or self.project_key or ''}",
]
if reporter_name:
metadata_lines.append(f"reporter: {reporter_name}")
if reporter_email:
metadata_lines.append(f"reporter_email: {reporter_email}")
if assignee_name:
metadata_lines.append(f"assignee: {assignee_name}")
if assignee_email:
metadata_lines.append(f"assignee_email: {assignee_email}")
if fields.get("labels"):
metadata_lines.append(f"labels: {', '.join(fields.get('labels'))}")
created_dt = parse_jira_datetime(fields.get("created"))
updated_dt = parse_jira_datetime(fields.get("updated")) or created_dt or datetime.now(timezone.utc)
metadata_lines.append(f"created: {created_dt.isoformat() if created_dt else ''}")
metadata_lines.append(f"updated: {updated_dt.isoformat() if updated_dt else ''}")
sections: list[str] = [
"---",
"\n".join(filter(None, metadata_lines)),
"---",
"",
"## Description",
description_text or "No description provided.",
]
if comments_text:
sections.extend(["", "## Comments", comments_text])
if attachments_text:
sections.extend(["", "## Attachments", attachments_text])
blob_text = "\n".join(sections).strip() + "\n"
blob = blob_text.encode("utf-8")
if len(blob) > self.max_ticket_size:
logger.info(f"[Jira] Skipping {issue.key} because it exceeds the maximum size of {self.max_ticket_size} bytes.")
return None
semantic_identifier = f"{issue.key}: {summary}" if summary else issue.key
return Document(
id=issue_url,
source=DocumentSource.JIRA,
semantic_identifier=semantic_identifier,
extension=".md",
blob=blob,
doc_updated_at=updated_dt,
size_bytes=len(blob),
)
def _attachment_documents(self, issue: Issue) -> Iterable[Document]:
attachments = issue.raw.get("fields", {}).get("attachment") or []
for attachment in attachments:
try:
document = self._attachment_to_document(issue, attachment)
if document is not None:
yield document
except Exception as exc: # pragma: no cover - defensive
failed_id = attachment.get("id") or attachment.get("filename")
issue_key = getattr(issue, "key", "unknown")
logger.warning(f"[Jira] Failed to process attachment {failed_id} for issue {issue_key}: {exc}")
def _attachment_to_document(self, issue: Issue, attachment: dict[str, Any]) -> Document | None:
if not self.include_attachments:
return None
filename = attachment.get("filename")
content_url = attachment.get("content")
if not filename or not content_url:
return None
try:
attachment_size = int(attachment.get("size", 0))
except (TypeError, ValueError):
attachment_size = 0
if attachment_size and attachment_size > self.attachment_size_limit:
logger.info(f"[Jira] Skipping attachment {filename} on {issue.key} because reported size exceeds limit ({self.attachment_size_limit} bytes).")
return None
blob = self._download_attachment(content_url)
if blob is None:
return None
if len(blob) > self.attachment_size_limit:
logger.info(f"[Jira] Skipping attachment {filename} on {issue.key} because it exceeds the size limit ({self.attachment_size_limit} bytes).")
return None
attachment_time = parse_jira_datetime(attachment.get("created")) or parse_jira_datetime(attachment.get("updated"))
updated_dt = attachment_time or parse_jira_datetime(issue.raw.get("fields", {}).get("updated")) or datetime.now(timezone.utc)
extension = os.path.splitext(filename)[1] or ""
document_id = f"{issue.key}::attachment::{attachment.get('id') or filename}"
semantic_identifier = f"{issue.key} attachment: {filename}"
return Document(
id=document_id,
source=DocumentSource.JIRA,
semantic_identifier=semantic_identifier,
extension=extension,
blob=blob,
doc_updated_at=updated_dt,
size_bytes=len(blob),
)
def _download_attachment(self, url: str) -> bytes | None:
if not self.jira_client:
raise ConnectorMissingCredentialError("Jira")
response = self.jira_client._session.get(url)
response.raise_for_status()
return response.content
def _sync_timezone_from_server(self) -> None:
if self._timezone_overridden or not self.jira_client:
return
try:
server_info = self.jira_client.server_info()
except Exception as exc: # pragma: no cover - defensive
logger.info(f"[Jira] Unable to determine timezone from server info; continuing with offset {self.timezone_offset}. Error: {exc}")
return
detected_offset = self._extract_timezone_offset(server_info)
if detected_offset is None or detected_offset == self.timezone_offset:
return
self.timezone_offset = detected_offset
self.timezone = timezone(offset=timedelta(hours=detected_offset))
logger.info(f"[Jira] Timezone offset adjusted to {detected_offset} hours using Jira server info.")
def _extract_timezone_offset(self, server_info: dict[str, Any]) -> float | None:
server_time_raw = server_info.get("serverTime")
if isinstance(server_time_raw, str):
offset = self._parse_offset_from_datetime_string(server_time_raw)
if offset is not None:
return offset
tz_name = server_info.get("timeZone")
if isinstance(tz_name, str):
offset = self._offset_from_zone_name(tz_name)
if offset is not None:
return offset
return None
@staticmethod
def _parse_offset_from_datetime_string(value: str) -> float | None:
normalized = JiraConnector._normalize_datetime_string(value)
try:
dt = datetime.fromisoformat(normalized)
except ValueError:
return None
if dt.tzinfo is None:
return 0.0
offset = dt.tzinfo.utcoffset(dt)
if offset is None:
return None
return offset.total_seconds() / 3600.0
@staticmethod
def _normalize_datetime_string(value: str) -> str:
trimmed = (value or "").strip()
if trimmed.endswith("Z"):
return f"{trimmed[:-1]}+00:00"
match = _TZ_OFFSET_PATTERN.search(trimmed)
if match and match.group(3) != ":":
sign, hours, _, minutes = match.groups()
trimmed = f"{trimmed[: match.start()]}{sign}{hours}:{minutes}"
return trimmed
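# e.g. a hypothetical "2024-01-02T03:04:05+0800" becomes "2024-01-02T03:04:05+08:00",
# and a trailing "Z" becomes "+00:00", so datetime.fromisoformat can parse the result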
@staticmethod
def _offset_from_zone_name(name: str) -> float | None:
try:
tz = ZoneInfo(name)
except (ZoneInfoNotFoundError, ValueError):
return None
reference = datetime.now(tz)
offset = reference.utcoffset()
if offset is None:
return None
return offset.total_seconds() / 3600.0
def _is_cloud_client(self) -> bool:
if not self.jira_client:
return False
rest_version = str(self.jira_client._options.get("rest_api_version", "")).strip()
return rest_version == str(JIRA_CLOUD_API_VERSION)
def _full_page_size(self) -> int:
return max(1, min(self.batch_size, _JIRA_FULL_PAGE_SIZE))
def _perform_jql_search(
self,
*,
jql: str,
start: int,
max_results: int,
fields: str | None = None,
all_issue_ids: list[list[str]] | None = None,
checkpoint_callback: Callable[[Iterable[list[str]], str | None], None] | None = None,
next_page_token: str | None = None,
ids_done: bool = False,
) -> Iterable[Issue]:
assert self.jira_client, "Jira client not initialized."
is_cloud = self._is_cloud_client()
if is_cloud:
if all_issue_ids is None:
raise ValueError("all_issue_ids is required for Jira Cloud searches.")
yield from self._perform_jql_search_v3(
jql=jql,
max_results=max_results,
fields=fields,
all_issue_ids=all_issue_ids,
checkpoint_callback=checkpoint_callback,
next_page_token=next_page_token,
ids_done=ids_done,
)
else:
yield from self._perform_jql_search_v2(
jql=jql,
start=start,
max_results=max_results,
fields=fields,
)
def _perform_jql_search_v3(
self,
*,
jql: str,
max_results: int,
all_issue_ids: list[list[str]],
fields: str | None = None,
checkpoint_callback: Callable[[Iterable[list[str]], str | None], None] | None = None,
next_page_token: str | None = None,
ids_done: bool = False,
) -> Iterable[Issue]:
assert self.jira_client, "Jira client not initialized."
if not ids_done:
new_ids, page_token = self._enhanced_search_ids(jql, next_page_token)
if checkpoint_callback is not None and new_ids:
checkpoint_callback(
self._chunk_issue_ids(new_ids, max_results),
page_token,
)
elif checkpoint_callback is not None:
checkpoint_callback([], page_token)
if all_issue_ids:
issue_ids = all_issue_ids.pop()
if issue_ids:
yield from self._bulk_fetch_issues(issue_ids, fields)
def _perform_jql_search_v2(
self,
*,
jql: str,
start: int,
max_results: int,
fields: str | None = None,
) -> Iterable[Issue]:
assert self.jira_client, "Jira client not initialized."
issues = self.jira_client.search_issues(
jql_str=jql,
startAt=start,
maxResults=max_results,
fields=fields or self._fields_param,
expand="renderedFields",
)
for issue in issues:
yield issue
def _enhanced_search_ids(
self,
jql: str,
next_page_token: str | None,
) -> tuple[list[str], str | None]:
assert self.jira_client, "Jira client not initialized."
enhanced_search_path = self.jira_client._get_url("search/jql")
params: dict[str, str | int | None] = {
"jql": jql,
"maxResults": _MAX_RESULTS_FETCH_IDS,
"nextPageToken": next_page_token,
"fields": "id",
}
response = self.jira_client._session.get(enhanced_search_path, params=params)
response.raise_for_status()
data = response.json()
return [str(issue["id"]) for issue in data.get("issues", [])], data.get("nextPageToken")
def _bulk_fetch_issues(
self,
issue_ids: list[str],
fields: str | None,
) -> Iterable[Issue]:
assert self.jira_client, "Jira client not initialized."
if not issue_ids:
return []
bulk_fetch_path = self.jira_client._get_url("issue/bulkfetch")
payload: dict[str, Any] = {"issueIdsOrKeys": issue_ids}
payload["fields"] = fields.split(",") if fields else ["*all"]
response = self.jira_client._session.post(bulk_fetch_path, json=payload)
response.raise_for_status()
data = response.json()
return [Issue(self.jira_client._options, self.jira_client._session, raw=issue) for issue in data.get("issues", [])]
@staticmethod
def _chunk_issue_ids(issue_ids: list[str], chunk_size: int) -> Iterable[list[str]]:
if chunk_size <= 0:
chunk_size = _JIRA_FULL_PAGE_SIZE
for idx in range(0, len(issue_ids), chunk_size):
yield issue_ids[idx : idx + chunk_size]
def _make_checkpoint_callback(self, checkpoint: JiraCheckpoint) -> Callable[[Iterable[list[str]], str | None], None]:
def checkpoint_callback(
issue_ids: Iterable[list[str]] | list[list[str]],
page_token: str | None,
) -> None:
for id_batch in issue_ids:
checkpoint.all_issue_ids.append(list(id_batch))
checkpoint.cursor = page_token
checkpoint.ids_done = page_token is None
return checkpoint_callback
def _update_checkpoint_for_next_run(
self,
*,
checkpoint: JiraCheckpoint,
current_offset: int,
starting_offset: int,
page_size: int,
) -> None:
if self._is_cloud_client():
checkpoint.has_more = bool(checkpoint.all_issue_ids) or not checkpoint.ids_done
else:
checkpoint.has_more = current_offset - starting_offset == page_size
checkpoint.cursor = None
checkpoint.ids_done = True
checkpoint.all_issue_ids = []
def iterate_jira_documents(
connector: "JiraConnector",
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
iteration_limit: int = 100_000,
) -> Iterator[Document]:
"""Yield documents without materializing the entire result set."""
checkpoint = connector.build_dummy_checkpoint()
iterations = 0
while checkpoint.has_more:
wrapper = CheckpointOutputWrapper[JiraCheckpoint]()
generator = wrapper(connector.load_from_checkpoint(start=start, end=end, checkpoint=checkpoint))
for document, failure, next_checkpoint in generator:
if failure is not None:
failure_message = getattr(failure, "failure_message", str(failure))
raise RuntimeError(f"Failed to load Jira documents: {failure_message}")
if document is not None:
yield document
if next_checkpoint is not None:
checkpoint = next_checkpoint
iterations += 1
if iterations > iteration_limit:
raise RuntimeError("Too many iterations while loading Jira documents.")
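# A minimal usage sketch (hypothetical URL, project key, and credential values):
#   connector = JiraConnector(jira_base_url="https://example.atlassian.net", project_key="ENG")
#   connector.load_credentials({"jira_user_email": "user@example.com", "jira_api_token": "<token>"})
#   for doc in iterate_jira_documents(connector, start=0.0, end=datetime.now(timezone.utc).timestamp()):
#       print(doc.semantic_identifier, doc.size_bytes)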
def test_jira(
*,
base_url: str,
project_key: str | None = None,
jql_query: str | None = None,
credentials: dict[str, Any],
batch_size: int = INDEX_BATCH_SIZE,
start_ts: float | None = None,
end_ts: float | None = None,
connector_options: dict[str, Any] | None = None,
) -> list[Document]:
"""Programmatic entry point that mirrors the CLI workflow."""
connector_kwargs = connector_options.copy() if connector_options else {}
connector = JiraConnector(
jira_base_url=base_url,
project_key=project_key,
jql_query=jql_query,
batch_size=batch_size,
**connector_kwargs,
)
connector.load_credentials(credentials)
connector.validate_connector_settings()
now_ts = datetime.now(timezone.utc).timestamp()
start = start_ts if start_ts is not None else 0.0
end = end_ts if end_ts is not None else now_ts
documents = list(iterate_jira_documents(connector, start=start, end=end))
logger.info(f"[Jira] Fetched {len(documents)} Jira documents.")
for doc in documents[:5]:
logger.info(f"[Jira] Document {doc.semantic_identifier} ({doc.id}) size={doc.size_bytes} bytes")
return documents
def _build_arg_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="Fetch Jira issues and print summary statistics.")
parser.add_argument("--base-url", dest="base_url", default=os.environ.get("JIRA_BASE_URL"))
parser.add_argument("--project", dest="project_key", default=os.environ.get("JIRA_PROJECT_KEY"))
parser.add_argument("--jql", dest="jql_query", default=os.environ.get("JIRA_JQL"))
parser.add_argument("--email", dest="user_email", default=os.environ.get("JIRA_USER_EMAIL"))
parser.add_argument("--token", dest="api_token", default=os.environ.get("JIRA_API_TOKEN"))
parser.add_argument("--password", dest="password", default=os.environ.get("JIRA_PASSWORD"))
parser.add_argument("--batch-size", dest="batch_size", type=int, default=int(os.environ.get("JIRA_BATCH_SIZE", INDEX_BATCH_SIZE)))
# argparse's type=bool treats any non-empty string (including "False") as True,
# so use BooleanOptionalAction to get proper --include_comments/--no-include_comments flags
parser.add_argument("--include_comments", dest="include_comments", action=argparse.BooleanOptionalAction, default=True)
parser.add_argument("--include_attachments", dest="include_attachments", action=argparse.BooleanOptionalAction, default=True)
parser.add_argument("--attachment_size_limit", dest="attachment_size_limit", type=float, default=_DEFAULT_ATTACHMENT_SIZE_LIMIT)
parser.add_argument("--start-ts", dest="start_ts", type=float, default=None, help="Epoch seconds inclusive lower bound for updated issues.")
parser.add_argument("--end-ts", dest="end_ts", type=float, default=9999999999, help="Epoch seconds inclusive upper bound for updated issues.")
return parser
def main(config: dict[str, Any] | None = None) -> None:
if config is None:
args = _build_arg_parser().parse_args()
config = {
"base_url": args.base_url,
"project_key": args.project_key,
"jql_query": args.jql_query,
"batch_size": args.batch_size,
"start_ts": args.start_ts,
"end_ts": args.end_ts,
"include_comments": args.include_comments,
"include_attachments": args.include_attachments,
"attachment_size_limit": args.attachment_size_limit,
"credentials": {
"jira_user_email": args.user_email,
"jira_api_token": args.api_token,
"jira_password": args.password,
},
}
base_url = config.get("base_url")
credentials = config.get("credentials", {})
if not base_url:
raise RuntimeError("Jira base URL must be provided via config or CLI arguments.")
if not (credentials.get("jira_api_token") or (credentials.get("jira_user_email") and credentials.get("jira_password"))):
raise RuntimeError("Provide either an API token or both email/password for Jira authentication.")
connector_options = {
key: value
for key, value in (
("include_comments", config.get("include_comments")),
("include_attachments", config.get("include_attachments")),
("attachment_size_limit", config.get("attachment_size_limit")),
("labels_to_skip", config.get("labels_to_skip")),
("comment_email_blacklist", config.get("comment_email_blacklist")),
("scoped_token", config.get("scoped_token")),
("timezone_offset", config.get("timezone_offset")),
)
if value is not None
}
documents = test_jira(
base_url=base_url,
project_key=config.get("project_key"),
jql_query=config.get("jql_query"),
credentials=credentials,
batch_size=config.get("batch_size", INDEX_BATCH_SIZE),
start_ts=config.get("start_ts"),
end_ts=config.get("end_ts"),
connector_options=connector_options,
)
preview_count = min(len(documents), 5)
for idx in range(preview_count):
doc = documents[idx]
print(f"[Jira] [Sample {idx + 1}] {doc.semantic_identifier} | id={doc.id} | size={doc.size_bytes} bytes")
print(f"[Jira] Jira connector test completed. Documents fetched: {len(documents)}")
if __name__ == "__main__": # pragma: no cover - manual execution path
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)s %(name)s %(message)s")
main()
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/jira/connector.py",
"license": "Apache License 2.0",
"lines": 837,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/jira/utils.py | """Helper utilities for the Jira connector."""
from __future__ import annotations
import os
from collections.abc import Collection, Iterable
from datetime import datetime, timezone
from typing import Any
from jira.resources import Issue
from common.data_source.utils import datetime_from_string
JIRA_SERVER_API_VERSION = os.environ.get("JIRA_SERVER_API_VERSION", "2")
JIRA_CLOUD_API_VERSION = os.environ.get("JIRA_CLOUD_API_VERSION", "3")
def build_issue_url(base_url: str, issue_key: str) -> str:
"""Return the canonical UI URL for a Jira issue."""
return f"{base_url.rstrip('/')}/browse/{issue_key}"
def parse_jira_datetime(value: Any) -> datetime | None:
"""Best-effort parse of Jira datetime values to aware UTC datetimes."""
if value is None:
return None
if isinstance(value, datetime):
return value.astimezone(timezone.utc) if value.tzinfo else value.replace(tzinfo=timezone.utc)
if isinstance(value, str):
return datetime_from_string(value)
return None
def extract_named_value(value: Any) -> str | None:
"""Extract a readable string out of Jira's typed objects."""
if value is None:
return None
if isinstance(value, str):
return value
if isinstance(value, dict):
return value.get("name") or value.get("value")
return getattr(value, "name", None)
def extract_user(value: Any) -> tuple[str | None, str | None]:
"""Return display name + email tuple for a Jira user blob."""
if value is None:
return None, None
if isinstance(value, dict):
return value.get("displayName"), value.get("emailAddress")
display = getattr(value, "displayName", None)
email = getattr(value, "emailAddress", None)
return display, email
def extract_text_from_adf(adf: Any) -> str:
"""Flatten Atlassian Document Format (ADF) structures to text."""
texts: list[str] = []
def _walk(node: Any) -> None:
if node is None:
return
if isinstance(node, dict):
node_type = node.get("type")
if node_type == "text":
texts.append(node.get("text", ""))
for child in node.get("content", []):
_walk(child)
elif isinstance(node, list):
for child in node:
_walk(child)
_walk(adf)
return "\n".join(part for part in texts if part)
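# A tiny, hypothetical ADF fragment and its flattened form:
#   adf = {"type": "doc", "content": [
#       {"type": "paragraph", "content": [{"type": "text", "text": "Hello"}]},
#       {"type": "paragraph", "content": [{"type": "text", "text": "World"}]},
#   ]}
#   extract_text_from_adf(adf)  # -> "Hello\nWorld"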
def extract_body_text(value: Any) -> str:
"""Normalize Jira description/comments (raw/adf/str) into plain text."""
if value is None:
return ""
if isinstance(value, str):
return value.strip()
if isinstance(value, dict):
return extract_text_from_adf(value).strip()
return str(value).strip()
def format_comments(
comment_block: Any,
*,
blacklist: Collection[str],
) -> str:
"""Convert Jira comments into a markdown-ish bullet list."""
if not isinstance(comment_block, dict):
return ""
comments = comment_block.get("comments") or []
lines: list[str] = []
normalized_blacklist = {email.lower() for email in blacklist if email}
for comment in comments:
author = comment.get("author") or {}
author_email = (author.get("emailAddress") or "").lower()
if author_email and author_email in normalized_blacklist:
continue
author_name = author.get("displayName") or author.get("name") or author_email or "Unknown"
created = parse_jira_datetime(comment.get("created"))
created_str = created.isoformat() if created else "Unknown time"
body = extract_body_text(comment.get("body"))
if not body:
continue
lines.append(f"- {author_name} ({created_str}):\n{body}")
return "\n\n".join(lines)
def format_attachments(attachments: Any) -> str:
"""List Jira attachments as bullet points."""
if not isinstance(attachments, list):
return ""
attachment_lines: list[str] = []
for attachment in attachments:
filename = attachment.get("filename")
if not filename:
continue
size = attachment.get("size")
size_text = f" ({size} bytes)" if isinstance(size, int) else ""
content_url = attachment.get("content") or ""
url_suffix = f" -> {content_url}" if content_url else ""
attachment_lines.append(f"- {filename}{size_text}{url_suffix}")
return "\n".join(attachment_lines)
def should_skip_issue(issue: Issue, labels_to_skip: set[str]) -> bool:
"""Return True if the issue contains any label from the skip list."""
if not labels_to_skip:
return False
fields = getattr(issue, "raw", {}).get("fields", {})
labels: Iterable[str] = fields.get("labels") or []
for label in labels:
if (label or "").lower() in labels_to_skip:
return True
return False
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/jira/utils.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/component/list_operations.py | from abc import ABC
import os
from agent.component.base import ComponentBase, ComponentParamBase
from api.utils.api_utils import timeout
class ListOperationsParam(ComponentParamBase):
"""
Define the List Operations component parameters.
"""
def __init__(self):
super().__init__()
self.query = ""
self.operations = "topN"
self.n=0
self.sort_method = "asc"
self.filter = {
"operator": "=",
"value": ""
}
self.outputs = {
"result": {
"value": [],
"type": "Array of ?"
},
"first": {
"value": "",
"type": "?"
},
"last": {
"value": "",
"type": "?"
}
}
def check(self):
self.check_empty(self.query, "query")
self.check_valid_value(self.operations, "Supported operations", ["topN","head","tail","filter","sort","drop_duplicates"])
def get_input_form(self) -> dict[str, dict]:
return {}
class ListOperations(ComponentBase,ABC):
component_name = "ListOperations"
@timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
def _invoke(self, **kwargs):
self.input_objects=[]
inputs = getattr(self._param, "query", None)
self.inputs = self._canvas.get_variable_value(inputs)
if not isinstance(self.inputs, list):
raise TypeError("The input of List Operations should be an array.")
self.set_input_value(inputs, self.inputs)
if self._param.operations == "topN":
self._topN()
elif self._param.operations == "head":
self._head()
elif self._param.operations == "tail":
self._tail()
elif self._param.operations == "filter":
self._filter()
elif self._param.operations == "sort":
self._sort()
elif self._param.operations == "drop_duplicates":
self._drop_duplicates()
def _coerce_n(self):
try:
return int(getattr(self._param, "n", 0))
except Exception:
return 0
def _set_outputs(self, outputs):
self._param.outputs["result"]["value"] = outputs
self._param.outputs["first"]["value"] = outputs[0] if outputs else None
self._param.outputs["last"]["value"] = outputs[-1] if outputs else None
def _topN(self):
n = self._coerce_n()
if n < 1:
outputs = []
else:
n = min(n, len(self.inputs))
outputs = self.inputs[:n]
self._set_outputs(outputs)
def _head(self):
n = self._coerce_n()
if 1 <= n <= len(self.inputs):
outputs = [self.inputs[n - 1]]
else:
outputs = []
self._set_outputs(outputs)
def _tail(self):
n = self._coerce_n()
if 1 <= n <= len(self.inputs):
outputs = [self.inputs[-n]]
else:
outputs = []
self._set_outputs(outputs)
def _filter(self):
self._set_outputs([i for i in self.inputs if self._eval(self._norm(i), self._param.filter["operator"], self._param.filter["value"])])
def _norm(self, v):
s = "" if v is None else str(v)
return s
def _eval(self, v, operator, value):
if operator == "=":
return v == value
elif operator == "≠":
return v != value
elif operator == "contains":
return value in v
elif operator == "start with":
return v.startswith(value)
elif operator == "end with":
return v.endswith(value)
else:
return False
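# Illustrative example: with filter {"operator": "contains", "value": "err"},
# _filter keeps only items whose normalized string form contains "err",
# e.g. ["error", 404, "warning"] -> ["error"].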
def _sort(self):
items = self.inputs or []
method = getattr(self._param, "sort_method", "asc") or "asc"
reverse = method == "desc"
if not items:
self._set_outputs([])
return
first = items[0]
if isinstance(first, dict):
outputs = sorted(
items,
key=lambda x: self._hashable(x),
reverse=reverse,
)
else:
outputs = sorted(items, reverse=reverse)
self._set_outputs(outputs)
def _drop_duplicates(self):
seen = set()
outs = []
for item in self.inputs:
k = self._hashable(item)
if k in seen:
continue
seen.add(k)
outs.append(item)
self._set_outputs(outs)
def _hashable(self, x):
if isinstance(x, dict):
return tuple(sorted((k, self._hashable(v)) for k, v in x.items()))
if isinstance(x, (list, tuple)):
return tuple(self._hashable(v) for v in x)
if isinstance(x, set):
return tuple(sorted(self._hashable(v) for v in x))
return x
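# Illustrative example: _hashable({"b": [1, 2], "a": {3}}) returns
# (("a", (3,)), ("b", (1, 2))), so dicts with the same content but different
# key order (or nested lists/sets) collapse to a single entry in
# _drop_duplicates and compare consistently in _sort.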
def thoughts(self) -> str:
return "ListOperation in progress"
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/component/list_operations.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:check_comment_ascii.py | #!/usr/bin/env python3
"""
Check whether given python files contain non-ASCII comments.
How to check the whole git repo:
```
$ git ls-files -z -- '*.py' | xargs -0 python3 check_comment_ascii.py
```
"""
import sys
import tokenize
import ast
import pathlib
import re
ASCII = re.compile(r"^[\n -~]*\Z") # Printable ASCII + newline
def check(src: str, name: str) -> int:
"""
Return 1 when every comment and docstring in `src` is printable ASCII;
otherwise print the offending locations and return 0.
"""
ok = 1
# A common comment begins with `#`
with tokenize.open(src) as fp:
for tk in tokenize.generate_tokens(fp.readline):
if tk.type == tokenize.COMMENT and not ASCII.fullmatch(tk.string):
print(f"{name}:{tk.start[0]}: non-ASCII comment: {tk.string}")
ok = 0
# A docstring is the string literal at the start of a module, class, or function
for node in ast.walk(ast.parse(pathlib.Path(src).read_text(), filename=name)):
if isinstance(node, (ast.FunctionDef, ast.ClassDef, ast.Module)):
if (doc := ast.get_docstring(node)) and not ASCII.fullmatch(doc):
print(f"{name}:{node.lineno}: non-ASCII docstring: {doc}")
ok = 0
return ok
if __name__ == "__main__":
status = 0
for file in sys.argv[1:]:
if not check(file, file):
status = 1
sys.exit(status)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "check_comment_ascii.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
infiniflow/ragflow:common/data_source/google_drive/connector.py | """Google Drive connector"""
import copy
import logging
import os
import sys
import threading
from collections.abc import Callable, Generator, Iterator
from enum import Enum
from functools import partial
from typing import Any, Protocol, cast
from urllib.parse import urlparse
from google.auth.exceptions import RefreshError # type: ignore
from google.oauth2.credentials import Credentials as OAuthCredentials # type: ignore
from google.oauth2.service_account import Credentials as ServiceAccountCredentials # type: ignore
from googleapiclient.errors import HttpError # type: ignore
from typing_extensions import override
from common.data_source.config import GOOGLE_DRIVE_CONNECTOR_SIZE_THRESHOLD, INDEX_BATCH_SIZE, SLIM_BATCH_SIZE, DocumentSource
from common.data_source.exceptions import ConnectorMissingCredentialError, ConnectorValidationError, CredentialExpiredError, InsufficientPermissionsError
from common.data_source.google_drive.doc_conversion import PermissionSyncContext, build_slim_document, convert_drive_item_to_document, onyx_document_id_from_drive_file
from common.data_source.google_drive.file_retrieval import (
DriveFileFieldType,
crawl_folders_for_files,
get_all_files_for_oauth,
get_all_files_in_my_drive_and_shared,
get_files_in_shared_drive,
get_root_folder_id,
)
from common.data_source.google_drive.model import DriveRetrievalStage, GoogleDriveCheckpoint, GoogleDriveFileType, RetrievedDriveFile, StageCompletion
from common.data_source.google_util.auth import get_google_creds
from common.data_source.google_util.constant import DB_CREDENTIALS_PRIMARY_ADMIN_KEY, MISSING_SCOPES_ERROR_STR, USER_FIELDS
from common.data_source.google_util.resource import GoogleDriveService, get_admin_service, get_drive_service
from common.data_source.google_util.util import GoogleFields, execute_paginated_retrieval, get_file_owners
from common.data_source.google_util.util_threadpool_concurrency import ThreadSafeDict
from common.data_source.interfaces import (
CheckpointedConnectorWithPermSync,
IndexingHeartbeatInterface,
SlimConnectorWithPermSync,
)
from common.data_source.models import CheckpointOutput, ConnectorFailure, Document, EntityFailure, GenerateSlimDocumentOutput, SecondsSinceUnixEpoch
from common.data_source.utils import datetime_from_string, parallel_yield, run_functions_tuples_in_parallel
MAX_DRIVE_WORKERS = int(os.environ.get("MAX_DRIVE_WORKERS", 4))
SHARED_DRIVE_PAGES_PER_CHECKPOINT = 2
MY_DRIVE_PAGES_PER_CHECKPOINT = 2
OAUTH_PAGES_PER_CHECKPOINT = 2
FOLDERS_PER_CHECKPOINT = 1
def _extract_str_list_from_comma_str(string: str | None) -> list[str]:
if not string:
return []
return [s.strip() for s in string.split(",") if s.strip()]
def _extract_ids_from_urls(urls: list[str]) -> list[str]:
return [urlparse(url).path.strip("/").split("/")[-1] for url in urls]
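# Illustrative example: "https://drive.google.com/drive/folders/abc123?usp=sharing"
# has URL path "/drive/folders/abc123", so _extract_ids_from_urls returns ["abc123"].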
def _clean_requested_drive_ids(
requested_drive_ids: set[str],
requested_folder_ids: set[str],
all_drive_ids_available: set[str],
) -> tuple[list[str], list[str]]:
invalid_requested_drive_ids = requested_drive_ids - all_drive_ids_available
filtered_folder_ids = requested_folder_ids - all_drive_ids_available
if invalid_requested_drive_ids:
logging.warning(f"Some shared drive IDs were not found. IDs: {invalid_requested_drive_ids}")
logging.warning("Checking for folder access instead...")
filtered_folder_ids.update(invalid_requested_drive_ids)
valid_requested_drive_ids = requested_drive_ids - invalid_requested_drive_ids
return sorted(valid_requested_drive_ids), sorted(filtered_folder_ids)
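# Illustrative example: requested drive ids {"d1", "bogus"} with available
# drives {"d1", "d2"} yields (["d1"], ...), where "bogus" is moved into the
# folder-id list so it can be retried as a folder instead of a shared drive.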
def add_retrieval_info(
drive_files: Iterator[GoogleDriveFileType | str],
user_email: str,
completion_stage: DriveRetrievalStage,
parent_id: str | None = None,
) -> Iterator[RetrievedDriveFile | str]:
for file in drive_files:
if isinstance(file, str):
yield file
continue
yield RetrievedDriveFile(
drive_file=file,
user_email=user_email,
parent_id=parent_id,
completion_stage=completion_stage,
)
class CredentialedRetrievalMethod(Protocol):
def __call__(
self,
field_type: DriveFileFieldType,
checkpoint: GoogleDriveCheckpoint,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[RetrievedDriveFile]: ...
class DriveIdStatus(str, Enum):
AVAILABLE = "available"
IN_PROGRESS = "in_progress"
FINISHED = "finished"
class GoogleDriveConnector(SlimConnectorWithPermSync, CheckpointedConnectorWithPermSync):
def __init__(
self,
include_shared_drives: bool = False,
include_my_drives: bool = False,
include_files_shared_with_me: bool = False,
shared_drive_urls: str | None = None,
my_drive_emails: str | None = None,
shared_folder_urls: str | None = None,
specific_user_emails: str | None = None,
batch_size: int = INDEX_BATCH_SIZE,
) -> None:
if not any(
(
include_shared_drives,
include_my_drives,
include_files_shared_with_me,
shared_folder_urls,
my_drive_emails,
shared_drive_urls,
)
):
raise ConnectorValidationError(
"Nothing to index. Please specify at least one of the following: include_shared_drives, include_my_drives, include_files_shared_with_me, shared_folder_urls, or my_drive_emails"
)
specific_requests_made = False
if bool(shared_drive_urls) or bool(my_drive_emails) or bool(shared_folder_urls):
specific_requests_made = True
self.specific_requests_made = specific_requests_made
# NOTE: potentially modified in load_credentials if using service account
self.include_files_shared_with_me = False if specific_requests_made else include_files_shared_with_me
self.include_my_drives = False if specific_requests_made else include_my_drives
self.include_shared_drives = False if specific_requests_made else include_shared_drives
shared_drive_url_list = _extract_str_list_from_comma_str(shared_drive_urls)
self._requested_shared_drive_ids = set(_extract_ids_from_urls(shared_drive_url_list))
self._requested_my_drive_emails = set(_extract_str_list_from_comma_str(my_drive_emails))
shared_folder_url_list = _extract_str_list_from_comma_str(shared_folder_urls)
self._requested_folder_ids = set(_extract_ids_from_urls(shared_folder_url_list))
self._specific_user_emails = _extract_str_list_from_comma_str(specific_user_emails)
self._primary_admin_email: str | None = None
self._creds: OAuthCredentials | ServiceAccountCredentials | None = None
self._creds_dict: dict[str, Any] | None = None
# ids of folders and shared drives that have been traversed
self._retrieved_folder_and_drive_ids: set[str] = set()
self.allow_images = False
self.size_threshold = GOOGLE_DRIVE_CONNECTOR_SIZE_THRESHOLD
self.logger = logging.getLogger(self.__class__.__name__)
def set_allow_images(self, value: bool) -> None:
self.allow_images = value
@property
def primary_admin_email(self) -> str:
if self._primary_admin_email is None:
raise RuntimeError("Primary admin email missing, should not call this property before calling load_credentials")
return self._primary_admin_email
@property
def google_domain(self) -> str:
if self._primary_admin_email is None:
raise RuntimeError("Primary admin email missing, should not call this property before calling load_credentials")
return self._primary_admin_email.split("@")[-1]
@property
def creds(self) -> OAuthCredentials | ServiceAccountCredentials:
if self._creds is None:
raise RuntimeError("Creds missing, should not call this property before calling load_credentials")
return self._creds
# TODO: ensure returned new_creds_dict is actually persisted when this is called?
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
try:
self._primary_admin_email = credentials[DB_CREDENTIALS_PRIMARY_ADMIN_KEY]
except KeyError:
raise ValueError("Credentials json missing primary admin key")
self._creds, new_creds_dict = get_google_creds(
credentials=credentials,
source=DocumentSource.GOOGLE_DRIVE,
)
# Service account connectors don't have a specific setting determining whether
# to include "shared with me" for each user, so we default to true unless the connector
# is in specific folders/drives mode. Note that shared files are only picked up during
# the My Drive stage, so this does nothing if the connector is set to only index shared drives.
if isinstance(self._creds, ServiceAccountCredentials) and not self.specific_requests_made:
self.include_files_shared_with_me = True
self._creds_dict = new_creds_dict
return new_creds_dict
def _update_traversed_parent_ids(self, folder_id: str) -> None:
self._retrieved_folder_and_drive_ids.add(folder_id)
def _get_all_user_emails(self) -> list[str]:
if self._specific_user_emails:
return self._specific_user_emails
# Start with primary admin email
user_emails = [self.primary_admin_email]
# Only fetch additional users if using service account
if isinstance(self.creds, OAuthCredentials):
return user_emails
admin_service = get_admin_service(
creds=self.creds,
user_email=self.primary_admin_email,
)
# Get admins first since they're more likely to have access to most files
for is_admin in [True, False]:
query = "isAdmin=true" if is_admin else "isAdmin=false"
for user in execute_paginated_retrieval(
retrieval_function=admin_service.users().list,
list_key="users",
fields=USER_FIELDS,
domain=self.google_domain,
query=query,
):
if email := user.get("primaryEmail"):
if email not in user_emails:
user_emails.append(email)
return user_emails
def get_all_drive_ids(self) -> set[str]:
return self._get_all_drives_for_user(self.primary_admin_email)
def _get_all_drives_for_user(self, user_email: str) -> set[str]:
drive_service = get_drive_service(self.creds, user_email)
is_service_account = isinstance(self.creds, ServiceAccountCredentials)
self.logger.info(f"Getting all drives for user {user_email} with service account: {is_service_account}")
all_drive_ids: set[str] = set()
for drive in execute_paginated_retrieval(
retrieval_function=drive_service.drives().list,
list_key="drives",
useDomainAdminAccess=is_service_account,
fields="drives(id),nextPageToken",
):
all_drive_ids.add(drive["id"])
if not all_drive_ids:
self.logger.warning("No drives found even though indexing shared drives was requested.")
return all_drive_ids
def make_drive_id_getter(self, drive_ids: list[str], checkpoint: GoogleDriveCheckpoint) -> Callable[[str], str | None]:
status_lock = threading.Lock()
in_progress_drive_ids = {
completion.current_folder_or_drive_id: user_email
for user_email, completion in checkpoint.completion_map.items()
if completion.stage == DriveRetrievalStage.SHARED_DRIVE_FILES and completion.current_folder_or_drive_id is not None
}
drive_id_status: dict[str, DriveIdStatus] = {}
for drive_id in drive_ids:
if drive_id in self._retrieved_folder_and_drive_ids:
drive_id_status[drive_id] = DriveIdStatus.FINISHED
elif drive_id in in_progress_drive_ids:
drive_id_status[drive_id] = DriveIdStatus.IN_PROGRESS
else:
drive_id_status[drive_id] = DriveIdStatus.AVAILABLE
def get_available_drive_id(thread_id: str) -> str | None:
completion = checkpoint.completion_map[thread_id]
with status_lock:
future_work = None
for drive_id, status in drive_id_status.items():
if drive_id in self._retrieved_folder_and_drive_ids:
drive_id_status[drive_id] = DriveIdStatus.FINISHED
continue
if drive_id in completion.processed_drive_ids:
continue
if status == DriveIdStatus.AVAILABLE:
# add to processed drive ids so if this user fails to retrieve once
# they won't try again on the next checkpoint run
completion.processed_drive_ids.add(drive_id)
return drive_id
elif status == DriveIdStatus.IN_PROGRESS:
self.logger.debug(f"Drive id in progress: {drive_id}")
future_work = drive_id
if future_work:
# in this case, all drive ids are either finished or in progress.
# This thread will pick up one of the in progress ones in case it fails.
# This is a much simpler approach than waiting for a failure picking it up,
# at the cost of some repeated work until all shared drives are retrieved.
# we avoid apocalyptic cases like all threads focusing on one huge drive
# because the drive id is added to _retrieved_folder_and_drive_ids after any thread
# manages to retrieve any file from it (unfortunately, this is also the reason we currently
# sometimes fail to retrieve restricted access folders/files)
completion.processed_drive_ids.add(future_work)
return future_work
return None # no work available, return None
return get_available_drive_id
def _impersonate_user_for_retrieval(
self,
user_email: str,
field_type: DriveFileFieldType,
checkpoint: GoogleDriveCheckpoint,
get_new_drive_id: Callable[[str], str | None],
sorted_filtered_folder_ids: list[str],
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[RetrievedDriveFile]:
self.logger.info(f"Impersonating user {user_email}")
curr_stage = checkpoint.completion_map[user_email]
resuming = True
if curr_stage.stage == DriveRetrievalStage.START:
self.logger.info(f"Setting stage to {DriveRetrievalStage.MY_DRIVE_FILES.value}")
curr_stage.stage = DriveRetrievalStage.MY_DRIVE_FILES
resuming = False
drive_service = get_drive_service(self.creds, user_email)
# validate that the user has access to the drive APIs by performing a simple
# request and checking for a 401
try:
self.logger.debug(f"Getting root folder id for user {user_email}")
get_root_folder_id(drive_service)
except HttpError as e:
if e.status_code == 401:
# fail gracefully, let the other impersonations continue
# one user without access shouldn't block the entire connector
self.logger.warning(f"User '{user_email}' does not have access to the drive APIs.")
# mark this user as done so we don't try to retrieve anything for them
# again
curr_stage.stage = DriveRetrievalStage.DONE
return
raise
except RefreshError as e:
self.logger.warning(f"User '{user_email}' could not refresh their token. Error: {e}")
# mark this user as done so we don't try to retrieve anything for them
# again
yield RetrievedDriveFile(
completion_stage=DriveRetrievalStage.DONE,
drive_file={},
user_email=user_email,
error=e,
)
curr_stage.stage = DriveRetrievalStage.DONE
return
# if we are including my drives, try to get the current user's my
# drive if any of the following are true:
# - include_my_drives is true
# - the current user's email is in the requested emails
if curr_stage.stage == DriveRetrievalStage.MY_DRIVE_FILES:
if self.include_my_drives or user_email in self._requested_my_drive_emails:
self.logger.info(
f"Getting all files in my drive as '{user_email}. Resuming: {resuming}. Stage completed until: {curr_stage.completed_until}. Next page token: {curr_stage.next_page_token}"
)
for file_or_token in add_retrieval_info(
get_all_files_in_my_drive_and_shared(
service=drive_service,
update_traversed_ids_func=self._update_traversed_parent_ids,
field_type=field_type,
include_shared_with_me=self.include_files_shared_with_me,
max_num_pages=MY_DRIVE_PAGES_PER_CHECKPOINT,
start=curr_stage.completed_until if resuming else start,
end=end,
cache_folders=not bool(curr_stage.completed_until),
page_token=curr_stage.next_page_token,
),
user_email,
DriveRetrievalStage.MY_DRIVE_FILES,
):
if isinstance(file_or_token, str):
self.logger.debug(f"Done with max num pages for user {user_email}")
checkpoint.completion_map[user_email].next_page_token = file_or_token
return # done with the max num pages, return checkpoint
yield file_or_token
checkpoint.completion_map[user_email].next_page_token = None
curr_stage.stage = DriveRetrievalStage.SHARED_DRIVE_FILES
curr_stage.current_folder_or_drive_id = None
return # resume from next stage on the next run
if curr_stage.stage == DriveRetrievalStage.SHARED_DRIVE_FILES:
def _yield_from_drive(drive_id: str, drive_start: SecondsSinceUnixEpoch | None) -> Iterator[RetrievedDriveFile | str]:
yield from add_retrieval_info(
get_files_in_shared_drive(
service=drive_service,
drive_id=drive_id,
field_type=field_type,
max_num_pages=SHARED_DRIVE_PAGES_PER_CHECKPOINT,
update_traversed_ids_func=self._update_traversed_parent_ids,
cache_folders=not bool(drive_start), # only cache folders for 0 or None
start=drive_start,
end=end,
page_token=curr_stage.next_page_token,
),
user_email,
DriveRetrievalStage.SHARED_DRIVE_FILES,
parent_id=drive_id,
)
# resume from a checkpoint
if resuming and (drive_id := curr_stage.current_folder_or_drive_id):
resume_start = curr_stage.completed_until
for file_or_token in _yield_from_drive(drive_id, resume_start):
if isinstance(file_or_token, str):
checkpoint.completion_map[user_email].next_page_token = file_or_token
return # done with the max num pages, return checkpoint
yield file_or_token
drive_id = get_new_drive_id(user_email)
if drive_id:
self.logger.info(f"Getting files in shared drive '{drive_id}' as '{user_email}. Resuming: {resuming}")
curr_stage.completed_until = 0
curr_stage.current_folder_or_drive_id = drive_id
for file_or_token in _yield_from_drive(drive_id, start):
if isinstance(file_or_token, str):
checkpoint.completion_map[user_email].next_page_token = file_or_token
return # done with the max num pages, return checkpoint
yield file_or_token
curr_stage.current_folder_or_drive_id = None
return # get a new drive id on the next run
checkpoint.completion_map[user_email].next_page_token = None
curr_stage.stage = DriveRetrievalStage.FOLDER_FILES
curr_stage.current_folder_or_drive_id = None
return # resume from next stage on the next run
# In the folder files section of service account retrieval we take extra care
# to not retrieve duplicate docs. In particular, we only add a folder to
# retrieved_folder_and_drive_ids when all users are finished retrieving files
# from that folder, and maintain a set of all file ids that have been retrieved
# for each folder. This might get rather large; in practice we assume that the
# specific folders users choose to index don't have too many files.
if curr_stage.stage == DriveRetrievalStage.FOLDER_FILES:
def _yield_from_folder_crawl(folder_id: str, folder_start: SecondsSinceUnixEpoch | None) -> Iterator[RetrievedDriveFile]:
for retrieved_file in crawl_folders_for_files(
service=drive_service,
parent_id=folder_id,
field_type=field_type,
user_email=user_email,
traversed_parent_ids=self._retrieved_folder_and_drive_ids,
update_traversed_ids_func=self._update_traversed_parent_ids,
start=folder_start,
end=end,
):
yield retrieved_file
# resume from a checkpoint
last_processed_folder = None
if resuming:
folder_id = curr_stage.current_folder_or_drive_id
if folder_id is None:
self.logger.warning(f"folder id not set in checkpoint for user {user_email}. This happens occasionally when the connector is interrupted and resumed.")
else:
resume_start = curr_stage.completed_until
yield from _yield_from_folder_crawl(folder_id, resume_start)
last_processed_folder = folder_id
skipping_seen_folders = last_processed_folder is not None
# NOTE: this assumes a small number of folders to crawl. If someone
# really wants to specify a large number of folders, we should use
# binary search to find the first unseen folder.
num_completed_folders = 0
for folder_id in sorted_filtered_folder_ids:
if skipping_seen_folders:
skipping_seen_folders = folder_id != last_processed_folder
continue
if folder_id in self._retrieved_folder_and_drive_ids:
continue
curr_stage.completed_until = 0
curr_stage.current_folder_or_drive_id = folder_id
if num_completed_folders >= FOLDERS_PER_CHECKPOINT:
return # resume from this folder on the next run
self.logger.info(f"Getting files in folder '{folder_id}' as '{user_email}'")
yield from _yield_from_folder_crawl(folder_id, start)
num_completed_folders += 1
curr_stage.stage = DriveRetrievalStage.DONE
def _manage_service_account_retrieval(
self,
field_type: DriveFileFieldType,
checkpoint: GoogleDriveCheckpoint,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[RetrievedDriveFile]:
"""
The current implementation of the service account retrieval does some
initial setup work using the primary admin email, then runs MAX_DRIVE_WORKERS
concurrent threads, each of which impersonates a different user and retrieves
files for that user. Technically, the actual work each thread does is "yield the
next file retrieved by the user", at which point it returns to the thread pool;
see parallel_yield for more details.
"""
if checkpoint.completion_stage == DriveRetrievalStage.START:
checkpoint.completion_stage = DriveRetrievalStage.USER_EMAILS
if checkpoint.completion_stage == DriveRetrievalStage.USER_EMAILS:
all_org_emails: list[str] = self._get_all_user_emails()
checkpoint.user_emails = all_org_emails
checkpoint.completion_stage = DriveRetrievalStage.DRIVE_IDS
else:
if checkpoint.user_emails is None:
raise ValueError("user emails not set")
all_org_emails = checkpoint.user_emails
sorted_drive_ids, sorted_folder_ids = self._determine_retrieval_ids(checkpoint, DriveRetrievalStage.MY_DRIVE_FILES)
# Setup initial completion map on first connector run
for email in all_org_emails:
# don't overwrite existing completion map on resuming runs
if email in checkpoint.completion_map:
continue
checkpoint.completion_map[email] = StageCompletion(
stage=DriveRetrievalStage.START,
completed_until=0,
processed_drive_ids=set(),
)
# we've found all users and drives, now time to actually start
# fetching stuff
self.logger.info(f"Found {len(all_org_emails)} users to impersonate")
self.logger.debug(f"Users: {all_org_emails}")
self.logger.info(f"Found {len(sorted_drive_ids)} drives to retrieve")
self.logger.debug(f"Drives: {sorted_drive_ids}")
self.logger.info(f"Found {len(sorted_folder_ids)} folders to retrieve")
self.logger.debug(f"Folders: {sorted_folder_ids}")
drive_id_getter = self.make_drive_id_getter(sorted_drive_ids, checkpoint)
# only process emails that we haven't already completed retrieval for
non_completed_org_emails = [user_email for user_email, stage_completion in checkpoint.completion_map.items() if stage_completion.stage != DriveRetrievalStage.DONE]
self.logger.debug(f"Non-completed users remaining: {len(non_completed_org_emails)}")
# don't process too many emails before returning a checkpoint. This is
# to resolve the case where there are a ton of emails that don't have access
# to the drive APIs. Without this, we could loop through these emails for
# more than 3 hours, causing a timeout and stalling progress.
email_batch_takes_us_to_completion = True
MAX_EMAILS_TO_PROCESS_BEFORE_CHECKPOINTING = MAX_DRIVE_WORKERS
if len(non_completed_org_emails) > MAX_EMAILS_TO_PROCESS_BEFORE_CHECKPOINTING:
non_completed_org_emails = non_completed_org_emails[:MAX_EMAILS_TO_PROCESS_BEFORE_CHECKPOINTING]
email_batch_takes_us_to_completion = False
user_retrieval_gens = [
self._impersonate_user_for_retrieval(
email,
field_type,
checkpoint,
drive_id_getter,
sorted_folder_ids,
start,
end,
)
for email in non_completed_org_emails
]
yield from parallel_yield(user_retrieval_gens, max_workers=MAX_DRIVE_WORKERS)
# if there are more emails to process, don't mark as complete
if not email_batch_takes_us_to_completion:
return
remaining_folders = (set(sorted_drive_ids) | set(sorted_folder_ids)) - self._retrieved_folder_and_drive_ids
if remaining_folders:
self.logger.warning(f"Some folders/drives were not retrieved. IDs: {remaining_folders}")
if any(checkpoint.completion_map[user_email].stage != DriveRetrievalStage.DONE for user_email in all_org_emails):
self.logger.info("some users did not complete retrieval, returning checkpoint for another run")
return
checkpoint.completion_stage = DriveRetrievalStage.DONE
def _determine_retrieval_ids(
self,
checkpoint: GoogleDriveCheckpoint,
next_stage: DriveRetrievalStage,
) -> tuple[list[str], list[str]]:
all_drive_ids = self.get_all_drive_ids()
sorted_drive_ids: list[str] = []
sorted_folder_ids: list[str] = []
if checkpoint.completion_stage == DriveRetrievalStage.DRIVE_IDS:
if self._requested_shared_drive_ids or self._requested_folder_ids:
(
sorted_drive_ids,
sorted_folder_ids,
) = _clean_requested_drive_ids(
requested_drive_ids=self._requested_shared_drive_ids,
requested_folder_ids=self._requested_folder_ids,
all_drive_ids_available=all_drive_ids,
)
elif self.include_shared_drives:
sorted_drive_ids = sorted(all_drive_ids)
checkpoint.drive_ids_to_retrieve = sorted_drive_ids
checkpoint.folder_ids_to_retrieve = sorted_folder_ids
checkpoint.completion_stage = next_stage
else:
if checkpoint.drive_ids_to_retrieve is None:
raise ValueError("drive ids to retrieve not set in checkpoint")
if checkpoint.folder_ids_to_retrieve is None:
raise ValueError("folder ids to retrieve not set in checkpoint")
# When loading from a checkpoint, load the previously cached drive and folder ids
sorted_drive_ids = checkpoint.drive_ids_to_retrieve
sorted_folder_ids = checkpoint.folder_ids_to_retrieve
return sorted_drive_ids, sorted_folder_ids
def _oauth_retrieval_drives(
self,
field_type: DriveFileFieldType,
drive_service: GoogleDriveService,
drive_ids_to_retrieve: list[str],
checkpoint: GoogleDriveCheckpoint,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[RetrievedDriveFile | str]:
def _yield_from_drive(drive_id: str, drive_start: SecondsSinceUnixEpoch | None) -> Iterator[RetrievedDriveFile | str]:
yield from add_retrieval_info(
get_files_in_shared_drive(
service=drive_service,
drive_id=drive_id,
field_type=field_type,
max_num_pages=SHARED_DRIVE_PAGES_PER_CHECKPOINT,
cache_folders=not bool(drive_start), # only cache folders for 0 or None
update_traversed_ids_func=self._update_traversed_parent_ids,
start=drive_start,
end=end,
page_token=checkpoint.completion_map[self.primary_admin_email].next_page_token,
),
self.primary_admin_email,
DriveRetrievalStage.SHARED_DRIVE_FILES,
parent_id=drive_id,
)
# If we are resuming from a checkpoint, we need to finish retrieving the files from the last drive we retrieved
if checkpoint.completion_map[self.primary_admin_email].stage == DriveRetrievalStage.SHARED_DRIVE_FILES:
drive_id = checkpoint.completion_map[self.primary_admin_email].current_folder_or_drive_id
if drive_id is None:
raise ValueError("drive id not set in checkpoint")
resume_start = checkpoint.completion_map[self.primary_admin_email].completed_until
for file_or_token in _yield_from_drive(drive_id, resume_start):
if isinstance(file_or_token, str):
checkpoint.completion_map[self.primary_admin_email].next_page_token = file_or_token
return # done with the max num pages, return checkpoint
yield file_or_token
checkpoint.completion_map[self.primary_admin_email].next_page_token = None
for drive_id in drive_ids_to_retrieve:
if drive_id in self._retrieved_folder_and_drive_ids:
self.logger.info(f"Skipping drive '{drive_id}' as it has already been retrieved")
continue
self.logger.info(f"Getting files in shared drive '{drive_id}' as '{self.primary_admin_email}'")
for file_or_token in _yield_from_drive(drive_id, start):
if isinstance(file_or_token, str):
checkpoint.completion_map[self.primary_admin_email].next_page_token = file_or_token
return # done with the max num pages, return checkpoint
yield file_or_token
checkpoint.completion_map[self.primary_admin_email].next_page_token = None
def _oauth_retrieval_folders(
self,
field_type: DriveFileFieldType,
drive_service: GoogleDriveService,
drive_ids_to_retrieve: set[str],
folder_ids_to_retrieve: set[str],
checkpoint: GoogleDriveCheckpoint,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[RetrievedDriveFile]:
"""
If there are any remaining folder ids to retrieve found earlier in the
retrieval process, we recursively descend the file tree and retrieve all
files in the folder(s).
"""
# Even if no folders were requested, we still check if any drives were requested
# that could be folders.
remaining_folders = folder_ids_to_retrieve - self._retrieved_folder_and_drive_ids
def _yield_from_folder_crawl(folder_id: str, folder_start: SecondsSinceUnixEpoch | None) -> Iterator[RetrievedDriveFile]:
yield from crawl_folders_for_files(
service=drive_service,
parent_id=folder_id,
field_type=field_type,
user_email=self.primary_admin_email,
traversed_parent_ids=self._retrieved_folder_and_drive_ids,
update_traversed_ids_func=self._update_traversed_parent_ids,
start=folder_start,
end=end,
)
# resume from a checkpoint
# TODO: actually checkpoint folder retrieval. Since we moved towards returning from
# generator functions to indicate when a checkpoint should be returned, this code
# shouldn't be used currently. Unfortunately folder crawling is quite difficult to checkpoint
# effectively (likely need separate folder crawling and file retrieval stages),
# so we'll revisit this later.
if checkpoint.completion_map[self.primary_admin_email].stage == DriveRetrievalStage.FOLDER_FILES and (
folder_id := checkpoint.completion_map[self.primary_admin_email].current_folder_or_drive_id
):
resume_start = checkpoint.completion_map[self.primary_admin_email].completed_until
yield from _yield_from_folder_crawl(folder_id, resume_start)
# the times stored in the completion_map aren't used due to the crawling behavior
# instead, the traversed_parent_ids are used to determine what we have left to retrieve
for folder_id in remaining_folders:
self.logger.info(f"Getting files in folder '{folder_id}' as '{self.primary_admin_email}'")
yield from _yield_from_folder_crawl(folder_id, start)
remaining_folders = (drive_ids_to_retrieve | folder_ids_to_retrieve) - self._retrieved_folder_and_drive_ids
if remaining_folders:
self.logger.warning(f"Some folders/drives were not retrieved. IDs: {remaining_folders}")
def _load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: GoogleDriveCheckpoint,
include_permissions: bool,
) -> CheckpointOutput:
"""
Entrypoint for the connector; first run is with an empty checkpoint.
"""
if self._creds is None or self._primary_admin_email is None:
raise RuntimeError("Credentials missing, should not call this method before calling load_credentials")
self.logger.info(f"Loading from checkpoint with completion stage: {checkpoint.completion_stage},num retrieved ids: {len(checkpoint.all_retrieved_file_ids)}")
checkpoint = copy.deepcopy(checkpoint)
self._retrieved_folder_and_drive_ids = checkpoint.retrieved_folder_and_drive_ids
try:
yield from self._extract_docs_from_google_drive(checkpoint, start, end, include_permissions)
except Exception as e:
if MISSING_SCOPES_ERROR_STR in str(e):
raise PermissionError() from e
raise e
checkpoint.retrieved_folder_and_drive_ids = self._retrieved_folder_and_drive_ids
self.logger.info(f"num drive files retrieved: {len(checkpoint.all_retrieved_file_ids)}")
if checkpoint.completion_stage == DriveRetrievalStage.DONE:
checkpoint.has_more = False
return checkpoint
def _checkpointed_retrieval(
self,
retrieval_method: CredentialedRetrievalMethod,
field_type: DriveFileFieldType,
checkpoint: GoogleDriveCheckpoint,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[RetrievedDriveFile]:
drive_files = retrieval_method(
field_type=field_type,
checkpoint=checkpoint,
start=start,
end=end,
)
for file in drive_files:
document_id = onyx_document_id_from_drive_file(file.drive_file)
logging.debug(f"Updating checkpoint for file: {file.drive_file.get('name')}. Seen: {document_id in checkpoint.all_retrieved_file_ids}")
checkpoint.completion_map[file.user_email].update(
stage=file.completion_stage,
completed_until=datetime_from_string(file.drive_file[GoogleFields.MODIFIED_TIME.value]).timestamp(),
current_folder_or_drive_id=file.parent_id,
)
if document_id not in checkpoint.all_retrieved_file_ids:
checkpoint.all_retrieved_file_ids.add(document_id)
yield file
def _oauth_retrieval_all_files(
self,
field_type: DriveFileFieldType,
drive_service: GoogleDriveService,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
page_token: str | None = None,
) -> Iterator[RetrievedDriveFile | str]:
if not self.include_files_shared_with_me and not self.include_my_drives:
return
self.logger.info(
f"Getting shared files/my drive files for OAuth "
f"with include_files_shared_with_me={self.include_files_shared_with_me}, "
f"include_my_drives={self.include_my_drives}, "
f"include_shared_drives={self.include_shared_drives}."
f"Using '{self.primary_admin_email}' as the account."
)
yield from add_retrieval_info(
get_all_files_for_oauth(
service=drive_service,
include_files_shared_with_me=self.include_files_shared_with_me,
include_my_drives=self.include_my_drives,
include_shared_drives=self.include_shared_drives,
field_type=field_type,
max_num_pages=OAUTH_PAGES_PER_CHECKPOINT,
start=start,
end=end,
page_token=page_token,
),
self.primary_admin_email,
DriveRetrievalStage.OAUTH_FILES,
)
def _manage_oauth_retrieval(
self,
field_type: DriveFileFieldType,
checkpoint: GoogleDriveCheckpoint,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[RetrievedDriveFile]:
if checkpoint.completion_stage == DriveRetrievalStage.START:
checkpoint.completion_stage = DriveRetrievalStage.OAUTH_FILES
checkpoint.completion_map[self.primary_admin_email] = StageCompletion(
stage=DriveRetrievalStage.START,
completed_until=0,
current_folder_or_drive_id=None,
)
drive_service = get_drive_service(self.creds, self.primary_admin_email)
if checkpoint.completion_stage == DriveRetrievalStage.OAUTH_FILES:
completion = checkpoint.completion_map[self.primary_admin_email]
all_files_start = start
# if resuming from a checkpoint
if completion.stage == DriveRetrievalStage.OAUTH_FILES:
all_files_start = completion.completed_until
for file_or_token in self._oauth_retrieval_all_files(
field_type=field_type,
drive_service=drive_service,
start=all_files_start,
end=end,
page_token=checkpoint.completion_map[self.primary_admin_email].next_page_token,
):
if isinstance(file_or_token, str):
checkpoint.completion_map[self.primary_admin_email].next_page_token = file_or_token
return # done with the max num pages, return checkpoint
yield file_or_token
checkpoint.completion_stage = DriveRetrievalStage.DRIVE_IDS
checkpoint.completion_map[self.primary_admin_email].next_page_token = None
return # create a new checkpoint
all_requested = self.include_files_shared_with_me and self.include_my_drives and self.include_shared_drives
if all_requested:
# If all 3 are true, we already yielded from get_all_files_for_oauth
checkpoint.completion_stage = DriveRetrievalStage.DONE
return
sorted_drive_ids, sorted_folder_ids = self._determine_retrieval_ids(checkpoint, DriveRetrievalStage.SHARED_DRIVE_FILES)
if checkpoint.completion_stage == DriveRetrievalStage.SHARED_DRIVE_FILES:
for file_or_token in self._oauth_retrieval_drives(
field_type=field_type,
drive_service=drive_service,
drive_ids_to_retrieve=sorted_drive_ids,
checkpoint=checkpoint,
start=start,
end=end,
):
if isinstance(file_or_token, str):
checkpoint.completion_map[self.primary_admin_email].next_page_token = file_or_token
return # done with the max num pages, return checkpoint
yield file_or_token
checkpoint.completion_stage = DriveRetrievalStage.FOLDER_FILES
checkpoint.completion_map[self.primary_admin_email].next_page_token = None
return # create a new checkpoint
if checkpoint.completion_stage == DriveRetrievalStage.FOLDER_FILES:
yield from self._oauth_retrieval_folders(
field_type=field_type,
drive_service=drive_service,
drive_ids_to_retrieve=set(sorted_drive_ids),
folder_ids_to_retrieve=set(sorted_folder_ids),
checkpoint=checkpoint,
start=start,
end=end,
)
checkpoint.completion_stage = DriveRetrievalStage.DONE
def _fetch_drive_items(
self,
field_type: DriveFileFieldType,
checkpoint: GoogleDriveCheckpoint,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[RetrievedDriveFile]:
retrieval_method = self._manage_service_account_retrieval if isinstance(self.creds, ServiceAccountCredentials) else self._manage_oauth_retrieval
return self._checkpointed_retrieval(
retrieval_method=retrieval_method,
field_type=field_type,
checkpoint=checkpoint,
start=start,
end=end,
)
def _extract_docs_from_google_drive(
self,
checkpoint: GoogleDriveCheckpoint,
start: SecondsSinceUnixEpoch | None,
end: SecondsSinceUnixEpoch | None,
include_permissions: bool,
) -> Iterator[Document | ConnectorFailure]:
"""
Retrieves and converts Google Drive files to documents.
"""
field_type = DriveFileFieldType.WITH_PERMISSIONS if include_permissions else DriveFileFieldType.STANDARD
try:
# Prepare a partial function with the credentials and admin email
convert_func = partial(
convert_drive_item_to_document,
self.creds,
self.allow_images,
self.size_threshold,
(
PermissionSyncContext(
primary_admin_email=self.primary_admin_email,
google_domain=self.google_domain,
)
if include_permissions
else None
),
)
# Fetch files in batches
batches_complete = 0
files_batch: list[RetrievedDriveFile] = []
def _yield_batch(
files_batch: list[RetrievedDriveFile],
) -> Iterator[Document | ConnectorFailure]:
nonlocal batches_complete
# Process the batch using run_functions_tuples_in_parallel
func_with_args = [
(
convert_func,
(
[file.user_email, self.primary_admin_email] + get_file_owners(file.drive_file, self.primary_admin_email),
file.drive_file,
),
)
for file in files_batch
]
results = cast(
list[Document | ConnectorFailure | None],
run_functions_tuples_in_parallel(func_with_args, max_workers=8),
)
self.logger.debug(f"finished processing batch {batches_complete} with {len(results)} results")
docs_and_failures = [result for result in results if result is not None]
self.logger.debug(f"batch {batches_complete} has {len(docs_and_failures)} docs or failures")
if docs_and_failures:
yield from docs_and_failures
batches_complete += 1
self.logger.debug(f"finished yielding batch {batches_complete}")
for retrieved_file in self._fetch_drive_items(
field_type=field_type,
checkpoint=checkpoint,
start=start,
end=end,
):
if retrieved_file.error is None:
files_batch.append(retrieved_file)
continue
# handle retrieval errors
failure_stage = retrieved_file.completion_stage.value
failure_message = f"retrieval failure during stage: {failure_stage},"
failure_message += f"user: {retrieved_file.user_email},"
failure_message += f"parent drive/folder: {retrieved_file.parent_id},"
failure_message += f"error: {retrieved_file.error}"
self.logger.error(failure_message)
yield ConnectorFailure(
failed_entity=EntityFailure(
entity_id=failure_stage,
),
failure_message=failure_message,
exception=retrieved_file.error,
)
yield from _yield_batch(files_batch)
checkpoint.retrieved_folder_and_drive_ids = self._retrieved_folder_and_drive_ids
except Exception as e:
self.logger.exception(f"Error extracting documents from Google Drive: {e}")
raise e
@override
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: GoogleDriveCheckpoint,
) -> CheckpointOutput:
return self._load_from_checkpoint(start, end, checkpoint, include_permissions=False)
@override
def load_from_checkpoint_with_perm_sync(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: GoogleDriveCheckpoint,
) -> CheckpointOutput:
return self._load_from_checkpoint(start, end, checkpoint, include_permissions=True)
def _extract_slim_docs_from_google_drive(
self,
checkpoint: GoogleDriveCheckpoint,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: IndexingHeartbeatInterface | None = None,
) -> GenerateSlimDocumentOutput:
slim_batch = []
for file in self._fetch_drive_items(
field_type=DriveFileFieldType.SLIM,
checkpoint=checkpoint,
start=start,
end=end,
):
if file.error is not None:
raise file.error
if doc := build_slim_document(
self.creds,
file.drive_file,
# for now, always fetch permissions for slim runs
# TODO: move everything to load_from_checkpoint
# and only fetch permissions if needed
PermissionSyncContext(
primary_admin_email=self.primary_admin_email,
google_domain=self.google_domain,
),
):
slim_batch.append(doc)
if len(slim_batch) >= SLIM_BATCH_SIZE:
yield slim_batch
slim_batch = []
if callback:
if callback.should_stop():
raise RuntimeError("_extract_slim_docs_from_google_drive: Stop signal detected")
callback.progress("_extract_slim_docs_from_google_drive", 1)
yield slim_batch
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: IndexingHeartbeatInterface | None = None,
) -> GenerateSlimDocumentOutput:
try:
checkpoint = self.build_dummy_checkpoint()
while checkpoint.completion_stage != DriveRetrievalStage.DONE:
yield from self._extract_slim_docs_from_google_drive(
checkpoint=checkpoint,
start=start,
end=end,
)
self.logger.info("Drive perm sync: Slim doc retrieval complete")
except Exception as e:
if MISSING_SCOPES_ERROR_STR in str(e):
raise PermissionError() from e
raise e
def validate_connector_settings(self) -> None:
if self._creds is None:
raise ConnectorMissingCredentialError("Google Drive credentials not loaded.")
if self._primary_admin_email is None:
raise ConnectorValidationError("Primary admin email not found in credentials. Ensure DB_CREDENTIALS_PRIMARY_ADMIN_KEY is set.")
try:
drive_service = get_drive_service(self._creds, self._primary_admin_email)
drive_service.files().list(pageSize=1, fields="files(id)").execute()
if isinstance(self._creds, ServiceAccountCredentials):
# default is ~17mins of retries, don't do that here since this is called from
# the UI
get_root_folder_id(drive_service)
except HttpError as e:
status_code = e.resp.status if e.resp else None
if status_code == 401:
raise CredentialExpiredError("Invalid or expired Google Drive credentials (401).")
elif status_code == 403:
raise InsufficientPermissionsError("Google Drive app lacks required permissions (403). Please ensure the necessary scopes are granted and Drive apps are enabled.")
else:
raise ConnectorValidationError(f"Unexpected Google Drive error (status={status_code}): {e}")
except Exception as e:
# Check for scope-related hints from the error message
if MISSING_SCOPES_ERROR_STR in str(e):
raise InsufficientPermissionsError("Google Drive credentials are missing required scopes.")
raise ConnectorValidationError(f"Unexpected error during Google Drive validation: {e}")
@override
def build_dummy_checkpoint(self) -> GoogleDriveCheckpoint:
return GoogleDriveCheckpoint(
retrieved_folder_and_drive_ids=set(),
completion_stage=DriveRetrievalStage.START,
completion_map=ThreadSafeDict(),
all_retrieved_file_ids=set(),
has_more=True,
)
@override
def validate_checkpoint_json(self, checkpoint_json: str) -> GoogleDriveCheckpoint:
return GoogleDriveCheckpoint.model_validate_json(checkpoint_json)
class CheckpointOutputWrapper:
"""
Wraps a CheckpointOutput generator to give things back in a more digestible format.
The connector format is easier for the connector implementor (e.g. it enforces exactly
one new checkpoint is returned AND that the checkpoint is at the end), thus the different
formats.
"""
def __init__(self) -> None:
self.next_checkpoint: GoogleDriveCheckpoint | None = None
def __call__(
self,
checkpoint_connector_generator: CheckpointOutput,
) -> Generator[
tuple[Document | None, ConnectorFailure | None, GoogleDriveCheckpoint | None],
None,
None,
]:
# grabs the final return value and stores it in the `next_checkpoint` variable
def _inner_wrapper(
checkpoint_connector_generator: CheckpointOutput,
) -> CheckpointOutput:
self.next_checkpoint = yield from checkpoint_connector_generator
return self.next_checkpoint # not used
for document_or_failure in _inner_wrapper(checkpoint_connector_generator):
if isinstance(document_or_failure, Document):
yield document_or_failure, None, None
elif isinstance(document_or_failure, ConnectorFailure):
yield None, document_or_failure, None
else:
raise ValueError(f"Invalid document_or_failure type: {type(document_or_failure)}")
if self.next_checkpoint is None:
raise RuntimeError("Checkpoint is None. This should never happen - the connector should always return a checkpoint.")
yield None, None, self.next_checkpoint
def yield_all_docs_from_checkpoint_connector(
connector: GoogleDriveConnector,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
) -> Iterator[Document | ConnectorFailure]:
num_iterations = 0
checkpoint = connector.build_dummy_checkpoint()
while checkpoint.has_more:
doc_batch_generator = CheckpointOutputWrapper()(connector.load_from_checkpoint(start, end, checkpoint))
for document, failure, next_checkpoint in doc_batch_generator:
if failure is not None:
yield failure
if document is not None:
yield document
if next_checkpoint is not None:
checkpoint = next_checkpoint
num_iterations += 1
if num_iterations > 100_000:
raise RuntimeError("Too many iterations. Infinite loop?")
if __name__ == "__main__":
import time
from common.data_source.google_util.util import get_credentials_from_env
logging.basicConfig(level=logging.DEBUG)
try:
# Get credentials from environment
email = os.environ.get("GOOGLE_DRIVE_PRIMARY_ADMIN_EMAIL", "yongtengrey@gmail.com")
creds = get_credentials_from_env(email, oauth=True)
print("Credentials loaded successfully")
print(f"{creds=}")
# sys.exit(0)
connector = GoogleDriveConnector(
include_shared_drives=False,
shared_drive_urls=None,
include_my_drives=True,
my_drive_emails="yongtengrey@gmail.com",
shared_folder_urls="https://drive.google.com/drive/folders/1fAKwbmf3U2oM139ZmnOzgIZHGkEwnpfy",
include_files_shared_with_me=False,
specific_user_emails=None,
)
print("GoogleDriveConnector initialized successfully")
connector.load_credentials(creds)
print("Credentials loaded into connector successfully")
print("Google Drive connector is ready to use!")
max_fsize = 0
biggest_fsize = 0
num_errors = 0
docs_processed = 0
start_time = time.time()
with open("stats.txt", "w") as f:
for num, doc_or_failure in enumerate(yield_all_docs_from_checkpoint_connector(connector, 0, time.time())):
if num % 200 == 0:
f.write(f"Processed {num} files\n")
f.write(f"Max file size: {max_fsize / 1000_000:.2f} MB\n")
f.write(f"Time so far: {time.time() - start_time:.2f} seconds\n")
f.write(f"Docs per minute: {num / (time.time() - start_time) * 60:.2f}\n")
biggest_fsize = max(biggest_fsize, max_fsize)
if isinstance(doc_or_failure, Document):
docs_processed += 1
max_fsize = max(max_fsize, doc_or_failure.size_bytes)
print(f"{doc_or_failure=}")
elif isinstance(doc_or_failure, ConnectorFailure):
num_errors += 1
print(f"Num errors: {num_errors}")
print(f"Biggest file size: {biggest_fsize / 1000_000:.2f} MB")
print(f"Time taken: {time.time() - start_time:.2f} seconds")
print(f"Total documents produced: {docs_processed}")
except Exception as e:
print(f"Error: {e}")
import traceback
traceback.print_exc()
sys.exit(1)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/google_drive/connector.py",
"license": "Apache License 2.0",
"lines": 1104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/google_drive/doc_conversion.py | import io
import logging
import mimetypes
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, cast
from urllib.parse import urlparse, urlunparse
from googleapiclient.errors import HttpError # type: ignore
from googleapiclient.http import MediaIoBaseDownload # type: ignore
from pydantic import BaseModel
from common.data_source.config import DocumentSource, FileOrigin
from common.data_source.google_drive.constant import DRIVE_FOLDER_TYPE, DRIVE_SHORTCUT_TYPE
from common.data_source.google_drive.model import GDriveMimeType, GoogleDriveFileType
from common.data_source.google_drive.section_extraction import HEADING_DELIMITER
from common.data_source.google_util.resource import GoogleDriveService, get_drive_service
from common.data_source.models import ConnectorFailure, Document, DocumentFailure, ImageSection, SlimDocument, TextSection
from common.data_source.utils import get_file_ext
# Image types that should be excluded from processing
EXCLUDED_IMAGE_TYPES = [
"image/bmp",
"image/tiff",
"image/gif",
"image/svg+xml",
"image/avif",
]
GOOGLE_MIME_TYPES_TO_EXPORT = {
GDriveMimeType.DOC.value: "text/plain",
GDriveMimeType.SPREADSHEET.value: "text/csv",
GDriveMimeType.PPT.value: "text/plain",
}
GOOGLE_NATIVE_EXPORT_TARGETS: dict[str, tuple[str, str]] = {
GDriveMimeType.DOC.value: ("application/vnd.openxmlformats-officedocument.wordprocessingml.document", ".docx"),
GDriveMimeType.SPREADSHEET.value: ("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", ".xlsx"),
GDriveMimeType.PPT.value: ("application/vnd.openxmlformats-officedocument.presentationml.presentation", ".pptx"),
}
GOOGLE_NATIVE_EXPORT_FALLBACK: tuple[str, str] = ("application/pdf", ".pdf")
ACCEPTED_PLAIN_TEXT_FILE_EXTENSIONS = [
".txt",
".md",
".mdx",
".conf",
".log",
".json",
".csv",
".tsv",
".xml",
".yml",
".yaml",
".sql",
]
ACCEPTED_DOCUMENT_FILE_EXTENSIONS = [
".pdf",
".docx",
".pptx",
".xlsx",
".eml",
".epub",
".html",
]
ACCEPTED_IMAGE_FILE_EXTENSIONS = [
".png",
".jpg",
".jpeg",
".webp",
]
ALL_ACCEPTED_FILE_EXTENSIONS = ACCEPTED_PLAIN_TEXT_FILE_EXTENSIONS + ACCEPTED_DOCUMENT_FILE_EXTENSIONS + ACCEPTED_IMAGE_FILE_EXTENSIONS
MAX_RETRIEVER_EMAILS = 20
CHUNK_SIZE_BUFFER = 64 # extra bytes past the limit to read
# This is a Unicode private-use character; the Docs advanced API uses it to
# represent smart chips (elements like dates and doc links).
SMART_CHIP_CHAR = "\ue907"
WEB_VIEW_LINK_KEY = "webViewLink"
# Fallback templates for generating web links when Drive omits webViewLink.
_FALLBACK_WEB_VIEW_LINK_TEMPLATES = {
GDriveMimeType.DOC.value: "https://docs.google.com/document/d/{}/view",
GDriveMimeType.SPREADSHEET.value: "https://docs.google.com/spreadsheets/d/{}/view",
GDriveMimeType.PPT.value: "https://docs.google.com/presentation/d/{}/view",
}
class PermissionSyncContext(BaseModel):
"""
This is the information that is needed to sync permissions for a document.
"""
primary_admin_email: str
google_domain: str
def onyx_document_id_from_drive_file(file: GoogleDriveFileType) -> str:
link = file.get(WEB_VIEW_LINK_KEY)
if not link:
file_id = file.get("id")
if not file_id:
raise KeyError(f"Google Drive file missing both '{WEB_VIEW_LINK_KEY}' and 'id' fields.")
mime_type = file.get("mimeType", "")
template = _FALLBACK_WEB_VIEW_LINK_TEMPLATES.get(mime_type)
if template is None:
link = f"https://drive.google.com/file/d/{file_id}/view"
else:
link = template.format(file_id)
logging.debug(
"Missing webViewLink for Google Drive file with id %s. Falling back to constructed link %s",
file_id,
link,
)
parsed_url = urlparse(link)
parsed_url = parsed_url._replace(query="") # remove query parameters
spl_path = parsed_url.path.split("/")
if spl_path and (spl_path[-1] in ["edit", "view", "preview"]):
spl_path.pop()
parsed_url = parsed_url._replace(path="/".join(spl_path))
# Reconstruct the URL without query parameters or trailing action segment
return urlunparse(parsed_url)
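# Illustrative example: a webViewLink such as
# "https://docs.google.com/document/d/FILE_ID/edit?usp=drivesdk" normalizes to
# "https://docs.google.com/document/d/FILE_ID" (query stripped, trailing
# "edit"/"view"/"preview" segment dropped), giving a stable document id.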
def _find_nth(haystack: str, needle: str, n: int, start: int = 0) -> int:
start = haystack.find(needle, start)
while start >= 0 and n > 1:
start = haystack.find(needle, start + len(needle))
n -= 1
return start
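# Illustrative example: _find_nth("abcabcabc", "abc", 2) == 3 (start index of
# the second occurrence); it returns -1 when there are fewer than n matches.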
def align_basic_advanced(basic_sections: list[TextSection | ImageSection], adv_sections: list[TextSection]) -> list[TextSection | ImageSection]:
"""Align the basic sections with the advanced sections.
In particular, the basic sections contain all content of the file,
including smart chips like dates and doc links. The advanced sections
are separated by section headers and contain header-based links that
improve user experience when they click on the source in the UI.
There are edge cases in text matching (i.e. the heading is a smart chip or
there is a smart chip in the doc with text containing the actual heading text)
that make the matching imperfect; this is hence done on a best-effort basis.
"""
if len(adv_sections) <= 1:
return basic_sections # no benefit from aligning
basic_full_text = "".join([section.text for section in basic_sections if isinstance(section, TextSection)])
new_sections: list[TextSection | ImageSection] = []
heading_start = 0
for adv_ind in range(1, len(adv_sections)):
heading = adv_sections[adv_ind].text.split(HEADING_DELIMITER)[0]
# retrieve the longest part of the heading that is not a smart chip
heading_key = max(heading.split(SMART_CHIP_CHAR), key=len).strip()
if heading_key == "":
logging.warning(f"Cannot match heading: {heading}, its link will come from the following section")
continue
heading_offset = heading.find(heading_key)
# count occurrences of heading str in previous section
heading_count = adv_sections[adv_ind - 1].text.count(heading_key)
prev_start = heading_start
heading_start = _find_nth(basic_full_text, heading_key, heading_count, start=prev_start) - heading_offset
if heading_start < 0:
logging.warning(f"Heading key {heading_key} from heading {heading} not found in basic text")
heading_start = prev_start
continue
new_sections.append(
TextSection(
link=adv_sections[adv_ind - 1].link,
text=basic_full_text[prev_start:heading_start],
)
)
# handle last section
new_sections.append(TextSection(link=adv_sections[-1].link, text=basic_full_text[heading_start:]))
return new_sections
def is_valid_image_type(mime_type: str) -> bool:
"""
Check if mime_type is a valid image type.
Args:
mime_type: The MIME type to check
Returns:
True if the MIME type is a valid image type, False otherwise
"""
return bool(mime_type) and mime_type.startswith("image/") and mime_type not in EXCLUDED_IMAGE_TYPES
def is_gdrive_image_mime_type(mime_type: str) -> bool:
"""
Return True if the mime_type is a common image type in GDrive.
(e.g. 'image/png', 'image/jpeg')
"""
return is_valid_image_type(mime_type)
def _get_extension_from_file(file: GoogleDriveFileType, mime_type: str, fallback: str = ".bin") -> str:
file_name = file.get("name") or ""
if file_name:
suffix = Path(file_name).suffix
if suffix:
return suffix
file_extension = file.get("fileExtension")
if file_extension:
return f".{file_extension.lstrip('.')}"
guessed = mimetypes.guess_extension(mime_type or "")
if guessed:
return guessed
return fallback
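# Illustrative sketch (not used by the connector): extension resolution order is
# file name suffix -> Drive's fileExtension field -> mimetypes guess -> fallback.
def _example_get_extension_usage() -> None:
    assert _get_extension_from_file({"name": "report.pdf"}, "application/pdf") == ".pdf"
    assert _get_extension_from_file({"fileExtension": "txt"}, "text/plain") == ".txt"
    assert _get_extension_from_file({}, "", fallback=".bin") == ".bin"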
def _download_file_blob(
service: GoogleDriveService,
file: GoogleDriveFileType,
size_threshold: int,
allow_images: bool,
) -> tuple[bytes, str] | None:
mime_type = file.get("mimeType", "")
file_id = file.get("id")
if not file_id:
logging.warning("Encountered Google Drive file without id.")
return None
if is_gdrive_image_mime_type(mime_type) and not allow_images:
logging.debug(f"Skipping image {file.get('name')} because allow_images is False.")
return None
blob: bytes = b""
extension = ".bin"
try:
if mime_type in GOOGLE_NATIVE_EXPORT_TARGETS:
export_mime, extension = GOOGLE_NATIVE_EXPORT_TARGETS[mime_type]
request = service.files().export_media(fileId=file_id, mimeType=export_mime)
blob = _download_request(request, file_id, size_threshold)
elif mime_type.startswith("application/vnd.google-apps"):
export_mime, extension = GOOGLE_NATIVE_EXPORT_FALLBACK
request = service.files().export_media(fileId=file_id, mimeType=export_mime)
blob = _download_request(request, file_id, size_threshold)
else:
extension = _get_extension_from_file(file, mime_type)
blob = download_request(service, file_id, size_threshold)
except HttpError:
raise
if not blob:
return None
if not extension:
extension = _get_extension_from_file(file, mime_type)
return blob, extension
def download_request(service: GoogleDriveService, file_id: str, size_threshold: int) -> bytes:
"""
Download the file from Google Drive.
"""
# For other file types, download the file
# Use the correct API call for downloading files
request = service.files().get_media(fileId=file_id)
return _download_request(request, file_id, size_threshold)
def _download_request(request: Any, file_id: str, size_threshold: int) -> bytes:
response_bytes = io.BytesIO()
downloader = MediaIoBaseDownload(response_bytes, request, chunksize=size_threshold + CHUNK_SIZE_BUFFER)
done = False
while not done:
download_progress, done = downloader.next_chunk()
if download_progress.resumable_progress > size_threshold:
logging.warning(f"File {file_id} exceeds size threshold of {size_threshold}. Skipping2.")
return bytes()
response = response_bytes.getvalue()
if not response:
logging.warning(f"Failed to download {file_id}")
return bytes()
return response
def _download_and_extract_sections_basic(
file: dict[str, str],
service: GoogleDriveService,
allow_images: bool,
size_threshold: int,
) -> list[TextSection | ImageSection]:
"""Extract text and images from a Google Drive file."""
file_id = file["id"]
file_name = file["name"]
mime_type = file["mimeType"]
link = file.get(WEB_VIEW_LINK_KEY, "")
# For non-Google files, download the file
# Use the correct API call for downloading files
# lazy evaluation to only download the file if necessary
def response_call() -> bytes:
return download_request(service, file_id, size_threshold)
if is_gdrive_image_mime_type(mime_type):
# Skip images if not explicitly enabled
if not allow_images:
return []
# Store images for later processing
sections: list[TextSection | ImageSection] = []
        # NOTE: placeholder stub; the real helper would persist the image and return an
        # (ImageSection, embedded_id) tuple. With this stub the surrounding try/except logs
        # the failure and image sections are skipped.
        def store_image_and_create_section(**kwargs):
            pass
try:
section, embedded_id = store_image_and_create_section(
image_data=response_call(),
file_id=file_id,
display_name=file_name,
media_type=mime_type,
file_origin=FileOrigin.CONNECTOR,
link=link,
)
sections.append(section)
except Exception as e:
logging.error(f"Failed to process image {file_name}: {e}")
return sections
# For Google Docs, Sheets, and Slides, export as plain text
if mime_type in GOOGLE_MIME_TYPES_TO_EXPORT:
export_mime_type = GOOGLE_MIME_TYPES_TO_EXPORT[mime_type]
# Use the correct API call for exporting files
request = service.files().export_media(fileId=file_id, mimeType=export_mime_type)
response = _download_request(request, file_id, size_threshold)
if not response:
logging.warning(f"Failed to export {file_name} as {export_mime_type}")
return []
text = response.decode("utf-8")
return [TextSection(link=link, text=text)]
# Process based on mime type
if mime_type == "text/plain":
try:
text = response_call().decode("utf-8")
return [TextSection(link=link, text=text)]
except UnicodeDecodeError as e:
logging.warning(f"Failed to extract text from {file_name}: {e}")
return []
elif mime_type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
        # NOTE: placeholder stub for the real DOCX extractor; it returns a (text, images)
        # tuple so the unpacking below works.
        def docx_to_text_and_images(*args, **kwargs):
            return "docx_to_text_and_images", []
text, _ = docx_to_text_and_images(io.BytesIO(response_call()))
return [TextSection(link=link, text=text)]
elif mime_type == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":
def xlsx_to_text(*args, **kwargs):
return "xlsx_to_text"
text = xlsx_to_text(io.BytesIO(response_call()), file_name=file_name)
return [TextSection(link=link, text=text)] if text else []
elif mime_type == "application/vnd.openxmlformats-officedocument.presentationml.presentation":
def pptx_to_text(*args, **kwargs):
return "pptx_to_text"
text = pptx_to_text(io.BytesIO(response_call()), file_name=file_name)
return [TextSection(link=link, text=text)] if text else []
elif mime_type == "application/pdf":
        # NOTE: placeholder stub for the real PDF reader; it returns a (text, metadata, images)
        # tuple so the unpacking below works.
        def read_pdf_file(*args, **kwargs):
            return "read_pdf_file", None, []
text, _pdf_meta, images = read_pdf_file(io.BytesIO(response_call()))
pdf_sections: list[TextSection | ImageSection] = [TextSection(link=link, text=text)]
# Process embedded images in the PDF
try:
for idx, (img_data, img_name) in enumerate(images):
section, embedded_id = store_image_and_create_section(
image_data=img_data,
file_id=f"{file_id}_img_{idx}",
display_name=img_name or f"{file_name} - image {idx}",
file_origin=FileOrigin.CONNECTOR,
)
pdf_sections.append(section)
except Exception as e:
logging.error(f"Failed to process PDF images in {file_name}: {e}")
return pdf_sections
# Final attempt at extracting text
file_ext = get_file_ext(file.get("name", ""))
if file_ext not in ALL_ACCEPTED_FILE_EXTENSIONS:
logging.warning(f"Skipping file {file.get('name')} due to extension.")
return []
try:
def extract_file_text(*args, **kwargs):
return "extract_file_text"
text = extract_file_text(io.BytesIO(response_call()), file_name)
return [TextSection(link=link, text=text)]
except Exception as e:
logging.warning(f"Failed to extract text from {file_name}: {e}")
return []
def _convert_drive_item_to_document(
creds: Any,
allow_images: bool,
size_threshold: int,
retriever_email: str,
file: GoogleDriveFileType,
# if not specified, we will not sync permissions
# will also be a no-op if EE is not enabled
permission_sync_context: PermissionSyncContext | None,
) -> Document | ConnectorFailure | None:
"""
Main entry point for converting a Google Drive file => Document object.
"""
def _get_drive_service() -> GoogleDriveService:
return get_drive_service(creds, user_email=retriever_email)
doc_id = "unknown"
link = file.get(WEB_VIEW_LINK_KEY)
try:
if file.get("mimeType") in [DRIVE_SHORTCUT_TYPE, DRIVE_FOLDER_TYPE]:
logging.info("Skipping shortcut/folder.")
return None
size_str = file.get("size")
if size_str:
try:
size_int = int(size_str)
except ValueError:
logging.warning(f"Parsing string to int failed: size_str={size_str}")
else:
if size_int > size_threshold:
logging.warning(f"{file.get('name')} exceeds size threshold of {size_threshold}. Skipping.")
return None
blob_and_ext = _download_file_blob(
service=_get_drive_service(),
file=file,
size_threshold=size_threshold,
allow_images=allow_images,
)
if blob_and_ext is None:
logging.info(f"Skipping file {file.get('name')} due to incompatible type or download failure.")
return None
blob, extension = blob_and_ext
if not blob:
logging.warning(f"Failed to download {file.get('name')}. Skipping.")
return None
doc_id = onyx_document_id_from_drive_file(file)
modified_time = file.get("modifiedTime")
try:
doc_updated_at = datetime.fromisoformat(modified_time.replace("Z", "+00:00")) if modified_time else datetime.now(timezone.utc)
except ValueError:
logging.warning(f"Failed to parse modifiedTime for {file.get('name')}, defaulting to current time.")
doc_updated_at = datetime.now(timezone.utc)
return Document(
id=doc_id,
source=DocumentSource.GOOGLE_DRIVE,
semantic_identifier=file.get("name", ""),
blob=blob,
extension=extension,
size_bytes=len(blob),
doc_updated_at=doc_updated_at,
)
except Exception as e:
doc_id = "unknown"
try:
doc_id = onyx_document_id_from_drive_file(file)
except Exception as e2:
logging.warning(f"Error getting document id from file: {e2}")
file_name = file.get("name", doc_id)
error_str = f"Error converting file '{file_name}' to Document as {retriever_email}: {e}"
if isinstance(e, HttpError) and e.status_code == 403:
logging.warning(f"Uncommon permissions error while downloading file. User {retriever_email} was able to see file {file_name} but cannot download it.")
logging.warning(error_str)
return ConnectorFailure(
failed_document=DocumentFailure(
document_id=doc_id,
document_link=link,
),
failed_entity=None,
failure_message=error_str,
exception=e,
)
def convert_drive_item_to_document(
creds: Any,
allow_images: bool,
size_threshold: int,
# if not specified, we will not sync permissions
# will also be a no-op if EE is not enabled
permission_sync_context: PermissionSyncContext | None,
retriever_emails: list[str],
file: GoogleDriveFileType,
) -> Document | ConnectorFailure | None:
"""
Attempt to convert a drive item to a document with each retriever email
in order. returns upon a successful retrieval or a non-403 error.
We used to always get the user email from the file owners when available,
but this was causing issues with shared folders where the owner was not included in the service account
now we use the email of the account that successfully listed the file. There are cases where a
user that can list a file cannot download it, so we retry with file owners and admin email.
"""
first_error = None
doc_or_failure = None
retriever_emails = retriever_emails[:MAX_RETRIEVER_EMAILS]
# use seen instead of list(set()) to avoid re-ordering the retriever emails
seen = set()
for retriever_email in retriever_emails:
if retriever_email in seen:
continue
seen.add(retriever_email)
doc_or_failure = _convert_drive_item_to_document(
creds,
allow_images,
size_threshold,
retriever_email,
file,
permission_sync_context,
)
# There are a variety of permissions-based errors that occasionally occur
# when retrieving files. Often when these occur, there is another user
# that can successfully retrieve the file, so we try the next user.
if doc_or_failure is None or isinstance(doc_or_failure, Document) or not (isinstance(doc_or_failure.exception, HttpError) and doc_or_failure.exception.status_code in [401, 403, 404]):
return doc_or_failure
if first_error is None:
first_error = doc_or_failure
else:
first_error.failure_message += f"\n\n{doc_or_failure.failure_message}"
if first_error and isinstance(first_error.exception, HttpError) and first_error.exception.status_code == 403:
# This SHOULD happen very rarely, and we don't want to break the indexing process when
# a high volume of 403s occurs early. We leave a verbose log to help investigate.
        logging.error(
            f"Skipping file id: {file.get('id')} name: {file.get('name')} due to 403 error. "
            f"Attempted to retrieve with {retriever_emails}, got the following errors: {first_error.failure_message}"
        )
return None
return first_error
def build_slim_document(
creds: Any,
file: GoogleDriveFileType,
# if not specified, we will not sync permissions
# will also be a no-op if EE is not enabled
permission_sync_context: PermissionSyncContext | None,
) -> SlimDocument | None:
if file.get("mimeType") in [DRIVE_FOLDER_TYPE, DRIVE_SHORTCUT_TYPE]:
return None
owner_email = cast(str | None, file.get("owners", [{}])[0].get("emailAddress"))
def _get_external_access_for_raw_gdrive_file(*args, **kwargs):
return None
external_access = (
_get_external_access_for_raw_gdrive_file(
file=file,
company_domain=permission_sync_context.google_domain,
retriever_drive_service=(
get_drive_service(
creds,
user_email=owner_email,
)
if owner_email
else None
),
admin_drive_service=get_drive_service(
creds,
user_email=permission_sync_context.primary_admin_email,
),
)
if permission_sync_context
else None
)
return SlimDocument(
id=onyx_document_id_from_drive_file(file),
external_access=external_access,
)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/google_drive/doc_conversion.py",
"license": "Apache License 2.0",
"lines": 510,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/google_drive/file_retrieval.py | import logging
from collections.abc import Callable, Iterator
from datetime import datetime, timezone
from enum import Enum
from googleapiclient.discovery import Resource # type: ignore
from googleapiclient.errors import HttpError # type: ignore
from common.data_source.google_drive.constant import DRIVE_FOLDER_TYPE, DRIVE_SHORTCUT_TYPE
from common.data_source.google_drive.model import DriveRetrievalStage, GoogleDriveFileType, RetrievedDriveFile
from common.data_source.google_util.resource import GoogleDriveService
from common.data_source.google_util.util import ORDER_BY_KEY, PAGE_TOKEN_KEY, GoogleFields, execute_paginated_retrieval, execute_paginated_retrieval_with_max_pages
from common.data_source.models import SecondsSinceUnixEpoch
PERMISSION_FULL_DESCRIPTION = "permissions(id, emailAddress, type, domain, permissionDetails)"
FILE_FIELDS = "nextPageToken, files(mimeType, id, name, modifiedTime, webViewLink, shortcutDetails, owners(emailAddress), size)"
FILE_FIELDS_WITH_PERMISSIONS = f"nextPageToken, files(mimeType, id, name, {PERMISSION_FULL_DESCRIPTION}, permissionIds, modifiedTime, webViewLink, shortcutDetails, owners(emailAddress), size)"
SLIM_FILE_FIELDS = f"nextPageToken, files(mimeType, driveId, id, name, {PERMISSION_FULL_DESCRIPTION}, permissionIds, webViewLink, owners(emailAddress), modifiedTime)"
FOLDER_FIELDS = "nextPageToken, files(id, name, permissions, modifiedTime, webViewLink, shortcutDetails)"
class DriveFileFieldType(Enum):
"""Enum to specify which fields to retrieve from Google Drive files"""
SLIM = "slim" # Minimal fields for basic file info
STANDARD = "standard" # Standard fields including content metadata
WITH_PERMISSIONS = "with_permissions" # Full fields including permissions
def generate_time_range_filter(
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> str:
time_range_filter = ""
if start is not None:
time_start = datetime.fromtimestamp(start, tz=timezone.utc).isoformat()
time_range_filter += f" and {GoogleFields.MODIFIED_TIME.value} > '{time_start}'"
if end is not None:
time_stop = datetime.fromtimestamp(end, tz=timezone.utc).isoformat()
time_range_filter += f" and {GoogleFields.MODIFIED_TIME.value} <= '{time_stop}'"
return time_range_filter
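# Illustrative sketch (not used by the connector): the emitted clause is appended to an
# existing Drive query, hence the leading " and ".
def _example_time_range_filter_usage() -> None:
    clause = generate_time_range_filter(start=0, end=86_400)
    assert clause == (
        " and modifiedTime > '1970-01-01T00:00:00+00:00'"
        " and modifiedTime <= '1970-01-02T00:00:00+00:00'"
    )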
def _get_folders_in_parent(
service: Resource,
parent_id: str | None = None,
) -> Iterator[GoogleDriveFileType]:
# Follow shortcuts to folders
query = f"(mimeType = '{DRIVE_FOLDER_TYPE}' or mimeType = '{DRIVE_SHORTCUT_TYPE}')"
query += " and trashed = false"
if parent_id:
query += f" and '{parent_id}' in parents"
for file in execute_paginated_retrieval(
retrieval_function=service.files().list,
list_key="files",
continue_on_404_or_403=True,
corpora="allDrives",
supportsAllDrives=True,
includeItemsFromAllDrives=True,
fields=FOLDER_FIELDS,
q=query,
):
yield file
def _get_fields_for_file_type(field_type: DriveFileFieldType) -> str:
"""Get the appropriate fields string based on the field type enum"""
if field_type == DriveFileFieldType.SLIM:
return SLIM_FILE_FIELDS
elif field_type == DriveFileFieldType.WITH_PERMISSIONS:
return FILE_FIELDS_WITH_PERMISSIONS
else: # DriveFileFieldType.STANDARD
return FILE_FIELDS
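# Illustrative sketch (not used by the connector): the enum maps directly onto the field
# strings defined above.
def _example_fields_for_file_type_usage() -> None:
    assert _get_fields_for_file_type(DriveFileFieldType.SLIM) == SLIM_FILE_FIELDS
    assert _get_fields_for_file_type(DriveFileFieldType.WITH_PERMISSIONS) == FILE_FIELDS_WITH_PERMISSIONS
    assert _get_fields_for_file_type(DriveFileFieldType.STANDARD) == FILE_FIELDS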
def _get_files_in_parent(
service: Resource,
parent_id: str,
field_type: DriveFileFieldType,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[GoogleDriveFileType]:
query = f"mimeType != '{DRIVE_FOLDER_TYPE}' and '{parent_id}' in parents"
query += " and trashed = false"
query += generate_time_range_filter(start, end)
kwargs = {ORDER_BY_KEY: GoogleFields.MODIFIED_TIME.value}
for file in execute_paginated_retrieval(
retrieval_function=service.files().list,
list_key="files",
continue_on_404_or_403=True,
corpora="allDrives",
supportsAllDrives=True,
includeItemsFromAllDrives=True,
fields=_get_fields_for_file_type(field_type),
q=query,
**kwargs,
):
yield file
def crawl_folders_for_files(
service: Resource,
parent_id: str,
field_type: DriveFileFieldType,
user_email: str,
traversed_parent_ids: set[str],
update_traversed_ids_func: Callable[[str], None],
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[RetrievedDriveFile]:
"""
This function starts crawling from any folder. It is slower though.
"""
logging.info("Entered crawl_folders_for_files with parent_id: " + parent_id)
if parent_id not in traversed_parent_ids:
logging.info("Parent id not in traversed parent ids, getting files")
found_files = False
file = {}
try:
for file in _get_files_in_parent(
service=service,
parent_id=parent_id,
field_type=field_type,
start=start,
end=end,
):
logging.info(f"Found file: {file['name']}, user email: {user_email}")
found_files = True
yield RetrievedDriveFile(
drive_file=file,
user_email=user_email,
parent_id=parent_id,
completion_stage=DriveRetrievalStage.FOLDER_FILES,
)
# Only mark a folder as done if it was fully traversed without errors
# This usually indicates that the owner of the folder was impersonated.
# In cases where this never happens, most likely the folder owner is
# not part of the Google Workspace in question (or for oauth, the authenticated
# user doesn't own the folder)
if found_files:
update_traversed_ids_func(parent_id)
except Exception as e:
if isinstance(e, HttpError) and e.status_code == 403:
# don't yield an error here because this is expected behavior
# when a user doesn't have access to a folder
logging.debug(f"Error getting files in parent {parent_id}: {e}")
else:
logging.error(f"Error getting files in parent {parent_id}: {e}")
yield RetrievedDriveFile(
drive_file=file,
user_email=user_email,
parent_id=parent_id,
completion_stage=DriveRetrievalStage.FOLDER_FILES,
error=e,
)
else:
logging.info(f"Skipping subfolder files since already traversed: {parent_id}")
for subfolder in _get_folders_in_parent(
service=service,
parent_id=parent_id,
):
logging.info("Fetching all files in subfolder: " + subfolder["name"])
yield from crawl_folders_for_files(
service=service,
parent_id=subfolder["id"],
field_type=field_type,
user_email=user_email,
traversed_parent_ids=traversed_parent_ids,
update_traversed_ids_func=update_traversed_ids_func,
start=start,
end=end,
)
def get_files_in_shared_drive(
service: Resource,
drive_id: str,
field_type: DriveFileFieldType,
max_num_pages: int,
update_traversed_ids_func: Callable[[str], None] = lambda _: None,
cache_folders: bool = True,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
page_token: str | None = None,
) -> Iterator[GoogleDriveFileType | str]:
kwargs = {ORDER_BY_KEY: GoogleFields.MODIFIED_TIME.value}
if page_token:
logging.info(f"Using page token: {page_token}")
kwargs[PAGE_TOKEN_KEY] = page_token
if cache_folders:
# If we know we are going to folder crawl later, we can cache the folders here
# Get all folders being queried and add them to the traversed set
folder_query = f"mimeType = '{DRIVE_FOLDER_TYPE}'"
folder_query += " and trashed = false"
for folder in execute_paginated_retrieval(
retrieval_function=service.files().list,
list_key="files",
continue_on_404_or_403=True,
corpora="drive",
driveId=drive_id,
supportsAllDrives=True,
includeItemsFromAllDrives=True,
fields="nextPageToken, files(id)",
q=folder_query,
):
update_traversed_ids_func(folder["id"])
# Get all files in the shared drive
file_query = f"mimeType != '{DRIVE_FOLDER_TYPE}'"
file_query += " and trashed = false"
file_query += generate_time_range_filter(start, end)
for file in execute_paginated_retrieval_with_max_pages(
retrieval_function=service.files().list,
max_num_pages=max_num_pages,
list_key="files",
continue_on_404_or_403=True,
corpora="drive",
driveId=drive_id,
supportsAllDrives=True,
includeItemsFromAllDrives=True,
fields=_get_fields_for_file_type(field_type),
q=file_query,
**kwargs,
):
# If we found any files, mark this drive as traversed. When a user has access to a drive,
# they have access to all the files in the drive. Also, not a huge deal if we re-traverse
# empty drives.
# NOTE: ^^ the above is not actually true due to folder restrictions:
# https://support.google.com/a/users/answer/12380484?hl=en
# So we may have to change this logic for people who use folder restrictions.
update_traversed_ids_func(drive_id)
yield file
def get_all_files_in_my_drive_and_shared(
service: GoogleDriveService,
update_traversed_ids_func: Callable,
field_type: DriveFileFieldType,
include_shared_with_me: bool,
max_num_pages: int,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
cache_folders: bool = True,
page_token: str | None = None,
) -> Iterator[GoogleDriveFileType | str]:
kwargs = {ORDER_BY_KEY: GoogleFields.MODIFIED_TIME.value}
if page_token:
logging.info(f"Using page token: {page_token}")
kwargs[PAGE_TOKEN_KEY] = page_token
if cache_folders:
# If we know we are going to folder crawl later, we can cache the folders here
# Get all folders being queried and add them to the traversed set
folder_query = f"mimeType = '{DRIVE_FOLDER_TYPE}'"
folder_query += " and trashed = false"
if not include_shared_with_me:
folder_query += " and 'me' in owners"
found_folders = False
for folder in execute_paginated_retrieval(
retrieval_function=service.files().list,
list_key="files",
corpora="user",
fields=_get_fields_for_file_type(field_type),
q=folder_query,
):
            update_traversed_ids_func(folder[GoogleFields.ID.value])
found_folders = True
if found_folders:
update_traversed_ids_func(get_root_folder_id(service))
# Then get the files
file_query = f"mimeType != '{DRIVE_FOLDER_TYPE}'"
file_query += " and trashed = false"
if not include_shared_with_me:
file_query += " and 'me' in owners"
file_query += generate_time_range_filter(start, end)
yield from execute_paginated_retrieval_with_max_pages(
retrieval_function=service.files().list,
max_num_pages=max_num_pages,
list_key="files",
continue_on_404_or_403=False,
corpora="user",
fields=_get_fields_for_file_type(field_type),
q=file_query,
**kwargs,
)
def get_all_files_for_oauth(
service: GoogleDriveService,
include_files_shared_with_me: bool,
include_my_drives: bool,
# One of the above 2 should be true
include_shared_drives: bool,
field_type: DriveFileFieldType,
max_num_pages: int,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
page_token: str | None = None,
) -> Iterator[GoogleDriveFileType | str]:
kwargs = {ORDER_BY_KEY: GoogleFields.MODIFIED_TIME.value}
if page_token:
logging.info(f"Using page token: {page_token}")
kwargs[PAGE_TOKEN_KEY] = page_token
should_get_all = include_shared_drives and include_my_drives and include_files_shared_with_me
corpora = "allDrives" if should_get_all else "user"
file_query = f"mimeType != '{DRIVE_FOLDER_TYPE}'"
file_query += " and trashed = false"
file_query += generate_time_range_filter(start, end)
if not should_get_all:
if include_files_shared_with_me and not include_my_drives:
file_query += " and not 'me' in owners"
if not include_files_shared_with_me and include_my_drives:
file_query += " and 'me' in owners"
yield from execute_paginated_retrieval_with_max_pages(
max_num_pages=max_num_pages,
retrieval_function=service.files().list,
list_key="files",
continue_on_404_or_403=False,
corpora=corpora,
includeItemsFromAllDrives=should_get_all,
supportsAllDrives=should_get_all,
fields=_get_fields_for_file_type(field_type),
q=file_query,
**kwargs,
)
# Just in case we need to get the root folder id
def get_root_folder_id(service: Resource) -> str:
# we don't paginate here because there is only one root folder per user
# https://developers.google.com/drive/api/guides/v2-to-v3-reference
return service.files().get(fileId="root", fields=GoogleFields.ID.value).execute()[GoogleFields.ID.value]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/google_drive/file_retrieval.py",
"license": "Apache License 2.0",
"lines": 306,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/google_drive/model.py | from enum import Enum
from typing import Any
from pydantic import BaseModel, ConfigDict, field_serializer, field_validator
from common.data_source.google_util.util_threadpool_concurrency import ThreadSafeDict
from common.data_source.models import ConnectorCheckpoint, SecondsSinceUnixEpoch
GoogleDriveFileType = dict[str, Any]
class GDriveMimeType(str, Enum):
DOC = "application/vnd.google-apps.document"
SPREADSHEET = "application/vnd.google-apps.spreadsheet"
SPREADSHEET_OPEN_FORMAT = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
SPREADSHEET_MS_EXCEL = "application/vnd.ms-excel"
PDF = "application/pdf"
WORD_DOC = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
PPT = "application/vnd.google-apps.presentation"
POWERPOINT = "application/vnd.openxmlformats-officedocument.presentationml.presentation"
PLAIN_TEXT = "text/plain"
MARKDOWN = "text/markdown"
# These correspond to the major stages of retrieval for Google Drive.
# The stages for the oauth flow are:
# get_all_files_for_oauth(),
# get_all_drive_ids(),
# get_files_in_shared_drive(),
# crawl_folders_for_files()
#
# The stages for the service account flow are roughly:
# get_all_user_emails(),
# get_all_drive_ids(),
# get_files_in_shared_drive(),
# Then for each user:
# get_files_in_my_drive()
# get_files_in_shared_drive()
# crawl_folders_for_files()
class DriveRetrievalStage(str, Enum):
START = "start"
DONE = "done"
# OAuth specific stages
OAUTH_FILES = "oauth_files"
# Service account specific stages
USER_EMAILS = "user_emails"
MY_DRIVE_FILES = "my_drive_files"
# Used for both oauth and service account flows
DRIVE_IDS = "drive_ids"
SHARED_DRIVE_FILES = "shared_drive_files"
FOLDER_FILES = "folder_files"
class StageCompletion(BaseModel):
"""
Describes the point in the retrieval+indexing process that the
connector is at. completed_until is the timestamp of the latest
file that has been retrieved or error that has been yielded.
Optional fields are used for retrieval stages that need more information
for resuming than just the timestamp of the latest file.
"""
stage: DriveRetrievalStage
completed_until: SecondsSinceUnixEpoch
current_folder_or_drive_id: str | None = None
next_page_token: str | None = None
# only used for shared drives
processed_drive_ids: set[str] = set()
def update(
self,
stage: DriveRetrievalStage,
completed_until: SecondsSinceUnixEpoch,
current_folder_or_drive_id: str | None = None,
) -> None:
self.stage = stage
self.completed_until = completed_until
self.current_folder_or_drive_id = current_folder_or_drive_id
class GoogleDriveCheckpoint(ConnectorCheckpoint):
# Checkpoint version of _retrieved_ids
retrieved_folder_and_drive_ids: set[str]
# Describes the point in the retrieval+indexing process that the
# checkpoint is at. when this is set to a given stage, the connector
# has finished yielding all values from the previous stage.
completion_stage: DriveRetrievalStage
# The latest timestamp of a file that has been retrieved per user email.
# StageCompletion is used to track the completion of each stage, but the
# timestamp part is not used for folder crawling.
completion_map: ThreadSafeDict[str, StageCompletion]
# all file ids that have been retrieved
all_retrieved_file_ids: set[str] = set()
# cached version of the drive and folder ids to retrieve
drive_ids_to_retrieve: list[str] | None = None
folder_ids_to_retrieve: list[str] | None = None
# cached user emails
user_emails: list[str] | None = None
@field_serializer("completion_map")
def serialize_completion_map(self, completion_map: ThreadSafeDict[str, StageCompletion], _info: Any) -> dict[str, StageCompletion]:
return completion_map._dict
@field_validator("completion_map", mode="before")
def validate_completion_map(cls, v: Any) -> ThreadSafeDict[str, StageCompletion]:
assert isinstance(v, dict) or isinstance(v, ThreadSafeDict)
return ThreadSafeDict({k: StageCompletion.model_validate(val) for k, val in v.items()})
class RetrievedDriveFile(BaseModel):
"""
Describes a file that has been retrieved from Google Drive.
user_email is the email of the user that the file was retrieved
by impersonating. If an error worthy of being reported is encountered,
error should be set and later propagated as a ConnectorFailure.
"""
# The stage at which this file was retrieved
completion_stage: DriveRetrievalStage
# The file that was retrieved
drive_file: GoogleDriveFileType
# The email of the user that the file was retrieved by impersonating
user_email: str
# The id of the parent folder or drive of the file
parent_id: str | None = None
# Any unexpected error that occurred while retrieving the file.
# In particular, this is not used for 403/404 errors, which are expected
# in the context of impersonating all the users to try to retrieve all
# files from all their Drives and Folders.
error: Exception | None = None
model_config = ConfigDict(arbitrary_types_allowed=True)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/google_drive/model.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/google_drive/section_extraction.py | from typing import Any
from pydantic import BaseModel
from common.data_source.google_util.resource import GoogleDocsService
from common.data_source.models import TextSection
HEADING_DELIMITER = "\n"
class CurrentHeading(BaseModel):
id: str | None
text: str
def get_document_sections(
docs_service: GoogleDocsService,
doc_id: str,
) -> list[TextSection]:
"""Extracts sections from a Google Doc, including their headings and content"""
# Fetch the document structure
http_request = docs_service.documents().get(documentId=doc_id)
# Google has poor support for tabs in the docs api, see
# https://cloud.google.com/python/docs/reference/cloudtasks/
# latest/google.cloud.tasks_v2.types.HttpRequest
# https://developers.google.com/workspace/docs/api/how-tos/tabs
# https://developers.google.com/workspace/docs/api/reference/rest/v1/documents/get
# this is a hack to use the param mentioned in the rest api docs
# TODO: check if it can be specified i.e. in documents()
http_request.uri += "&includeTabsContent=true"
doc = http_request.execute()
# Get the content
tabs = doc.get("tabs", {})
sections: list[TextSection] = []
for tab in tabs:
sections.extend(get_tab_sections(tab, doc_id))
return sections
def _is_heading(paragraph: dict[str, Any]) -> bool:
"""Checks if a paragraph (a block of text in a drive document) is a heading"""
if not ("paragraphStyle" in paragraph and "namedStyleType" in paragraph["paragraphStyle"]):
return False
style = paragraph["paragraphStyle"]["namedStyleType"]
is_heading = style.startswith("HEADING_")
is_title = style.startswith("TITLE")
return is_heading or is_title
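# Illustrative sketch (not used by the connector): only HEADING_* / TITLE paragraph styles
# start a new section.
def _example_is_heading_usage() -> None:
    assert _is_heading({"paragraphStyle": {"namedStyleType": "HEADING_1"}})
    assert not _is_heading({"paragraphStyle": {"namedStyleType": "NORMAL_TEXT"}})
    assert not _is_heading({})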
def _add_finished_section(
sections: list[TextSection],
doc_id: str,
tab_id: str,
current_heading: CurrentHeading,
current_section: list[str],
) -> None:
"""Adds a finished section to the list of sections if the section has content.
Returns the list of sections to use going forward, which may be the old list
if a new section was not added.
"""
if not (current_section or current_heading.text):
return
# If we were building a previous section, add it to sections list
# this is unlikely to ever matter, but helps if the doc contains weird headings
header_text = current_heading.text.replace(HEADING_DELIMITER, "")
section_text = f"{header_text}{HEADING_DELIMITER}" + "\n".join(current_section)
sections.append(
TextSection(
text=section_text.strip(),
link=_build_gdoc_section_link(doc_id, tab_id, current_heading.id),
)
)
def _build_gdoc_section_link(doc_id: str, tab_id: str, heading_id: str | None) -> str:
"""Builds a Google Doc link that jumps to a specific heading"""
# NOTE: doesn't support docs with multiple tabs atm, if we need that ask
# @Chris
heading_str = f"#heading={heading_id}" if heading_id else ""
return f"https://docs.google.com/document/d/{doc_id}/edit?tab={tab_id}{heading_str}"
def _extract_id_from_heading(paragraph: dict[str, Any]) -> str:
"""Extracts the id from a heading paragraph element"""
return paragraph["paragraphStyle"]["headingId"]
def _extract_text_from_paragraph(paragraph: dict[str, Any]) -> str:
"""Extracts the text content from a paragraph element"""
text_elements = []
for element in paragraph.get("elements", []):
if "textRun" in element:
text_elements.append(element["textRun"].get("content", ""))
# Handle links
if "textStyle" in element and "link" in element["textStyle"]:
text_elements.append(f"({element['textStyle']['link'].get('url', '')})")
if "person" in element:
name = element["person"].get("personProperties", {}).get("name", "")
email = element["person"].get("personProperties", {}).get("email", "")
person_str = "<Person|"
if name:
person_str += f"name: {name}, "
if email:
person_str += f"email: {email}"
person_str += ">"
text_elements.append(person_str)
if "richLink" in element:
props = element["richLink"].get("richLinkProperties", {})
title = props.get("title", "")
uri = props.get("uri", "")
link_str = f"[{title}]({uri})"
text_elements.append(link_str)
return "".join(text_elements)
def _extract_text_from_table(table: dict[str, Any]) -> str:
"""
Extracts the text content from a table element.
"""
row_strs = []
for row in table.get("tableRows", []):
cells = row.get("tableCells", [])
cell_strs = []
for cell in cells:
child_elements = cell.get("content", {})
cell_str = []
for child_elem in child_elements:
if "paragraph" not in child_elem:
continue
cell_str.append(_extract_text_from_paragraph(child_elem["paragraph"]))
cell_strs.append("".join(cell_str))
row_strs.append(", ".join(cell_strs))
return "\n".join(row_strs)
def get_tab_sections(tab: dict[str, Any], doc_id: str) -> list[TextSection]:
tab_id = tab["tabProperties"]["tabId"]
content = tab.get("documentTab", {}).get("body", {}).get("content", [])
sections: list[TextSection] = []
current_section: list[str] = []
current_heading = CurrentHeading(id=None, text="")
for element in content:
if "paragraph" in element:
paragraph = element["paragraph"]
# If this is not a heading, add content to current section
if not _is_heading(paragraph):
text = _extract_text_from_paragraph(paragraph)
if text.strip():
current_section.append(text)
continue
_add_finished_section(sections, doc_id, tab_id, current_heading, current_section)
current_section = []
# Start new heading
heading_id = _extract_id_from_heading(paragraph)
heading_text = _extract_text_from_paragraph(paragraph)
current_heading = CurrentHeading(
id=heading_id,
text=heading_text,
)
elif "table" in element:
text = _extract_text_from_table(element["table"])
if text.strip():
current_section.append(text)
# Don't forget to add the last section
_add_finished_section(sections, doc_id, tab_id, current_heading, current_section)
return sections
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/google_drive/section_extraction.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/google_util/auth.py | import json
import logging
from typing import Any
from google.auth.transport.requests import Request # type: ignore
from google.oauth2.credentials import Credentials as OAuthCredentials  # type: ignore
from google.oauth2.service_account import Credentials as ServiceAccountCredentials  # type: ignore
from common.data_source.config import OAUTH_GOOGLE_DRIVE_CLIENT_ID, OAUTH_GOOGLE_DRIVE_CLIENT_SECRET, DocumentSource
from common.data_source.google_util.constant import (
DB_CREDENTIALS_AUTHENTICATION_METHOD,
DB_CREDENTIALS_DICT_SERVICE_ACCOUNT_KEY,
DB_CREDENTIALS_DICT_TOKEN_KEY,
DB_CREDENTIALS_PRIMARY_ADMIN_KEY,
GOOGLE_SCOPES,
GoogleOAuthAuthenticationMethod,
)
from common.data_source.google_util.oauth_flow import ensure_oauth_token_dict
def sanitize_oauth_credentials(oauth_creds: OAuthCredentials) -> str:
"""we really don't want to be persisting the client id and secret anywhere but the
environment.
Returns a string of serialized json.
"""
# strip the client id and secret
oauth_creds_json_str = oauth_creds.to_json()
oauth_creds_sanitized_json: dict[str, Any] = json.loads(oauth_creds_json_str)
oauth_creds_sanitized_json.pop("client_id", None)
oauth_creds_sanitized_json.pop("client_secret", None)
oauth_creds_sanitized_json_str = json.dumps(oauth_creds_sanitized_json)
return oauth_creds_sanitized_json_str
def get_google_creds(
credentials: dict[str, str],
source: DocumentSource,
) -> tuple[ServiceAccountCredentials | OAuthCredentials, dict[str, str] | None]:
"""Checks for two different types of credentials.
(1) A credential which holds a token acquired via a user going through
the Google OAuth flow.
(2) A credential which holds a service account key JSON file, which
can then be used to impersonate any user in the workspace.
Return a tuple where:
The first element is the requested credentials
The second element is a new credentials dict that the caller should write back
to the db. This happens if token rotation occurs while loading credentials.
"""
oauth_creds = None
service_creds = None
new_creds_dict = None
if DB_CREDENTIALS_DICT_TOKEN_KEY in credentials:
# OAUTH
authentication_method: str = credentials.get(
DB_CREDENTIALS_AUTHENTICATION_METHOD,
GoogleOAuthAuthenticationMethod.UPLOADED,
)
credentials_dict_str = credentials[DB_CREDENTIALS_DICT_TOKEN_KEY]
credentials_dict = json.loads(credentials_dict_str)
regenerated_from_client_secret = False
if "client_id" not in credentials_dict or "client_secret" not in credentials_dict or "refresh_token" not in credentials_dict:
try:
credentials_dict = ensure_oauth_token_dict(credentials_dict, source)
except Exception as exc:
raise PermissionError(
"Google Drive OAuth credentials are incomplete. Please finish the OAuth flow to generate access tokens."
) from exc
credentials_dict_str = json.dumps(credentials_dict)
regenerated_from_client_secret = True
# only send what get_google_oauth_creds needs
authorized_user_info = {}
# oauth_interactive is sanitized and needs credentials from the environment
if authentication_method == GoogleOAuthAuthenticationMethod.OAUTH_INTERACTIVE:
authorized_user_info["client_id"] = OAUTH_GOOGLE_DRIVE_CLIENT_ID
authorized_user_info["client_secret"] = OAUTH_GOOGLE_DRIVE_CLIENT_SECRET
else:
authorized_user_info["client_id"] = credentials_dict["client_id"]
authorized_user_info["client_secret"] = credentials_dict["client_secret"]
authorized_user_info["refresh_token"] = credentials_dict["refresh_token"]
authorized_user_info["token"] = credentials_dict["token"]
authorized_user_info["expiry"] = credentials_dict["expiry"]
token_json_str = json.dumps(authorized_user_info)
oauth_creds = get_google_oauth_creds(token_json_str=token_json_str, source=source)
# tell caller to update token stored in DB if the refresh token changed
if oauth_creds:
should_persist = regenerated_from_client_secret or oauth_creds.refresh_token != authorized_user_info["refresh_token"]
if should_persist:
# if oauth_interactive, sanitize the credentials so they don't get stored in the db
if authentication_method == GoogleOAuthAuthenticationMethod.OAUTH_INTERACTIVE:
oauth_creds_json_str = sanitize_oauth_credentials(oauth_creds)
else:
oauth_creds_json_str = oauth_creds.to_json()
new_creds_dict = {
DB_CREDENTIALS_DICT_TOKEN_KEY: oauth_creds_json_str,
DB_CREDENTIALS_PRIMARY_ADMIN_KEY: credentials[DB_CREDENTIALS_PRIMARY_ADMIN_KEY],
DB_CREDENTIALS_AUTHENTICATION_METHOD: authentication_method,
}
elif DB_CREDENTIALS_DICT_SERVICE_ACCOUNT_KEY in credentials:
# SERVICE ACCOUNT
service_account_key_json_str = credentials[DB_CREDENTIALS_DICT_SERVICE_ACCOUNT_KEY]
service_account_key = json.loads(service_account_key_json_str)
service_creds = ServiceAccountCredentials.from_service_account_info(service_account_key, scopes=GOOGLE_SCOPES[source])
        if not service_creds.valid or service_creds.expired:
service_creds.refresh(Request())
if not service_creds.valid:
raise PermissionError(f"Unable to access {source} - service account credentials are invalid.")
creds: ServiceAccountCredentials | OAuthCredentials | None = oauth_creds or service_creds
if creds is None:
raise PermissionError(f"Unable to access {source} - unknown credential structure.")
return creds, new_creds_dict
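# Illustrative sketch of the two credential dict shapes accepted above (the values are
# hypothetical placeholders, not real secrets; nothing here is executed):
#     oauth_credentials = {
#         DB_CREDENTIALS_DICT_TOKEN_KEY: '{"token": "...", "refresh_token": "...", "client_id": "...", "client_secret": "...", "expiry": "..."}',
#         DB_CREDENTIALS_PRIMARY_ADMIN_KEY: "admin@example.com",
#         DB_CREDENTIALS_AUTHENTICATION_METHOD: GoogleOAuthAuthenticationMethod.UPLOADED,
#     }
#     service_account_credentials = {
#         DB_CREDENTIALS_DICT_SERVICE_ACCOUNT_KEY: '{"type": "service_account", ...}',
#         DB_CREDENTIALS_PRIMARY_ADMIN_KEY: "admin@example.com",
#     }
#     creds, new_creds_dict = get_google_creds(oauth_credentials, DocumentSource.GOOGLE_DRIVE)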
def get_google_oauth_creds(token_json_str: str, source: DocumentSource) -> OAuthCredentials | None:
"""creds_json only needs to contain client_id, client_secret and refresh_token to
refresh the creds.
expiry and token are optional ... however, if passing in expiry, token
should also be passed in or else we may not return any creds.
(probably a sign we should refactor the function)
"""
creds_json = json.loads(token_json_str)
creds = OAuthCredentials.from_authorized_user_info(
info=creds_json,
scopes=GOOGLE_SCOPES[source],
)
if creds.valid:
return creds
if creds.expired and creds.refresh_token:
try:
creds.refresh(Request())
if creds.valid:
logging.info("Refreshed Google Drive tokens.")
return creds
except Exception:
logging.exception("Failed to refresh google drive access token")
return None
return None
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/google_util/auth.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/google_util/constant.py | from enum import Enum
from common.data_source.config import DocumentSource
SLIM_BATCH_SIZE = 500
# NOTE: do not need https://www.googleapis.com/auth/documents.readonly
# this is counted under `/auth/drive.readonly`
GOOGLE_SCOPES = {
DocumentSource.GOOGLE_DRIVE: [
"https://www.googleapis.com/auth/drive.readonly",
"https://www.googleapis.com/auth/drive.metadata.readonly",
"https://www.googleapis.com/auth/admin.directory.group.readonly",
"https://www.googleapis.com/auth/admin.directory.user.readonly",
],
DocumentSource.GMAIL: [
"https://www.googleapis.com/auth/gmail.readonly",
"https://www.googleapis.com/auth/admin.directory.user.readonly",
"https://www.googleapis.com/auth/admin.directory.group.readonly",
],
}
# This is the Oauth token
DB_CREDENTIALS_DICT_TOKEN_KEY = "google_tokens"
# This is the service account key
DB_CREDENTIALS_DICT_SERVICE_ACCOUNT_KEY = "google_service_account_key"
# The email saved for both auth types
DB_CREDENTIALS_PRIMARY_ADMIN_KEY = "google_primary_admin"
# https://developers.google.com/workspace/guides/create-credentials
# Internally defined authentication method type.
# The value must be one of "oauth_interactive" or "uploaded"
# Used to disambiguate whether credentials have already been created via
# certain methods and what actions we allow users to take
DB_CREDENTIALS_AUTHENTICATION_METHOD = "authentication_method"
class GoogleOAuthAuthenticationMethod(str, Enum):
OAUTH_INTERACTIVE = "oauth_interactive"
UPLOADED = "uploaded"
USER_FIELDS = "nextPageToken, users(primaryEmail)"
# Error message substrings
MISSING_SCOPES_ERROR_STR = "client not authorized for any of the scopes requested"
SCOPE_INSTRUCTIONS = ""
WEB_OAUTH_POPUP_TEMPLATE = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>{title}</title>
<style>
body {{
font-family: Arial, sans-serif;
background: #f8fafc;
color: #0f172a;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
min-height: 100vh;
margin: 0;
}}
.card {{
background: white;
padding: 32px;
border-radius: 12px;
box-shadow: 0 8px 30px rgba(15, 23, 42, 0.1);
max-width: 420px;
text-align: center;
}}
h1 {{
font-size: 1.5rem;
margin-bottom: 12px;
}}
p {{
font-size: 0.95rem;
line-height: 1.5;
}}
</style>
</head>
<body>
<div class="card">
<h1>{heading}</h1>
<p>{message}</p>
<p>You can close this window.</p>
</div>
<script>
(function(){{
if (window.opener) {{
window.opener.postMessage({payload_json}, "*");
}}
{auto_close}
}})();
</script>
</body>
</html>
"""
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/google_util/constant.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
infiniflow/ragflow:common/data_source/google_util/oauth_flow.py | import json
import os
import threading
from typing import Any, Callable
from common.data_source.config import DocumentSource
from common.data_source.google_util.constant import GOOGLE_SCOPES
def _get_requested_scopes(source: DocumentSource) -> list[str]:
"""Return the scopes to request, honoring an optional override env var."""
override = os.environ.get("GOOGLE_OAUTH_SCOPE_OVERRIDE", "")
if override.strip():
scopes = [scope.strip() for scope in override.split(",") if scope.strip()]
if scopes:
return scopes
return GOOGLE_SCOPES[source]
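# Illustrative sketch (not used by the connector): the override env var is a comma-separated list.
def _example_scope_override_usage() -> None:
    os.environ["GOOGLE_OAUTH_SCOPE_OVERRIDE"] = "https://www.googleapis.com/auth/drive.readonly"
    assert _get_requested_scopes(DocumentSource.GOOGLE_DRIVE) == ["https://www.googleapis.com/auth/drive.readonly"]
    del os.environ["GOOGLE_OAUTH_SCOPE_OVERRIDE"]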
def _get_oauth_timeout_secs() -> int:
raw_timeout = os.environ.get("GOOGLE_OAUTH_FLOW_TIMEOUT_SECS", "300").strip()
try:
timeout = int(raw_timeout)
except ValueError:
timeout = 300
return timeout
def _run_with_timeout(func: Callable[[], Any], timeout_secs: int, timeout_message: str) -> Any:
if timeout_secs <= 0:
return func()
result: dict[str, Any] = {}
error: dict[str, BaseException] = {}
def _target() -> None:
try:
result["value"] = func()
except BaseException as exc: # pragma: no cover
error["error"] = exc
thread = threading.Thread(target=_target, daemon=True)
thread.start()
thread.join(timeout_secs)
if thread.is_alive():
raise TimeoutError(timeout_message)
if "error" in error:
raise error["error"]
return result.get("value")
def _run_local_server_flow(client_config: dict[str, Any], source: DocumentSource) -> dict[str, Any]:
"""Launch the standard Google OAuth local-server flow to mint user tokens."""
from google_auth_oauthlib.flow import InstalledAppFlow # type: ignore
scopes = _get_requested_scopes(source)
flow = InstalledAppFlow.from_client_config(
client_config,
scopes=scopes,
)
open_browser = os.environ.get("GOOGLE_OAUTH_OPEN_BROWSER", "true").lower() != "false"
preferred_port = os.environ.get("GOOGLE_OAUTH_LOCAL_SERVER_PORT")
port = int(preferred_port) if preferred_port else 0
timeout_secs = _get_oauth_timeout_secs()
timeout_message = f"Google OAuth verification timed out after {timeout_secs} seconds. Close any pending consent windows and rerun the connector configuration to try again."
print("Launching Google OAuth flow. A browser window should open shortly.")
print("If it does not, copy the URL shown in the console into your browser manually.")
if timeout_secs > 0:
print(f"You have {timeout_secs} seconds to finish granting access before the request times out.")
try:
creds = _run_with_timeout(
lambda: flow.run_local_server(port=port, open_browser=open_browser, prompt="consent"),
timeout_secs,
timeout_message,
)
except OSError as exc:
allow_console = os.environ.get("GOOGLE_OAUTH_ALLOW_CONSOLE_FALLBACK", "true").lower() != "false"
if not allow_console:
raise
print(f"Local server flow failed ({exc}). Falling back to console-based auth.")
creds = _run_with_timeout(flow.run_console, timeout_secs, timeout_message)
except Warning as warning:
warning_msg = str(warning)
if "Scope has changed" in warning_msg:
instructions = [
"Google rejected one or more of the requested OAuth scopes.",
"Fix options:",
" 1. In Google Cloud Console, open APIs & Services > OAuth consent screen and add the missing scopes (Drive metadata + Admin Directory read scopes), then re-run the flow.",
" 2. Set GOOGLE_OAUTH_SCOPE_OVERRIDE to a comma-separated list of scopes you are allowed to request.",
]
raise RuntimeError("\n".join(instructions)) from warning
raise
token_dict: dict[str, Any] = json.loads(creds.to_json())
print("\nGoogle OAuth flow completed successfully.")
print("Copy the JSON blob below into GOOGLE_DRIVE_OAUTH_CREDENTIALS_JSON_STR to reuse these tokens without re-authenticating:\n")
print(json.dumps(token_dict, indent=2))
print()
return token_dict
def ensure_oauth_token_dict(credentials: dict[str, Any], source: DocumentSource) -> dict[str, Any]:
"""Return a dict that contains OAuth tokens, running the flow if only a client config is provided."""
if "refresh_token" in credentials and "token" in credentials:
return credentials
client_config: dict[str, Any] | None = None
if "installed" in credentials:
client_config = {"installed": credentials["installed"]}
elif "web" in credentials:
client_config = {"web": credentials["web"]}
if client_config is None:
raise ValueError("Provided Google OAuth credentials are missing both tokens and a client configuration.")
return _run_local_server_flow(client_config, source)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/google_util/oauth_flow.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/google_util/resource.py | import logging
from collections.abc import Callable
from typing import Any
from google.auth.exceptions import RefreshError # type: ignore
from google.oauth2.credentials import Credentials as OAuthCredentials  # type: ignore
from google.oauth2.service_account import Credentials as ServiceAccountCredentials  # type: ignore
from googleapiclient.discovery import (
Resource, # type: ignore
build, # type: ignore
)
class GoogleDriveService(Resource):
pass
class GoogleDocsService(Resource):
pass
class AdminService(Resource):
pass
class GmailService(Resource):
pass
class RefreshableDriveObject:
"""
Running Google Drive service retrieval functions
involves accessing methods of the service object (i.e. files().list())
which can raise a RefreshError if the access token is expired.
This class is a wrapper that propagates the ability to refresh the access token
and retry the final retrieval function until execute() is called.
"""
def __init__(
self,
call_stack: Callable[[ServiceAccountCredentials | OAuthCredentials], Any],
creds: ServiceAccountCredentials | OAuthCredentials,
creds_getter: Callable[..., ServiceAccountCredentials | OAuthCredentials],
):
self.call_stack = call_stack
self.creds = creds
self.creds_getter = creds_getter
def __getattr__(self, name: str) -> Any:
if name == "execute":
return self.make_refreshable_execute()
return RefreshableDriveObject(
lambda creds: getattr(self.call_stack(creds), name),
self.creds,
self.creds_getter,
)
def __call__(self, *args: Any, **kwargs: Any) -> Any:
return RefreshableDriveObject(
lambda creds: self.call_stack(creds)(*args, **kwargs),
self.creds,
self.creds_getter,
)
def make_refreshable_execute(self) -> Callable:
def execute(*args: Any, **kwargs: Any) -> Any:
try:
return self.call_stack(self.creds).execute(*args, **kwargs)
except RefreshError as e:
logging.warning(f"RefreshError, going to attempt a creds refresh and retry: {e}")
# Refresh the access token
self.creds = self.creds_getter()
return self.call_stack(self.creds).execute(*args, **kwargs)
return execute
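# Illustrative usage sketch (assumes valid credentials and a refresh callback; nothing here
# is executed by this module; initial_creds and fetch_fresh_creds are hypothetical names):
#     drive = RefreshableDriveObject(
#         call_stack=lambda c: get_drive_service(c),
#         creds=initial_creds,
#         creds_getter=fetch_fresh_creds,
#     )
#     files = drive.files().list(pageSize=10, fields="files(id, name)").execute()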
def _get_google_service(
service_name: str,
service_version: str,
creds: ServiceAccountCredentials | OAuthCredentials,
user_email: str | None = None,
) -> GoogleDriveService | GoogleDocsService | AdminService | GmailService:
service: Resource
if isinstance(creds, ServiceAccountCredentials):
# NOTE: https://developers.google.com/identity/protocols/oauth2/service-account#error-codes
creds = creds.with_subject(user_email)
service = build(service_name, service_version, credentials=creds)
elif isinstance(creds, OAuthCredentials):
service = build(service_name, service_version, credentials=creds)
return service
def get_google_docs_service(
creds: ServiceAccountCredentials | OAuthCredentials,
user_email: str | None = None,
) -> GoogleDocsService:
return _get_google_service("docs", "v1", creds, user_email)
def get_drive_service(
creds: ServiceAccountCredentials | OAuthCredentials,
user_email: str | None = None,
) -> GoogleDriveService:
return _get_google_service("drive", "v3", creds, user_email)
def get_admin_service(
creds: ServiceAccountCredentials | OAuthCredentials,
user_email: str | None = None,
) -> AdminService:
return _get_google_service("admin", "directory_v1", creds, user_email)
def get_gmail_service(
creds: ServiceAccountCredentials | OAuthCredentials,
user_email: str | None = None,
) -> GmailService:
return _get_google_service("gmail", "v1", creds, user_email)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/google_util/resource.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:common/data_source/google_util/util.py | import json
import logging
import os
import re
import socket
from collections.abc import Callable, Iterator
from enum import Enum
from typing import Any
import unicodedata
from googleapiclient.errors import HttpError  # type: ignore
from common.data_source.config import DocumentSource
from common.data_source.google_drive.model import GoogleDriveFileType
from common.data_source.google_util.oauth_flow import ensure_oauth_token_dict
# See https://developers.google.com/drive/api/reference/rest/v3/files/list for more
class GoogleFields(str, Enum):
ID = "id"
CREATED_TIME = "createdTime"
MODIFIED_TIME = "modifiedTime"
NAME = "name"
SIZE = "size"
PARENTS = "parents"
NEXT_PAGE_TOKEN_KEY = "nextPageToken"
PAGE_TOKEN_KEY = "pageToken"
ORDER_BY_KEY = "orderBy"
def get_file_owners(file: GoogleDriveFileType, primary_admin_email: str) -> list[str]:
"""
Get the owners of a file if the attribute is present.
"""
return [email for owner in file.get("owners", []) if (email := owner.get("emailAddress")) and email.split("@")[-1] == primary_admin_email.split("@")[-1]]
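# Illustrative sketch (not used by the connector); the example addresses are made up.
def _example_get_file_owners_usage() -> None:
    file = {"owners": [{"emailAddress": "alice@acme.com"}, {"emailAddress": "bob@other.io"}]}
    assert get_file_owners(file, "admin@acme.com") == ["alice@acme.com"]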
# Included for type purposes; callers of this wrapper never see page-token strings
# because they are filtered out below. Use
# execute_paginated_retrieval_with_max_pages instead if you want the early stop
# plus the trailing page token after max_num_pages.
def execute_paginated_retrieval(
retrieval_function: Callable,
list_key: str | None = None,
continue_on_404_or_403: bool = False,
**kwargs: Any,
) -> Iterator[GoogleDriveFileType]:
for item in _execute_paginated_retrieval(
retrieval_function,
list_key,
continue_on_404_or_403,
**kwargs,
):
if not isinstance(item, str):
yield item
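# Illustrative usage sketch (requires an authenticated Drive service; not executed here).
# Note that the requested fields must include nextPageToken:
#     for file in execute_paginated_retrieval(
#         retrieval_function=service.files().list,
#         list_key="files",
#         fields="nextPageToken, files(id, name)",
#         q="trashed = false",
#     ):
#         print(file["id"], file["name"])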
def execute_paginated_retrieval_with_max_pages(
retrieval_function: Callable,
max_num_pages: int,
list_key: str | None = None,
continue_on_404_or_403: bool = False,
**kwargs: Any,
) -> Iterator[GoogleDriveFileType | str]:
yield from _execute_paginated_retrieval(
retrieval_function,
list_key,
continue_on_404_or_403,
max_num_pages=max_num_pages,
**kwargs,
)
def _execute_paginated_retrieval(
retrieval_function: Callable,
list_key: str | None = None,
continue_on_404_or_403: bool = False,
max_num_pages: int | None = None,
**kwargs: Any,
) -> Iterator[GoogleDriveFileType | str]:
"""Execute a paginated retrieval from Google Drive API
Args:
retrieval_function: The specific list function to call (e.g., service.files().list)
list_key: If specified, each object returned by the retrieval function
will be accessed at the specified key and yielded from.
continue_on_404_or_403: If True, the retrieval will continue even if the request returns a 404 or 403 error.
        max_num_pages: If specified, the retrieval will stop after the specified number of pages
            and yield the next page token so the caller can resume later.
**kwargs: Arguments to pass to the list function
"""
if "fields" not in kwargs or "nextPageToken" not in kwargs["fields"]:
raise ValueError("fields must contain nextPageToken for execute_paginated_retrieval")
next_page_token = kwargs.get(PAGE_TOKEN_KEY, "")
num_pages = 0
while next_page_token is not None:
if max_num_pages is not None and num_pages >= max_num_pages:
yield next_page_token
return
num_pages += 1
request_kwargs = kwargs.copy()
if next_page_token:
request_kwargs[PAGE_TOKEN_KEY] = next_page_token
results = _execute_single_retrieval(
retrieval_function,
continue_on_404_or_403,
**request_kwargs,
)
next_page_token = results.get(NEXT_PAGE_TOKEN_KEY)
if list_key:
for item in results.get(list_key, []):
yield item
else:
yield results
def _execute_single_retrieval(
retrieval_function: Callable,
continue_on_404_or_403: bool = False,
**request_kwargs: Any,
) -> GoogleDriveFileType:
"""Execute a single retrieval from Google Drive API"""
try:
results = retrieval_function(**request_kwargs).execute()
except HttpError as e:
        if e.resp.status >= 500:
            # Transient server-side error: retry the same request once with its original arguments.
            results = retrieval_function(**request_kwargs).execute()
elif e.resp.status == 400:
if "pageToken" in request_kwargs and "Invalid Value" in str(e) and "pageToken" in str(e):
logging.warning(f"Invalid page token: {request_kwargs['pageToken']}, retrying from start of request")
request_kwargs.pop("pageToken")
return _execute_single_retrieval(
retrieval_function,
continue_on_404_or_403,
**request_kwargs,
)
logging.error(f"Error executing request: {e}")
raise e
elif e.resp.status == 404 or e.resp.status == 403:
if continue_on_404_or_403:
logging.debug(f"Error executing request: {e}")
results = {}
else:
raise e
        elif e.resp.status == 429:
            # Rate limited: retry the same request once with its original arguments.
            results = retrieval_function(**request_kwargs).execute()
else:
logging.exception("Error executing request:")
raise e
except (TimeoutError, socket.timeout) as error:
logging.warning(
"Timed out executing Google API request; retrying with backoff. Details: %s",
error,
)
        results = retrieval_function(**request_kwargs).execute()
return results
def get_credentials_from_env(email: str, oauth: bool = False, source="drive") -> dict:
try:
if oauth:
raw_credential_string = os.environ["GOOGLE_OAUTH_CREDENTIALS_JSON_STR"]
else:
raw_credential_string = os.environ["GOOGLE_SERVICE_ACCOUNT_JSON_STR"]
except KeyError:
raise ValueError("Missing Google Drive credentials in environment variables")
try:
credential_dict = json.loads(raw_credential_string)
except json.JSONDecodeError:
raise ValueError("Invalid JSON in Google Drive credentials")
if oauth and source == "drive":
credential_dict = ensure_oauth_token_dict(credential_dict, DocumentSource.GOOGLE_DRIVE)
else:
credential_dict = ensure_oauth_token_dict(credential_dict, DocumentSource.GMAIL)
refried_credential_string = json.dumps(credential_dict)
DB_CREDENTIALS_DICT_TOKEN_KEY = "google_tokens"
DB_CREDENTIALS_DICT_SERVICE_ACCOUNT_KEY = "google_service_account_key"
DB_CREDENTIALS_PRIMARY_ADMIN_KEY = "google_primary_admin"
DB_CREDENTIALS_AUTHENTICATION_METHOD = "authentication_method"
cred_key = DB_CREDENTIALS_DICT_TOKEN_KEY if oauth else DB_CREDENTIALS_DICT_SERVICE_ACCOUNT_KEY
return {
cred_key: refried_credential_string,
DB_CREDENTIALS_PRIMARY_ADMIN_KEY: email,
DB_CREDENTIALS_AUTHENTICATION_METHOD: "uploaded",
}
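# Illustrative sketch (editor's addition, not part of the original module): with
# GOOGLE_SERVICE_ACCOUNT_JSON_STR exported in the environment, the helper above returns a
# dict shaped roughly like
#   {"google_service_account_key": "<re-serialized JSON>",
#    "google_primary_admin": "[email protected]",
#    "authentication_method": "uploaded"}
# With oauth=True the JSON is read from GOOGLE_OAUTH_CREDENTIALS_JSON_STR instead and is
# stored under the "google_tokens" key. The email above is a placeholder.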
def clean_string(text: str | None) -> str | None:
"""
Clean a string to make it safe for insertion into MySQL (utf8mb4).
- Normalize Unicode
- Remove control characters / zero-width characters
- Optionally remove high-plane emoji and symbols
"""
if text is None:
return None
# 0. Ensure the value is a string
text = str(text)
# 1. Normalize Unicode (NFC)
text = unicodedata.normalize("NFC", text)
# 2. Remove ASCII control characters (except tab, newline, carriage return)
text = re.sub(r"[\x00-\x08\x0b\x0c\x0e-\x1f\x7f]", "", text)
# 3. Remove zero-width characters / BOM
text = re.sub(r"[\u200b-\u200d\uFEFF]", "", text)
# 4. Remove high Unicode characters (emoji, special symbols)
text = re.sub(r"[\U00010000-\U0010FFFF]", "", text)
# 5. Final fallback: strip any invalid UTF-8 sequences
try:
text.encode("utf-8")
except UnicodeEncodeError:
text = text.encode("utf-8", errors="ignore").decode("utf-8")
return text | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/google_util/util.py",
"license": "Apache License 2.0",
"lines": 192,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/google_util/util_threadpool_concurrency.py | import collections.abc
import copy
import threading
from collections.abc import Callable, Iterator, MutableMapping
from typing import Any, TypeVar, overload
from pydantic import GetCoreSchemaHandler
from pydantic_core import core_schema
R = TypeVar("R")
KT = TypeVar("KT") # Key type
VT = TypeVar("VT") # Value type
_T = TypeVar("_T") # Default type
class ThreadSafeDict(MutableMapping[KT, VT]):
"""
A thread-safe dictionary implementation that uses a lock to ensure thread safety.
Implements the MutableMapping interface to provide a complete dictionary-like interface.
Example usage:
# Create a thread-safe dictionary
safe_dict: ThreadSafeDict[str, int] = ThreadSafeDict()
# Basic operations (atomic)
safe_dict["key"] = 1
value = safe_dict["key"]
del safe_dict["key"]
# Bulk operations (atomic)
safe_dict.update({"key1": 1, "key2": 2})
"""
def __init__(self, input_dict: dict[KT, VT] | None = None) -> None:
self._dict: dict[KT, VT] = input_dict or {}
self.lock = threading.Lock()
def __getitem__(self, key: KT) -> VT:
with self.lock:
return self._dict[key]
def __setitem__(self, key: KT, value: VT) -> None:
with self.lock:
self._dict[key] = value
def __delitem__(self, key: KT) -> None:
with self.lock:
del self._dict[key]
def __iter__(self) -> Iterator[KT]:
# Return a snapshot of keys to avoid potential modification during iteration
with self.lock:
return iter(list(self._dict.keys()))
def __len__(self) -> int:
with self.lock:
return len(self._dict)
@classmethod
def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
return core_schema.no_info_after_validator_function(cls.validate, handler(dict[KT, VT]))
@classmethod
def validate(cls, v: Any) -> "ThreadSafeDict[KT, VT]":
if isinstance(v, dict):
return ThreadSafeDict(v)
return v
def __deepcopy__(self, memo: Any) -> "ThreadSafeDict[KT, VT]":
return ThreadSafeDict(copy.deepcopy(self._dict))
def clear(self) -> None:
"""Remove all items from the dictionary atomically."""
with self.lock:
self._dict.clear()
def copy(self) -> dict[KT, VT]:
"""Return a shallow copy of the dictionary atomically."""
with self.lock:
return self._dict.copy()
@overload
def get(self, key: KT) -> VT | None: ...
@overload
def get(self, key: KT, default: VT | _T) -> VT | _T: ...
def get(self, key: KT, default: Any = None) -> Any:
"""Get a value with a default, atomically."""
with self.lock:
return self._dict.get(key, default)
def pop(self, key: KT, default: Any = None) -> Any:
"""Remove and return a value with optional default, atomically."""
with self.lock:
if default is None:
return self._dict.pop(key)
return self._dict.pop(key, default)
def setdefault(self, key: KT, default: VT) -> VT:
"""Set a default value if key is missing, atomically."""
with self.lock:
return self._dict.setdefault(key, default)
def update(self, *args: Any, **kwargs: VT) -> None:
"""Update the dictionary atomically from another mapping or from kwargs."""
with self.lock:
self._dict.update(*args, **kwargs)
def items(self) -> collections.abc.ItemsView[KT, VT]:
"""Return a view of (key, value) pairs atomically."""
with self.lock:
return collections.abc.ItemsView(self)
def keys(self) -> collections.abc.KeysView[KT]:
"""Return a view of keys atomically."""
with self.lock:
return collections.abc.KeysView(self)
def values(self) -> collections.abc.ValuesView[VT]:
"""Return a view of values atomically."""
with self.lock:
return collections.abc.ValuesView(self)
@overload
def atomic_get_set(self, key: KT, value_callback: Callable[[VT], VT], default: VT) -> tuple[VT, VT]: ...
@overload
def atomic_get_set(self, key: KT, value_callback: Callable[[VT | _T], VT], default: VT | _T) -> tuple[VT | _T, VT]: ...
def atomic_get_set(self, key: KT, value_callback: Callable[[Any], VT], default: Any = None) -> tuple[Any, VT]:
"""Replace a value from the dict with a function applied to the previous value, atomically.
Returns:
A tuple of the previous value and the new value.
"""
with self.lock:
val = self._dict.get(key, default)
new_val = value_callback(val)
self._dict[key] = new_val
return val, new_val
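# Illustrative usage sketch (editor's addition, not part of the original module). The
# function below is hypothetical and never called here; it shows how atomic_get_set gives a
# read-modify-write that is serialized by the internal lock, so concurrent increments are
# never lost.
def _example_concurrent_counter() -> int:
    counts: ThreadSafeDict[str, int] = ThreadSafeDict()

    def bump() -> None:
        # Atomically increment the counter, starting from 0 when the key is missing.
        counts.atomic_get_set("requests", lambda current: current + 1, default=0)

    workers = [threading.Thread(target=bump) for _ in range(8)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    return counts.get("requests", 0)  # 8 once all workers have finished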
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/google_util/util_threadpool_concurrency.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:agent/component/varaiable_aggregator.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import os
from common.connection_utils import timeout
from agent.component.base import ComponentBase, ComponentParamBase
class VariableAggregatorParam(ComponentParamBase):
"""
Parameters for VariableAggregator
- groups: list of dicts {"group_name": str, "variables": [variable selectors]}
"""
def __init__(self):
super().__init__()
# each group expects: {"group_name": str, "variables": List[str]}
self.groups = []
def check(self):
self.check_empty(self.groups, "[VariableAggregator] groups")
for g in self.groups:
if not g.get("group_name"):
raise ValueError("[VariableAggregator] group_name can not be empty!")
if not g.get("variables"):
raise ValueError(
f"[VariableAggregator] variables of group `{g.get('group_name')}` can not be empty"
)
if not isinstance(g.get("variables"), list):
raise ValueError(
f"[VariableAggregator] variables of group `{g.get('group_name')}` should be a list of strings"
)
def get_input_form(self) -> dict[str, dict]:
return {
"variables": {
"name": "Variables",
"type": "list",
}
}
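# Illustrative configuration sketch (editor's addition, not part of the original module).
# A `groups` value is a list of dicts; for each group the component outputs the first
# variable selector that resolves to a truthy value, e.g.:
#   groups = [
#       {"group_name": "answer", "variables": [{"value": "llm_1@content"}, {"value": "retrieval_1@chunks"}]},
#   ]
# The selector strings above are hypothetical; they follow whatever convention the
# surrounding canvas uses for variable references.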
class VariableAggregator(ComponentBase):
component_name = "VariableAggregator"
@timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 3)))
def _invoke(self, **kwargs):
# Group mode: for each group, pick the first available variable
for group in self._param.groups:
gname = group.get("group_name")
# record candidate selectors within this group
self.set_input_value(f"{gname}.variables", list(group.get("variables", [])))
for selector in group.get("variables", []):
val = self._canvas.get_variable_value(selector['value'])
if val:
self.set_output(gname, val)
break
@staticmethod
def _to_object(value: Any) -> Any:
# Try to convert value to serializable object if it has to_object()
try:
return value.to_object() # type: ignore[attr-defined]
except Exception:
return value
def thoughts(self) -> str:
return "Aggregating variables from canvas and grouping as configured."
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/component/varaiable_aggregator.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/exceptions.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TaskCanceledException(Exception):
def __init__(self, msg):
self.msg = msg
class ArgumentException(Exception):
def __init__(self, msg):
self.msg = msg
class NotFoundException(Exception):
def __init__(self, msg):
self.msg = msg
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/exceptions.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:common/settings.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import secrets
from datetime import date
import logging
from common.constants import RAG_FLOW_SERVICE_NAME
from common.file_utils import get_project_base_directory
from common.config_utils import get_base_config, decrypt_database_config
from common.misc_utils import pip_install_torch
from common.constants import SVR_QUEUE_NAME, Storage
import rag.utils
import rag.utils.es_conn
import rag.utils.infinity_conn
import rag.utils.ob_conn
import rag.utils.opensearch_conn
from rag.utils.azure_sas_conn import RAGFlowAzureSasBlob
from rag.utils.azure_spn_conn import RAGFlowAzureSpnBlob
from rag.utils.gcs_conn import RAGFlowGCS
from rag.utils.minio_conn import RAGFlowMinio
from rag.utils.opendal_conn import OpenDALStorage
from rag.utils.s3_conn import RAGFlowS3
from rag.utils.oss_conn import RAGFlowOSS
from rag.nlp import search
import memory.utils.es_conn as memory_es_conn
import memory.utils.infinity_conn as memory_infinity_conn
import memory.utils.ob_conn as memory_ob_conn
LLM = None
LLM_FACTORY = None
LLM_BASE_URL = None
CHAT_MDL = ""
EMBEDDING_MDL = ""
RERANK_MDL = ""
ASR_MDL = ""
IMAGE2TEXT_MDL = ""
CHAT_CFG = ""
EMBEDDING_CFG = ""
RERANK_CFG = ""
ASR_CFG = ""
IMAGE2TEXT_CFG = ""
API_KEY = None
PARSERS = None
HOST_IP = None
HOST_PORT = None
SECRET_KEY = None
FACTORY_LLM_INFOS = None
ALLOWED_LLM_FACTORIES = None
DATABASE_TYPE = os.getenv("DB_TYPE", "mysql")
DATABASE = decrypt_database_config(name=DATABASE_TYPE)
# authentication
AUTHENTICATION_CONF = None
# client
CLIENT_AUTHENTICATION = None
HTTP_APP_KEY = None
GITHUB_OAUTH = None
FEISHU_OAUTH = None
OAUTH_CONFIG = None
DOC_ENGINE = os.getenv('DOC_ENGINE', 'elasticsearch')
DOC_ENGINE_INFINITY = (DOC_ENGINE.lower() == "infinity")
DOC_ENGINE_OCEANBASE = (DOC_ENGINE.lower() == "oceanbase")
docStoreConn = None
msgStoreConn = None
retriever = None
kg_retriever = None
# user registration switch
REGISTER_ENABLED = 1
# SSO-only mode: hide password login form
DISABLE_PASSWORD_LOGIN = False
# sandbox-executor-manager
SANDBOX_HOST = None
STRONG_TEST_COUNT = int(os.environ.get("STRONG_TEST_COUNT", "8"))
SMTP_CONF = None
MAIL_SERVER = ""
MAIL_PORT = 0
MAIL_USE_SSL = True
MAIL_USE_TLS = False
MAIL_USERNAME = ""
MAIL_PASSWORD = ""
MAIL_DEFAULT_SENDER = ()
MAIL_FRONTEND_URL = ""
# move from rag.settings
ES = {}
INFINITY = {}
AZURE = {}
S3 = {}
MINIO = {}
OB = {}
OSS = {}
OS = {}
GCS = {}
DOC_MAXIMUM_SIZE: int = 128 * 1024 * 1024
DOC_BULK_SIZE: int = 4
EMBEDDING_BATCH_SIZE: int = 16
PARALLEL_DEVICES: int = 0
STORAGE_IMPL_TYPE = os.getenv('STORAGE_IMPL', 'MINIO')
STORAGE_IMPL = None
def get_svr_queue_name(priority: int) -> str:
if priority == 0:
return SVR_QUEUE_NAME
return f"{SVR_QUEUE_NAME}_{priority}"
def get_svr_queue_names():
return [get_svr_queue_name(priority) for priority in [1, 0]]
def _get_or_create_secret_key():
secret_key = os.environ.get("RAGFLOW_SECRET_KEY")
if secret_key and len(secret_key) >= 32:
return secret_key
# Check if there's a configured secret key
configured_key = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("secret_key")
if configured_key and configured_key != str(date.today()) and len(configured_key) >= 32:
return configured_key
# Generate a new secure key and warn about it
new_key = secrets.token_hex(32)
logging.warning("SECURITY WARNING: Using auto-generated SECRET_KEY.")
return new_key
class StorageFactory:
storage_mapping = {
Storage.MINIO: RAGFlowMinio,
Storage.AZURE_SPN: RAGFlowAzureSpnBlob,
Storage.AZURE_SAS: RAGFlowAzureSasBlob,
Storage.AWS_S3: RAGFlowS3,
Storage.OSS: RAGFlowOSS,
Storage.OPENDAL: OpenDALStorage,
Storage.GCS: RAGFlowGCS,
}
@classmethod
def create(cls, storage: Storage):
return cls.storage_mapping[storage]()
def init_settings():
global DATABASE_TYPE, DATABASE
DATABASE_TYPE = os.getenv("DB_TYPE", "mysql")
DATABASE = decrypt_database_config(name=DATABASE_TYPE)
global ALLOWED_LLM_FACTORIES, LLM_FACTORY, LLM_BASE_URL
llm_settings = get_base_config("user_default_llm", {}) or {}
llm_default_models = llm_settings.get("default_models", {}) or {}
LLM_FACTORY = llm_settings.get("factory", "") or ""
LLM_BASE_URL = llm_settings.get("base_url", "") or ""
ALLOWED_LLM_FACTORIES = llm_settings.get("allowed_factories", None)
global REGISTER_ENABLED
try:
REGISTER_ENABLED = int(os.environ.get("REGISTER_ENABLED", "1"))
except Exception:
pass
global DISABLE_PASSWORD_LOGIN
try:
env_val = os.environ.get("DISABLE_PASSWORD_LOGIN", "").lower()
if env_val in ("1", "true", "yes"):
DISABLE_PASSWORD_LOGIN = True
else:
authentication_conf = get_base_config("authentication", {})
DISABLE_PASSWORD_LOGIN = bool(authentication_conf.get("disable_password_login", False))
except Exception:
pass
global FACTORY_LLM_INFOS
try:
with open(os.path.join(get_project_base_directory(), "conf", "llm_factories.json"), "r") as f:
FACTORY_LLM_INFOS = json.load(f)["factory_llm_infos"]
except Exception:
FACTORY_LLM_INFOS = []
global API_KEY
API_KEY = llm_settings.get("api_key")
global PARSERS
PARSERS = llm_settings.get(
"parsers", "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,email:Email,tag:Tag"
)
global CHAT_MDL, EMBEDDING_MDL, RERANK_MDL, ASR_MDL, IMAGE2TEXT_MDL
chat_entry = _parse_model_entry(llm_default_models.get("chat_model", CHAT_MDL))
embedding_entry = _parse_model_entry(llm_default_models.get("embedding_model", EMBEDDING_MDL))
rerank_entry = _parse_model_entry(llm_default_models.get("rerank_model", RERANK_MDL))
asr_entry = _parse_model_entry(llm_default_models.get("asr_model", ASR_MDL))
image2text_entry = _parse_model_entry(llm_default_models.get("image2text_model", IMAGE2TEXT_MDL))
global CHAT_CFG, EMBEDDING_CFG, RERANK_CFG, ASR_CFG, IMAGE2TEXT_CFG
CHAT_CFG = _resolve_per_model_config(chat_entry, LLM_FACTORY, API_KEY, LLM_BASE_URL)
EMBEDDING_CFG = _resolve_per_model_config(embedding_entry, LLM_FACTORY, API_KEY, LLM_BASE_URL)
RERANK_CFG = _resolve_per_model_config(rerank_entry, LLM_FACTORY, API_KEY, LLM_BASE_URL)
ASR_CFG = _resolve_per_model_config(asr_entry, LLM_FACTORY, API_KEY, LLM_BASE_URL)
IMAGE2TEXT_CFG = _resolve_per_model_config(image2text_entry, LLM_FACTORY, API_KEY, LLM_BASE_URL)
CHAT_MDL = CHAT_CFG.get("model", "") or ""
EMBEDDING_MDL = EMBEDDING_CFG.get("model", "") or ""
compose_profiles = os.getenv("COMPOSE_PROFILES", "")
if "tei-" in compose_profiles:
EMBEDDING_MDL = os.getenv("TEI_MODEL", EMBEDDING_MDL or "BAAI/bge-small-en-v1.5")
RERANK_MDL = RERANK_CFG.get("model", "") or ""
ASR_MDL = ASR_CFG.get("model", "") or ""
IMAGE2TEXT_MDL = IMAGE2TEXT_CFG.get("model", "") or ""
global HOST_IP, HOST_PORT
HOST_IP = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("host", "127.0.0.1")
HOST_PORT = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("http_port")
global SECRET_KEY
SECRET_KEY = _get_or_create_secret_key()
# authentication
authentication_conf = get_base_config("authentication", {})
global CLIENT_AUTHENTICATION, HTTP_APP_KEY, GITHUB_OAUTH, FEISHU_OAUTH, OAUTH_CONFIG
# client
CLIENT_AUTHENTICATION = authentication_conf.get("client", {}).get("switch", False)
HTTP_APP_KEY = authentication_conf.get("client", {}).get("http_app_key")
GITHUB_OAUTH = get_base_config("oauth", {}).get("github")
FEISHU_OAUTH = get_base_config("oauth", {}).get("feishu")
OAUTH_CONFIG = get_base_config("oauth", {})
global DOC_ENGINE, DOC_ENGINE_INFINITY, DOC_ENGINE_OCEANBASE, docStoreConn, ES, OB, OS, INFINITY
DOC_ENGINE = os.environ.get("DOC_ENGINE", "elasticsearch").strip()
DOC_ENGINE_INFINITY = (DOC_ENGINE.lower() == "infinity")
DOC_ENGINE_OCEANBASE = (DOC_ENGINE.lower() == "oceanbase")
lower_case_doc_engine = DOC_ENGINE.lower()
if lower_case_doc_engine == "elasticsearch":
ES = get_base_config("es", {})
docStoreConn = rag.utils.es_conn.ESConnection()
elif lower_case_doc_engine == "infinity":
INFINITY = get_base_config("infinity", {
"uri": "infinity:23817",
"postgres_port": 5432,
"db_name": "default_db"
})
docStoreConn = rag.utils.infinity_conn.InfinityConnection()
elif lower_case_doc_engine == "opensearch":
OS = get_base_config("os", {})
docStoreConn = rag.utils.opensearch_conn.OSConnection()
elif lower_case_doc_engine == "oceanbase":
OB = get_base_config("oceanbase", {})
docStoreConn = rag.utils.ob_conn.OBConnection()
elif lower_case_doc_engine == "seekdb":
OB = get_base_config("seekdb", {})
docStoreConn = rag.utils.ob_conn.OBConnection()
else:
raise Exception(f"Not supported doc engine: {DOC_ENGINE}")
global msgStoreConn
# use the same engine for message store
    if lower_case_doc_engine == "elasticsearch":
        ES = get_base_config("es", {})
        msgStoreConn = memory_es_conn.ESConnection()
    elif lower_case_doc_engine == "infinity":
INFINITY = get_base_config("infinity", {
"uri": "infinity:23817",
"postgres_port": 5432,
"db_name": "default_db"
})
msgStoreConn = memory_infinity_conn.InfinityConnection()
elif lower_case_doc_engine in ["oceanbase", "seekdb"]:
msgStoreConn = memory_ob_conn.OBConnection()
global AZURE, S3, MINIO, OSS, GCS
if STORAGE_IMPL_TYPE in ['AZURE_SPN', 'AZURE_SAS']:
AZURE = get_base_config("azure", {})
elif STORAGE_IMPL_TYPE == 'AWS_S3':
S3 = get_base_config("s3", {})
elif STORAGE_IMPL_TYPE == 'MINIO':
MINIO = decrypt_database_config(name="minio")
elif STORAGE_IMPL_TYPE == 'OSS':
OSS = get_base_config("oss", {})
elif STORAGE_IMPL_TYPE == 'GCS':
GCS = get_base_config("gcs", {})
global STORAGE_IMPL
storage_impl = StorageFactory.create(Storage[STORAGE_IMPL_TYPE])
# Define crypto settings
crypto_enabled = os.environ.get("RAGFLOW_CRYPTO_ENABLED", "false").lower() == "true"
# Check if encryption is enabled
if crypto_enabled:
try:
from rag.utils.encrypted_storage import create_encrypted_storage
algorithm = os.environ.get("RAGFLOW_CRYPTO_ALGORITHM", "aes-256-cbc")
crypto_key = os.environ.get("RAGFLOW_CRYPTO_KEY")
STORAGE_IMPL = create_encrypted_storage(storage_impl,
algorithm=algorithm,
key=crypto_key,
encryption_enabled=crypto_enabled)
except Exception as e:
logging.error(f"Failed to initialize encrypted storage: {e}")
STORAGE_IMPL = storage_impl
else:
STORAGE_IMPL = storage_impl
global retriever, kg_retriever
retriever = search.Dealer(docStoreConn)
from rag.graphrag import search as kg_search
kg_retriever = kg_search.KGSearch(docStoreConn)
global SANDBOX_HOST
if int(os.environ.get("SANDBOX_ENABLED", "0")):
SANDBOX_HOST = os.environ.get("SANDBOX_HOST", "sandbox-executor-manager")
global SMTP_CONF
SMTP_CONF = get_base_config("smtp", {})
global MAIL_SERVER, MAIL_PORT, MAIL_USE_SSL, MAIL_USE_TLS, MAIL_USERNAME, MAIL_PASSWORD, MAIL_DEFAULT_SENDER, MAIL_FRONTEND_URL
MAIL_SERVER = SMTP_CONF.get("mail_server", "")
    MAIL_PORT = SMTP_CONF.get("mail_port", 0)
MAIL_USE_SSL = SMTP_CONF.get("mail_use_ssl", True)
MAIL_USE_TLS = SMTP_CONF.get("mail_use_tls", False)
MAIL_USERNAME = SMTP_CONF.get("mail_username", "")
MAIL_PASSWORD = SMTP_CONF.get("mail_password", "")
mail_default_sender = SMTP_CONF.get("mail_default_sender", [])
if mail_default_sender and len(mail_default_sender) >= 2:
MAIL_DEFAULT_SENDER = (mail_default_sender[0], mail_default_sender[1])
MAIL_FRONTEND_URL = SMTP_CONF.get("mail_frontend_url", "")
global DOC_MAXIMUM_SIZE, DOC_BULK_SIZE, EMBEDDING_BATCH_SIZE
DOC_MAXIMUM_SIZE = int(os.environ.get("MAX_CONTENT_LENGTH", 128 * 1024 * 1024))
DOC_BULK_SIZE = int(os.environ.get("DOC_BULK_SIZE", 4))
EMBEDDING_BATCH_SIZE = int(os.environ.get("EMBEDDING_BATCH_SIZE", 16))
os.environ["DOTNET_SYSTEM_GLOBALIZATION_INVARIANT"] = "1"
def check_and_install_torch():
global PARALLEL_DEVICES
try:
pip_install_torch()
import torch.cuda
PARALLEL_DEVICES = torch.cuda.device_count()
logging.info(f"found {PARALLEL_DEVICES} gpus")
except Exception:
logging.info("can't import package 'torch'")
def _parse_model_entry(entry):
if isinstance(entry, str):
return {"name": entry, "factory": None, "api_key": None, "base_url": None}
if isinstance(entry, dict):
name = entry.get("name") or entry.get("model") or ""
return {
"name": name,
"factory": entry.get("factory"),
"api_key": entry.get("api_key"),
"base_url": entry.get("base_url"),
}
return {"name": "", "factory": None, "api_key": None, "base_url": None}
def _resolve_per_model_config(entry_dict, backup_factory, backup_api_key, backup_base_url):
name = (entry_dict.get("name") or "").strip()
m_factory = entry_dict.get("factory") or backup_factory or ""
m_api_key = entry_dict.get("api_key") or backup_api_key or ""
m_base_url = entry_dict.get("base_url") or backup_base_url or ""
if name and "@" not in name and m_factory:
name = f"{name}@{m_factory}"
return {
"model": name,
"factory": m_factory,
"api_key": m_api_key,
"base_url": m_base_url,
}
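# Illustrative example (editor's addition, not part of the original module): a per-model
# entry from user_default_llm.default_models such as
#   {"name": "qwen-max", "api_key": "sk-..."}          # placeholder values
# combined with a tenant-level factory of "SomeFactory" resolves to
#   {"model": "qwen-max@SomeFactory", "factory": "SomeFactory", "api_key": "sk-...", "base_url": ""}
# because _resolve_per_model_config appends "@<factory>" whenever the name has no "@" suffix.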
def print_rag_settings():
logging.info(f"MAX_CONTENT_LENGTH: {DOC_MAXIMUM_SIZE}")
logging.info(f"MAX_FILE_COUNT_PER_USER: {int(os.environ.get('MAX_FILE_NUM_PER_USER', 0))}")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/settings.py",
"license": "Apache License 2.0",
"lines": 345,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/signal_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from datetime import datetime
import logging
import tracemalloc
from common.log_utils import get_project_base_directory
# SIGUSR1 handler: start tracemalloc and take snapshot
def start_tracemalloc_and_snapshot(signum, frame):
if not tracemalloc.is_tracing():
logging.info("start tracemalloc")
tracemalloc.start()
else:
logging.info("tracemalloc is already running")
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
snapshot_file = f"snapshot_{timestamp}.trace"
snapshot_file = os.path.abspath(os.path.join(get_project_base_directory(), "logs", f"{os.getpid()}_snapshot_{timestamp}.trace"))
snapshot = tracemalloc.take_snapshot()
snapshot.dump(snapshot_file)
current, peak = tracemalloc.get_traced_memory()
if sys.platform == "win32":
import psutil
process = psutil.Process()
max_rss = process.memory_info().rss / 1024
else:
import resource
max_rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
logging.info(f"taken snapshot {snapshot_file}. max RSS={max_rss / 1000:.2f} MB, current memory usage: {current / 10**6:.2f} MB, Peak memory usage: {peak / 10**6:.2f} MB")
# SIGUSR2 handler: stop tracemalloc
def stop_tracemalloc(signum, frame):
if tracemalloc.is_tracing():
logging.info("stop tracemalloc")
tracemalloc.stop()
else:
logging.info("tracemalloc not running")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/signal_utils.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:agent/component/data_operations.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import ast
import os
from agent.component.base import ComponentBase, ComponentParamBase
from api.utils.api_utils import timeout
class DataOperationsParam(ComponentParamBase):
"""
Define the Data Operations component parameters.
"""
def __init__(self):
super().__init__()
self.query = []
self.operations = "literal_eval"
self.select_keys = []
        self.filter_values = []
        self.updates = []
        self.remove_keys = []
        self.rename_keys = []
self.outputs = {
"result": {
"value": [],
"type": "Array of Object"
}
}
def check(self):
self.check_valid_value(self.operations, "Support operations", ["select_keys", "literal_eval","combine","filter_values","append_or_update","remove_keys","rename_keys"])
class DataOperations(ComponentBase,ABC):
component_name = "DataOperations"
def get_input_form(self) -> dict[str, dict]:
return {
k: {"name": o.get("name", ""), "type": "line"}
for input_item in (self._param.query or [])
for k, o in self.get_input_elements_from_text(input_item).items()
}
@timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
def _invoke(self, **kwargs):
self.input_objects=[]
inputs = getattr(self._param, "query", None)
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
for input_ref in inputs:
input_object=self._canvas.get_variable_value(input_ref)
self.set_input_value(input_ref, input_object)
if input_object is None:
continue
if isinstance(input_object,dict):
self.input_objects.append(input_object)
elif isinstance(input_object,list):
self.input_objects.extend(x for x in input_object if isinstance(x, dict))
else:
continue
if self._param.operations == "select_keys":
self._select_keys()
elif self._param.operations == "recursive_eval":
self._literal_eval()
elif self._param.operations == "combine":
self._combine()
elif self._param.operations == "filter_values":
self._filter_values()
elif self._param.operations == "append_or_update":
self._append_or_update()
elif self._param.operations == "remove_keys":
self._remove_keys()
else:
self._rename_keys()
def _select_keys(self):
filter_criteria: list[str] = self._param.select_keys
results = [{key: value for key, value in data_dict.items() if key in filter_criteria} for data_dict in self.input_objects]
self.set_output("result", results)
def _recursive_eval(self, data):
if isinstance(data, dict):
            return {k: self._recursive_eval(v) for k, v in data.items()}
        if isinstance(data, list):
            return [self._recursive_eval(item) for item in data]
if isinstance(data, str):
try:
if (
data.strip().startswith(("{", "[", "(", "'", '"'))
or data.strip().lower() in ("true", "false", "none")
or data.strip().replace(".", "").isdigit()
):
return ast.literal_eval(data)
except (ValueError, SyntaxError, TypeError, MemoryError):
return data
else:
return data
return data
def _literal_eval(self):
self.set_output("result", self._recursive_eval(self.input_objects))
def _combine(self):
result={}
for obj in self.input_objects:
for key, value in obj.items():
if key not in result:
result[key] = value
elif isinstance(result[key], list):
if isinstance(value, list):
result[key].extend(value)
else:
result[key].append(value)
else:
result[key] = (
[result[key], value] if not isinstance(value, list) else [result[key], *value]
)
self.set_output("result", result)
def norm(self,v):
s = "" if v is None else str(v)
return s
def match_rule(self, obj, rule):
key = rule.get("key")
op = (rule.get("operator") or "equals").lower()
target = self.norm(rule.get("value"))
target = self._canvas.get_value_with_variable(target) or target
if key not in obj:
return False
val = obj.get(key, None)
v = self.norm(val)
if op == "=":
return v == target
if op == "≠":
return v != target
if op == "contains":
return target in v
if op == "start with":
return v.startswith(target)
if op == "end with":
return v.endswith(target)
return False
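    # Illustrative example (editor's addition, not part of the original component): a
    # filter_values rule is a dict such as
    #   {"key": "status", "operator": "=", "value": "active"}
    # Comparisons run on stringified values, so {"key": "score", "operator": "contains",
    # "value": "9"} matches both "9" and 19. The field names mirror what match_rule reads above.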
def _filter_values(self):
results=[]
rules = (getattr(self._param, "filter_values", None) or [])
for obj in self.input_objects:
if not rules:
results.append(obj)
continue
if all(self.match_rule(obj, r) for r in rules):
results.append(obj)
self.set_output("result", results)
def _append_or_update(self):
results=[]
updates = getattr(self._param, "updates", []) or []
for obj in self.input_objects:
new_obj = dict(obj)
for item in updates:
if not isinstance(item, dict):
continue
k = (item.get("key") or "").strip()
if not k:
continue
new_obj[k] = self._canvas.get_value_with_variable(item.get("value")) or item.get("value")
results.append(new_obj)
self.set_output("result", results)
def _remove_keys(self):
results = []
remove_keys = getattr(self._param, "remove_keys", []) or []
for obj in (self.input_objects or []):
new_obj = dict(obj)
for k in remove_keys:
if not isinstance(k, str):
continue
new_obj.pop(k, None)
results.append(new_obj)
self.set_output("result", results)
def _rename_keys(self):
results = []
rename_pairs = getattr(self._param, "rename_keys", []) or []
for obj in (self.input_objects or []):
new_obj = dict(obj)
for pair in rename_pairs:
if not isinstance(pair, dict):
continue
old = (pair.get("old_key") or "").strip()
new = (pair.get("new_key") or "").strip()
if not old or not new or old == new:
continue
if old in new_obj:
new_obj[new] = new_obj.pop(old)
results.append(new_obj)
self.set_output("result", results)
def thoughts(self) -> str:
return "DataOperation in progress"
| {
"repo_id": "infiniflow/ragflow",
"file_path": "agent/component/data_operations.py",
"license": "Apache License 2.0",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/connection_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import queue
import threading
from typing import Any, Callable, Coroutine, Optional, Type, Union
import asyncio
from functools import wraps
from quart import make_response, jsonify
from common.constants import RetCode
TimeoutException = Union[Type[BaseException], BaseException]
OnTimeoutCallback = Union[Callable[..., Any], Coroutine[Any, Any, Any]]
def timeout(seconds: float | int | str | None = None, attempts: int = 2, *, exception: Optional[TimeoutException] = None,
on_timeout: Optional[OnTimeoutCallback] = None):
if isinstance(seconds, str):
seconds = float(seconds)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
result_queue = queue.Queue(maxsize=1)
def target():
try:
result = func(*args, **kwargs)
result_queue.put(result)
except Exception as e:
result_queue.put(e)
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
for a in range(attempts):
try:
if os.environ.get("ENABLE_TIMEOUT_ASSERTION"):
result = result_queue.get(timeout=seconds)
else:
result = result_queue.get()
if isinstance(result, Exception):
raise result
return result
except queue.Empty:
pass
raise TimeoutError(f"Function '{func.__name__}' timed out after {seconds} seconds and {attempts} attempts.")
@wraps(func)
async def async_wrapper(*args, **kwargs) -> Any:
if seconds is None:
return await func(*args, **kwargs)
for a in range(attempts):
try:
if os.environ.get("ENABLE_TIMEOUT_ASSERTION"):
return await asyncio.wait_for(func(*args, **kwargs), timeout=seconds)
else:
return await func(*args, **kwargs)
except asyncio.TimeoutError:
if a < attempts - 1:
continue
if on_timeout is not None:
if callable(on_timeout):
result = on_timeout()
if isinstance(result, Coroutine):
return await result
return result
return on_timeout
if exception is None:
raise TimeoutError(f"Operation timed out after {seconds} seconds and {attempts} attempts.")
if isinstance(exception, BaseException):
raise exception
if isinstance(exception, type) and issubclass(exception, BaseException):
raise exception(f"Operation timed out after {seconds} seconds and {attempts} attempts.")
raise RuntimeError("Invalid exception type provided")
if asyncio.iscoroutinefunction(func):
return async_wrapper
return wrapper
return decorator
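# Illustrative usage sketch (editor's addition, not part of the original module). The
# decorated function below is hypothetical; with ENABLE_TIMEOUT_ASSERTION set, each of the
# two attempts waits at most 5 seconds before a TimeoutError is raised.
@timeout(5, attempts=2)
def _example_slow_lookup(key: str) -> str:
    # Stand-in for a blocking call (e.g. a remote fetch) that may hang.
    return key.upper()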
async def construct_response(code=RetCode.SUCCESS, message="success", data=None, auth=None):
result_dict = {"code": code, "message": message, "data": data}
response_dict = {}
for key, value in result_dict.items():
if value is None and key != "code":
continue
else:
response_dict[key] = value
response = await make_response(jsonify(response_dict))
if auth:
response.headers["Authorization"] = auth
response.headers["Access-Control-Allow-Origin"] = "*"
response.headers["Access-Control-Allow-Method"] = "*"
response.headers["Access-Control-Allow-Headers"] = "*"
response.headers["Access-Control-Allow-Headers"] = "*"
response.headers["Access-Control-Expose-Headers"] = "Authorization"
return response
def sync_construct_response(code=RetCode.SUCCESS, message="success", data=None, auth=None):
import flask
result_dict = {"code": code, "message": message, "data": data}
response_dict = {}
for key, value in result_dict.items():
if value is None and key != "code":
continue
else:
response_dict[key] = value
response = flask.make_response(flask.jsonify(response_dict))
if auth:
response.headers["Authorization"] = auth
response.headers["Access-Control-Allow-Origin"] = "*"
response.headers["Access-Control-Allow-Method"] = "*"
response.headers["Access-Control-Allow-Headers"] = "*"
response.headers["Access-Control-Allow-Headers"] = "*"
response.headers["Access-Control-Expose-Headers"] = "Authorization"
return response
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/connection_utils.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/log_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import os.path
import logging
from logging.handlers import RotatingFileHandler
from common.file_utils import get_project_base_directory
initialized_root_logger = False
def init_root_logger(logfile_basename: str, log_format: str = "%(asctime)-15s %(levelname)-8s %(process)d %(message)s"):
global initialized_root_logger
if initialized_root_logger:
return
initialized_root_logger = True
logger = logging.getLogger()
logger.handlers.clear()
log_path = os.path.abspath(os.path.join(get_project_base_directory(), "logs", f"{logfile_basename}.log"))
os.makedirs(os.path.dirname(log_path), exist_ok=True)
formatter = logging.Formatter(log_format)
handler1 = RotatingFileHandler(log_path, maxBytes=10*1024*1024, backupCount=5)
handler1.setFormatter(formatter)
logger.addHandler(handler1)
handler2 = logging.StreamHandler()
handler2.setFormatter(formatter)
logger.addHandler(handler2)
logging.captureWarnings(True)
LOG_LEVELS = os.environ.get("LOG_LEVELS", "")
pkg_levels = {}
for pkg_name_level in LOG_LEVELS.split(","):
terms = pkg_name_level.split("=")
        if len(terms) != 2:
continue
pkg_name, pkg_level = terms[0], terms[1]
pkg_name = pkg_name.strip()
pkg_level = logging.getLevelName(pkg_level.strip().upper())
if not isinstance(pkg_level, int):
pkg_level = logging.INFO
pkg_levels[pkg_name] = logging.getLevelName(pkg_level)
for pkg_name in ['peewee', 'pdfminer']:
if pkg_name not in pkg_levels:
pkg_levels[pkg_name] = logging.getLevelName(logging.WARNING)
if 'root' not in pkg_levels:
pkg_levels['root'] = logging.getLevelName(logging.INFO)
for pkg_name, pkg_level in pkg_levels.items():
pkg_logger = logging.getLogger(pkg_name)
pkg_logger.setLevel(pkg_level)
msg = f"{logfile_basename} log path: {log_path}, log levels: {pkg_levels}"
logger.info(msg)
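# Illustrative example (editor's addition, not part of the original module): per-package
# levels are supplied as a comma-separated LOG_LEVELS environment variable, e.g.
#   LOG_LEVELS="peewee=DEBUG,pdfminer=WARNING,root=INFO"
# Unparseable entries are skipped and unknown level names fall back to INFO.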
def log_exception(e, *args):
logging.exception(e)
for a in args:
try:
text = getattr(a, "text")
except Exception:
text = None
if text is not None:
logging.error(text)
raise Exception(text)
logging.error(str(a))
raise e
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/log_utils.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:api/apps/connector_app.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import json
import logging
import time
import uuid
from html import escape
from typing import Any
from quart import request, make_response
from google_auth_oauthlib.flow import Flow
from api.db import InputType
from api.db.services.connector_service import ConnectorService, SyncLogsService
from api.utils.api_utils import get_data_error_result, get_json_result, get_request_json, validate_request
from common.constants import RetCode, TaskStatus
from common.data_source.config import GOOGLE_DRIVE_WEB_OAUTH_REDIRECT_URI, GMAIL_WEB_OAUTH_REDIRECT_URI, BOX_WEB_OAUTH_REDIRECT_URI, DocumentSource
from common.data_source.google_util.constant import WEB_OAUTH_POPUP_TEMPLATE, GOOGLE_SCOPES
from common.misc_utils import get_uuid
from rag.utils.redis_conn import REDIS_CONN
from api.apps import login_required, current_user
from box_sdk_gen import BoxOAuth, OAuthConfig, GetAuthorizeUrlOptions
@manager.route("/set", methods=["POST"]) # noqa: F821
@login_required
async def set_connector():
req = await get_request_json()
if req.get("id"):
conn = {fld: req[fld] for fld in ["prune_freq", "refresh_freq", "config", "timeout_secs"] if fld in req}
ConnectorService.update_by_id(req["id"], conn)
else:
req["id"] = get_uuid()
conn = {
"id": req["id"],
"tenant_id": current_user.id,
"name": req["name"],
"source": req["source"],
"input_type": InputType.POLL,
"config": req["config"],
"refresh_freq": int(req.get("refresh_freq", 5)),
"prune_freq": int(req.get("prune_freq", 720)),
"timeout_secs": int(req.get("timeout_secs", 60 * 29)),
"status": TaskStatus.SCHEDULE,
}
ConnectorService.save(**conn)
await asyncio.sleep(1)
e, conn = ConnectorService.get_by_id(req["id"])
return get_json_result(data=conn.to_dict())
@manager.route("/list", methods=["GET"]) # noqa: F821
@login_required
def list_connector():
return get_json_result(data=ConnectorService.list(current_user.id))
@manager.route("/<connector_id>", methods=["GET"]) # noqa: F821
@login_required
def get_connector(connector_id):
e, conn = ConnectorService.get_by_id(connector_id)
if not e:
return get_data_error_result(message="Can't find this Connector!")
return get_json_result(data=conn.to_dict())
@manager.route("/<connector_id>/logs", methods=["GET"]) # noqa: F821
@login_required
def list_logs(connector_id):
req = request.args.to_dict(flat=True)
arr, total = SyncLogsService.list_sync_tasks(connector_id, int(req.get("page", 1)), int(req.get("page_size", 15)))
return get_json_result(data={"total": total, "logs": arr})
@manager.route("/<connector_id>/resume", methods=["PUT"]) # noqa: F821
@login_required
async def resume(connector_id):
req = await get_request_json()
if req.get("resume"):
ConnectorService.resume(connector_id, TaskStatus.SCHEDULE)
else:
ConnectorService.resume(connector_id, TaskStatus.CANCEL)
return get_json_result(data=True)
@manager.route("/<connector_id>/rebuild", methods=["PUT"]) # noqa: F821
@login_required
@validate_request("kb_id")
async def rebuild(connector_id):
req = await get_request_json()
err = ConnectorService.rebuild(req["kb_id"], connector_id, current_user.id)
if err:
return get_json_result(data=False, message=err, code=RetCode.SERVER_ERROR)
return get_json_result(data=True)
@manager.route("/<connector_id>/rm", methods=["POST"]) # noqa: F821
@login_required
def rm_connector(connector_id):
ConnectorService.resume(connector_id, TaskStatus.CANCEL)
ConnectorService.delete_by_id(connector_id)
return get_json_result(data=True)
WEB_FLOW_TTL_SECS = 15 * 60
def _web_state_cache_key(flow_id: str, source_type: str | None = None) -> str:
"""Return Redis key for web OAuth state.
    The prefix embeds the source type (e.g. "google-drive", "gmail", "box") so that
    OAuth flows from different sources don't clash in Redis.
"""
prefix = f"{source_type}_web_flow_state"
return f"{prefix}:{flow_id}"
def _web_result_cache_key(flow_id: str, source_type: str | None = None) -> str:
"""Return Redis key for web OAuth result.
Mirrors _web_state_cache_key logic for result storage.
"""
prefix = f"{source_type}_web_flow_result"
return f"{prefix}:{flow_id}"
def _load_credentials(payload: str | dict[str, Any]) -> dict[str, Any]:
if isinstance(payload, dict):
return payload
try:
return json.loads(payload)
except json.JSONDecodeError as exc: # pragma: no cover - defensive
raise ValueError("Invalid Google credentials JSON.") from exc
def _get_web_client_config(credentials: dict[str, Any]) -> dict[str, Any]:
web_section = credentials.get("web")
if not isinstance(web_section, dict):
raise ValueError("Google OAuth JSON must include a 'web' client configuration to use browser-based authorization.")
return {"web": web_section}
async def _render_web_oauth_popup(flow_id: str, success: bool, message: str, source="drive"):
status = "success" if success else "error"
auto_close = "window.close();" if success else ""
escaped_message = escape(message)
# Drive: ragflow-google-drive-oauth
# Gmail: ragflow-gmail-oauth
payload_type = f"ragflow-{source}-oauth"
payload_json = json.dumps(
{
"type": payload_type,
"status": status,
"flowId": flow_id or "",
"message": message,
}
)
# TODO(google-oauth): title/heading/message may need to reflect drive/gmail based on cached type
html = WEB_OAUTH_POPUP_TEMPLATE.format(
title=f"Google {source.capitalize()} Authorization",
heading="Authorization complete" if success else "Authorization failed",
message=escaped_message,
payload_json=payload_json,
auto_close=auto_close,
)
response = await make_response(html, 200)
response.headers["Content-Type"] = "text/html; charset=utf-8"
return response
@manager.route("/google/oauth/web/start", methods=["POST"]) # noqa: F821
@login_required
@validate_request("credentials")
async def start_google_web_oauth():
source = request.args.get("type", "google-drive")
if source not in ("google-drive", "gmail"):
return get_json_result(code=RetCode.ARGUMENT_ERROR, message="Invalid Google OAuth type.")
req = await get_request_json()
if source == "gmail":
default_redirect_uri = GMAIL_WEB_OAUTH_REDIRECT_URI
scopes = GOOGLE_SCOPES[DocumentSource.GMAIL]
else:
default_redirect_uri = GOOGLE_DRIVE_WEB_OAUTH_REDIRECT_URI
scopes = GOOGLE_SCOPES[DocumentSource.GOOGLE_DRIVE]
redirect_uri = req.get("redirect_uri", default_redirect_uri)
if isinstance(redirect_uri, str):
redirect_uri = redirect_uri.strip()
if not redirect_uri:
return get_json_result(
code=RetCode.SERVER_ERROR,
message="Google OAuth redirect URI is not configured on the server.",
)
raw_credentials = req.get("credentials", "")
try:
credentials = _load_credentials(raw_credentials)
except ValueError as exc:
return get_json_result(code=RetCode.ARGUMENT_ERROR, message=str(exc))
if credentials.get("refresh_token"):
return get_json_result(
code=RetCode.ARGUMENT_ERROR,
message="Uploaded credentials already include a refresh token.",
)
try:
client_config = _get_web_client_config(credentials)
except ValueError as exc:
return get_json_result(code=RetCode.ARGUMENT_ERROR, message=str(exc))
flow_id = str(uuid.uuid4())
try:
flow = Flow.from_client_config(client_config, scopes=scopes)
flow.redirect_uri = redirect_uri
authorization_url, _ = flow.authorization_url(
access_type="offline",
include_granted_scopes="true",
prompt="consent",
state=flow_id,
)
except Exception as exc: # pragma: no cover - defensive
logging.exception("Failed to create Google OAuth flow: %s", exc)
return get_json_result(
code=RetCode.SERVER_ERROR,
message="Failed to initialize Google OAuth flow. Please verify the uploaded client configuration.",
)
cache_payload = {
"user_id": current_user.id,
"client_config": client_config,
"redirect_uri": redirect_uri,
"created_at": int(time.time()),
}
REDIS_CONN.set_obj(_web_state_cache_key(flow_id, source), cache_payload, WEB_FLOW_TTL_SECS)
return get_json_result(
data={
"flow_id": flow_id,
"authorization_url": authorization_url,
"expires_in": WEB_FLOW_TTL_SECS,
}
)
@manager.route("/gmail/oauth/web/callback", methods=["GET"]) # noqa: F821
async def google_gmail_web_oauth_callback():
state_id = request.args.get("state")
error = request.args.get("error")
source = "gmail"
error_description = request.args.get("error_description") or error
if not state_id:
return await _render_web_oauth_popup("", False, "Missing OAuth state parameter.", source)
state_cache = REDIS_CONN.get(_web_state_cache_key(state_id, source))
if not state_cache:
return await _render_web_oauth_popup(state_id, False, "Authorization session expired. Please restart from the main window.", source)
state_obj = json.loads(state_cache)
client_config = state_obj.get("client_config")
redirect_uri = state_obj.get("redirect_uri", GMAIL_WEB_OAUTH_REDIRECT_URI)
if not client_config:
REDIS_CONN.delete(_web_state_cache_key(state_id, source))
return await _render_web_oauth_popup(state_id, False, "Authorization session was invalid. Please retry.", source)
if error:
REDIS_CONN.delete(_web_state_cache_key(state_id, source))
return await _render_web_oauth_popup(state_id, False, error_description or "Authorization was cancelled.", source)
code = request.args.get("code")
if not code:
return await _render_web_oauth_popup(state_id, False, "Missing authorization code from Google.", source)
try:
# TODO(google-oauth): branch scopes/redirect_uri based on source_type (drive vs gmail)
flow = Flow.from_client_config(client_config, scopes=GOOGLE_SCOPES[DocumentSource.GMAIL])
flow.redirect_uri = redirect_uri
flow.fetch_token(code=code)
except Exception as exc: # pragma: no cover - defensive
logging.exception("Failed to exchange Google OAuth code: %s", exc)
REDIS_CONN.delete(_web_state_cache_key(state_id, source))
return await _render_web_oauth_popup(state_id, False, "Failed to exchange tokens with Google. Please retry.", source)
creds_json = flow.credentials.to_json()
result_payload = {
"user_id": state_obj.get("user_id"),
"credentials": creds_json,
}
REDIS_CONN.set_obj(_web_result_cache_key(state_id, source), result_payload, WEB_FLOW_TTL_SECS)
REDIS_CONN.delete(_web_state_cache_key(state_id, source))
return await _render_web_oauth_popup(state_id, True, "Authorization completed successfully.", source)
@manager.route("/google-drive/oauth/web/callback", methods=["GET"]) # noqa: F821
async def google_drive_web_oauth_callback():
state_id = request.args.get("state")
error = request.args.get("error")
source = "google-drive"
error_description = request.args.get("error_description") or error
if not state_id:
return await _render_web_oauth_popup("", False, "Missing OAuth state parameter.", source)
state_cache = REDIS_CONN.get(_web_state_cache_key(state_id, source))
if not state_cache:
return await _render_web_oauth_popup(state_id, False, "Authorization session expired. Please restart from the main window.", source)
state_obj = json.loads(state_cache)
client_config = state_obj.get("client_config")
redirect_uri = state_obj.get("redirect_uri", GOOGLE_DRIVE_WEB_OAUTH_REDIRECT_URI)
if not client_config:
REDIS_CONN.delete(_web_state_cache_key(state_id, source))
return await _render_web_oauth_popup(state_id, False, "Authorization session was invalid. Please retry.", source)
if error:
REDIS_CONN.delete(_web_state_cache_key(state_id, source))
return await _render_web_oauth_popup(state_id, False, error_description or "Authorization was cancelled.", source)
code = request.args.get("code")
if not code:
return await _render_web_oauth_popup(state_id, False, "Missing authorization code from Google.", source)
try:
# TODO(google-oauth): branch scopes/redirect_uri based on source_type (drive vs gmail)
flow = Flow.from_client_config(client_config, scopes=GOOGLE_SCOPES[DocumentSource.GOOGLE_DRIVE])
flow.redirect_uri = redirect_uri
flow.fetch_token(code=code)
except Exception as exc: # pragma: no cover - defensive
logging.exception("Failed to exchange Google OAuth code: %s", exc)
REDIS_CONN.delete(_web_state_cache_key(state_id, source))
return await _render_web_oauth_popup(state_id, False, "Failed to exchange tokens with Google. Please retry.", source)
creds_json = flow.credentials.to_json()
result_payload = {
"user_id": state_obj.get("user_id"),
"credentials": creds_json,
}
REDIS_CONN.set_obj(_web_result_cache_key(state_id, source), result_payload, WEB_FLOW_TTL_SECS)
REDIS_CONN.delete(_web_state_cache_key(state_id, source))
return await _render_web_oauth_popup(state_id, True, "Authorization completed successfully.", source)
@manager.route("/google/oauth/web/result", methods=["POST"]) # noqa: F821
@login_required
@validate_request("flow_id")
async def poll_google_web_result():
    req = await get_request_json()
source = request.args.get("type")
if source not in ("google-drive", "gmail"):
return get_json_result(code=RetCode.ARGUMENT_ERROR, message="Invalid Google OAuth type.")
flow_id = req.get("flow_id")
cache_raw = REDIS_CONN.get(_web_result_cache_key(flow_id, source))
if not cache_raw:
return get_json_result(code=RetCode.RUNNING, message="Authorization is still pending.")
result = json.loads(cache_raw)
if result.get("user_id") != current_user.id:
return get_json_result(code=RetCode.PERMISSION_ERROR, message="You are not allowed to access this authorization result.")
REDIS_CONN.delete(_web_result_cache_key(flow_id, source))
return get_json_result(data={"credentials": result.get("credentials")})
@manager.route("/box/oauth/web/start", methods=["POST"]) # noqa: F821
@login_required
async def start_box_web_oauth():
req = await get_request_json()
client_id = req.get("client_id")
client_secret = req.get("client_secret")
redirect_uri = req.get("redirect_uri", BOX_WEB_OAUTH_REDIRECT_URI)
if not client_id or not client_secret:
return get_json_result(code=RetCode.ARGUMENT_ERROR, message="Box client_id and client_secret are required.")
flow_id = str(uuid.uuid4())
box_auth = BoxOAuth(
OAuthConfig(
client_id=client_id,
client_secret=client_secret,
)
)
auth_url = box_auth.get_authorize_url(
options=GetAuthorizeUrlOptions(
redirect_uri=redirect_uri,
state=flow_id,
)
)
cache_payload = {
"user_id": current_user.id,
"auth_url": auth_url,
"client_id": client_id,
"client_secret": client_secret,
"created_at": int(time.time()),
}
REDIS_CONN.set_obj(_web_state_cache_key(flow_id, "box"), cache_payload, WEB_FLOW_TTL_SECS)
    return get_json_result(
        data={
            "flow_id": flow_id,
            "authorization_url": auth_url,
            "expires_in": WEB_FLOW_TTL_SECS,
        }
    )
@manager.route("/box/oauth/web/callback", methods=["GET"]) # noqa: F821
async def box_web_oauth_callback():
flow_id = request.args.get("state")
if not flow_id:
return await _render_web_oauth_popup("", False, "Missing OAuth parameters.", "box")
code = request.args.get("code")
if not code:
return await _render_web_oauth_popup(flow_id, False, "Missing authorization code from Box.", "box")
    cache_blob = REDIS_CONN.get(_web_state_cache_key(flow_id, "box"))
    if not cache_blob:
        # Guard against json.loads(None) when the cached state has expired or never existed.
        return get_json_result(code=RetCode.ARGUMENT_ERROR, message="Box OAuth session expired or invalid.")
    cache_payload = json.loads(cache_blob)
error = request.args.get("error")
error_description = request.args.get("error_description") or error
if error:
REDIS_CONN.delete(_web_state_cache_key(flow_id, "box"))
return await _render_web_oauth_popup(flow_id, False, error_description or "Authorization failed.", "box")
auth = BoxOAuth(
OAuthConfig(
client_id=cache_payload.get("client_id"),
client_secret=cache_payload.get("client_secret"),
)
)
auth.get_tokens_authorization_code_grant(code)
token = auth.retrieve_token()
result_payload = {
"user_id": cache_payload.get("user_id"),
"client_id": cache_payload.get("client_id"),
"client_secret": cache_payload.get("client_secret"),
"access_token": token.access_token,
"refresh_token": token.refresh_token,
}
REDIS_CONN.set_obj(_web_result_cache_key(flow_id, "box"), result_payload, WEB_FLOW_TTL_SECS)
REDIS_CONN.delete(_web_state_cache_key(flow_id, "box"))
return await _render_web_oauth_popup(flow_id, True, "Authorization completed successfully.", "box")
@manager.route("/box/oauth/web/result", methods=["POST"]) # noqa: F821
@login_required
@validate_request("flow_id")
async def poll_box_web_result():
req = await get_request_json()
flow_id = req.get("flow_id")
cache_blob = REDIS_CONN.get(_web_result_cache_key(flow_id, "box"))
if not cache_blob:
return get_json_result(code=RetCode.RUNNING, message="Authorization is still pending.")
cache_raw = json.loads(cache_blob)
if cache_raw.get("user_id") != current_user.id:
return get_json_result(code=RetCode.PERMISSION_ERROR, message="You are not allowed to access this authorization result.")
REDIS_CONN.delete(_web_result_cache_key(flow_id, "box"))
return get_json_result(data={"credentials": cache_raw})
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/apps/connector_app.py",
"license": "Apache License 2.0",
"lines": 401,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:api/db/services/connector_service.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from datetime import datetime
import os
from typing import Tuple, List
from pydantic import BaseModel
from peewee import SQL, fn
from api.db import InputType
from api.db.db_models import Connector, SyncLogs, Connector2Kb, Knowledgebase
from api.db.services.common_service import CommonService
from api.db.services.document_service import DocumentService
from api.db.services.document_service import DocMetadataService
from common.misc_utils import get_uuid
from common.constants import TaskStatus
from common.time_utils import current_timestamp, timestamp_to_date
class ConnectorService(CommonService):
model = Connector
@classmethod
def resume(cls, connector_id, status):
for c2k in Connector2KbService.query(connector_id=connector_id):
task = SyncLogsService.get_latest_task(connector_id, c2k.kb_id)
if not task:
if status == TaskStatus.SCHEDULE:
SyncLogsService.schedule(connector_id, c2k.kb_id)
ConnectorService.update_by_id(connector_id, {"status": status})
return
if task.status == TaskStatus.DONE:
if status == TaskStatus.SCHEDULE:
SyncLogsService.schedule(connector_id, c2k.kb_id, task.poll_range_end, total_docs_indexed=task.total_docs_indexed)
ConnectorService.update_by_id(connector_id, {"status": status})
return
task = task.to_dict()
task["status"] = status
SyncLogsService.update_by_id(task["id"], task)
ConnectorService.update_by_id(connector_id, {"status": status})
@classmethod
def list(cls, tenant_id):
fields = [
cls.model.id,
cls.model.name,
cls.model.source,
cls.model.status
]
return list(cls.model.select(*fields).where(
cls.model.tenant_id == tenant_id
).dicts())
@classmethod
def rebuild(cls, kb_id:str, connector_id: str, tenant_id:str):
from api.db.services.file_service import FileService
e, conn = cls.get_by_id(connector_id)
if not e:
return None
SyncLogsService.filter_delete([SyncLogs.connector_id==connector_id, SyncLogs.kb_id==kb_id])
docs = DocumentService.query(source_type=f"{conn.source}/{conn.id}", kb_id=kb_id)
err = FileService.delete_docs([d.id for d in docs], tenant_id)
SyncLogsService.schedule(connector_id, kb_id, reindex=True)
return err
class SyncLogsService(CommonService):
model = SyncLogs
@classmethod
def list_sync_tasks(cls, connector_id=None, page_number=None, items_per_page=15) -> Tuple[List[dict], int]:
fields = [
cls.model.id,
cls.model.connector_id,
cls.model.kb_id,
cls.model.update_date,
cls.model.poll_range_start,
cls.model.poll_range_end,
cls.model.new_docs_indexed,
cls.model.total_docs_indexed,
cls.model.error_msg,
cls.model.full_exception_trace,
cls.model.error_count,
Connector.name,
Connector.source,
Connector.tenant_id,
Connector.timeout_secs,
Knowledgebase.name.alias("kb_name"),
Knowledgebase.avatar.alias("kb_avatar"),
Connector2Kb.auto_parse,
cls.model.from_beginning.alias("reindex"),
cls.model.status,
cls.model.update_time
]
if not connector_id:
fields.append(Connector.config)
query = cls.model.select(*fields)\
.join(Connector, on=(cls.model.connector_id==Connector.id))\
.join(Connector2Kb, on=(cls.model.kb_id==Connector2Kb.kb_id))\
.join(Knowledgebase, on=(cls.model.kb_id==Knowledgebase.id))
if connector_id:
query = query.where(cls.model.connector_id == connector_id)
else:
database_type = os.getenv("DB_TYPE", "mysql")
if "postgres" in database_type.lower():
interval_expr = SQL("make_interval(mins => t2.refresh_freq)")
else:
interval_expr = SQL("INTERVAL `t2`.`refresh_freq` MINUTE")
query = query.where(
Connector.input_type == InputType.POLL,
Connector.status == TaskStatus.SCHEDULE,
cls.model.status == TaskStatus.SCHEDULE,
cls.model.update_date < (fn.NOW() - interval_expr)
)
query = query.distinct().order_by(cls.model.update_time.desc())
total = query.count()
if page_number:
query = query.paginate(page_number, items_per_page)
return list(query.dicts()), total
@classmethod
def start(cls, id, connector_id):
cls.update_by_id(id, {"status": TaskStatus.RUNNING, "time_started": datetime.now().strftime('%Y-%m-%d %H:%M:%S') })
ConnectorService.update_by_id(connector_id, {"status": TaskStatus.RUNNING})
@classmethod
def done(cls, id, connector_id):
cls.update_by_id(id, {"status": TaskStatus.DONE})
ConnectorService.update_by_id(connector_id, {"status": TaskStatus.DONE})
@classmethod
def schedule(cls, connector_id, kb_id, poll_range_start=None, reindex=False, total_docs_indexed=0):
try:
if cls.model.select().where(cls.model.kb_id == kb_id, cls.model.connector_id == connector_id).count() > 100:
rm_ids = [m.id for m in cls.model.select(cls.model.id).where(cls.model.kb_id == kb_id, cls.model.connector_id == connector_id).order_by(cls.model.update_time.asc()).limit(70)]
deleted = cls.model.delete().where(cls.model.id.in_(rm_ids)).execute()
logging.info(f"[SyncLogService] Cleaned {deleted} old logs.")
except Exception as e:
logging.exception(e)
try:
e = cls.query(kb_id=kb_id, connector_id=connector_id, status=TaskStatus.SCHEDULE)
if e:
logging.warning(f"{kb_id}--{connector_id} has already had a scheduling sync task which is abnormal.")
return None
reindex = "1" if reindex else "0"
ConnectorService.update_by_id(connector_id, {"status": TaskStatus.SCHEDULE})
return cls.save(**{
"id": get_uuid(),
"kb_id": kb_id, "status": TaskStatus.SCHEDULE, "connector_id": connector_id,
"poll_range_start": poll_range_start, "from_beginning": reindex,
"total_docs_indexed": total_docs_indexed
})
except Exception as e:
logging.exception(e)
task = cls.get_latest_task(connector_id, kb_id)
if task:
cls.model.update(status=TaskStatus.SCHEDULE,
poll_range_start=poll_range_start,
error_msg=cls.model.error_msg + str(e),
full_exception_trace=cls.model.full_exception_trace + str(e)
) \
.where(cls.model.id == task.id).execute()
ConnectorService.update_by_id(connector_id, {"status": TaskStatus.SCHEDULE})
@classmethod
def increase_docs(cls, id, min_update, max_update, doc_num, err_msg="", error_count=0):
cls.model.update(new_docs_indexed=cls.model.new_docs_indexed + doc_num,
total_docs_indexed=cls.model.total_docs_indexed + doc_num,
poll_range_start=fn.COALESCE(fn.LEAST(cls.model.poll_range_start,min_update), min_update),
poll_range_end=fn.COALESCE(fn.GREATEST(cls.model.poll_range_end, max_update), max_update),
error_msg=cls.model.error_msg + err_msg,
error_count=cls.model.error_count + error_count,
update_time=current_timestamp(),
update_date=timestamp_to_date(current_timestamp())
)\
.where(cls.model.id == id).execute()
@classmethod
def duplicate_and_parse(cls, kb, docs, tenant_id, src, auto_parse=True):
from api.db.services.file_service import FileService
if not docs:
return None
class FileObj(BaseModel):
id: str
filename: str
blob: bytes
def read(self) -> bytes:
return self.blob
errs = []
files = [FileObj(id=d["id"], filename=d["semantic_identifier"]+(f"{d['extension']}" if d["semantic_identifier"][::-1].find(d['extension'][::-1])<0 else ""), blob=d["blob"]) for d in docs]
doc_ids = []
err, doc_blob_pairs = FileService.upload_document(kb, files, tenant_id, src)
errs.extend(err)
# Create a mapping from filename to metadata for later use
metadata_map = {}
for d in docs:
if d.get("metadata"):
filename = d["semantic_identifier"]+(f"{d['extension']}" if d["semantic_identifier"][::-1].find(d['extension'][::-1])<0 else "")
metadata_map[filename] = d["metadata"]
kb_table_num_map = {}
for doc, _ in doc_blob_pairs:
doc_ids.append(doc["id"])
# Set metadata if available for this document
if doc["name"] in metadata_map:
DocMetadataService.update_document_metadata(doc["id"], metadata_map[doc["name"]])
if not auto_parse or auto_parse == "0":
continue
DocumentService.run(tenant_id, doc, kb_table_num_map)
return errs, doc_ids
@classmethod
def get_latest_task(cls, connector_id, kb_id):
return cls.model.select().where(
cls.model.connector_id==connector_id,
cls.model.kb_id == kb_id
).order_by(cls.model.update_time.desc()).first()
class Connector2KbService(CommonService):
model = Connector2Kb
@classmethod
def link_connectors(cls, kb_id:str, connectors: list[dict], tenant_id:str):
arr = cls.query(kb_id=kb_id)
old_conn_ids = [a.connector_id for a in arr]
connector_ids = []
for conn in connectors:
conn_id = conn["id"]
connector_ids.append(conn_id)
if conn_id in old_conn_ids:
cls.filter_update([cls.model.connector_id==conn_id, cls.model.kb_id==kb_id], {"auto_parse": conn.get("auto_parse", "1")})
continue
cls.save(**{
"id": get_uuid(),
"connector_id": conn_id,
"kb_id": kb_id,
"auto_parse": conn.get("auto_parse", "1")
})
SyncLogsService.schedule(conn_id, kb_id, reindex=True)
errs = []
for conn_id in old_conn_ids:
if conn_id in connector_ids:
continue
cls.filter_delete([cls.model.kb_id==kb_id, cls.model.connector_id==conn_id])
e, conn = ConnectorService.get_by_id(conn_id)
if not e:
continue
#SyncLogsService.filter_delete([SyncLogs.connector_id==conn_id, SyncLogs.kb_id==kb_id])
# Do not delete docs while unlinking.
SyncLogsService.filter_update([SyncLogs.connector_id==conn_id, SyncLogs.kb_id==kb_id, SyncLogs.status.in_([TaskStatus.SCHEDULE, TaskStatus.RUNNING])], {"status": TaskStatus.CANCEL})
#docs = DocumentService.query(source_type=f"{conn.source}/{conn.id}")
#err = FileService.delete_docs([d.id for d in docs], tenant_id)
#if err:
# errs.append(err)
return "\n".join(errs)
@classmethod
def list_connectors(cls, kb_id):
fields = [
Connector.id,
Connector.source,
Connector.name,
cls.model.auto_parse,
Connector.status
]
return list(cls.model.select(*fields)\
.join(Connector, on=(cls.model.connector_id==Connector.id))\
.where(
cls.model.kb_id==kb_id
).dicts()
)
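# --- Illustrative usage sketch (identifiers below are hypothetical placeholders) ---
# A minimal example of how a caller might wire a connector to a knowledge base and then
# inspect the linkage. In the real application the ids come from the web layer once the
# Connector and Knowledgebase rows already exist; this is only meant to show the expected
# input shape of link_connectors and the rows returned by list_connectors.
def _example_link_and_list(kb_id: str = "kb_id_placeholder", connector_id: str = "connector_id_placeholder", tenant_id: str = "tenant_id_placeholder") -> None:
    # auto_parse="1" asks the sync task to parse newly ingested documents automatically;
    # linking a new connector also schedules a full re-index (see link_connectors above).
    errs = Connector2KbService.link_connectors(
        kb_id,
        [{"id": connector_id, "auto_parse": "1"}],
        tenant_id,
    )
    if errs:
        logging.warning("link_connectors reported: %s", errs)
    for row in Connector2KbService.list_connectors(kb_id):
        logging.info("linked connector %s (%s), status=%s", row["name"], row["source"], row["status"])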
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/db/services/connector_service.py",
"license": "Apache License 2.0",
"lines": 265,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/blob_connector.py | """Blob storage connector"""
import logging
import os
from datetime import datetime, timezone
from typing import Any, Optional
from common.data_source.utils import (
create_s3_client,
detect_bucket_region,
download_object,
extract_size_bytes,
get_file_ext,
)
from common.data_source.config import BlobType, DocumentSource, BLOB_STORAGE_SIZE_THRESHOLD, INDEX_BATCH_SIZE
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
CredentialExpiredError,
InsufficientPermissionsError
)
from common.data_source.interfaces import LoadConnector, PollConnector
from common.data_source.models import Document, SecondsSinceUnixEpoch, GenerateDocumentsOutput
class BlobStorageConnector(LoadConnector, PollConnector):
"""Blob storage connector"""
def __init__(
self,
bucket_type: str,
bucket_name: str,
prefix: str = "",
batch_size: int = INDEX_BATCH_SIZE,
european_residency: bool = False,
) -> None:
self.bucket_type: BlobType = BlobType(bucket_type)
self.bucket_name = bucket_name.strip()
self.prefix = prefix if not prefix or prefix.endswith("/") else prefix + "/"
self.batch_size = batch_size
self.s3_client: Optional[Any] = None
self._allow_images: bool | None = None
self.size_threshold: int | None = BLOB_STORAGE_SIZE_THRESHOLD
self.bucket_region: Optional[str] = None
self.european_residency: bool = european_residency
def set_allow_images(self, allow_images: bool) -> None:
"""Set whether to process images"""
logging.info(f"Setting allow_images to {allow_images}.")
self._allow_images = allow_images
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Load credentials"""
logging.debug(
f"Loading credentials for {self.bucket_name} of type {self.bucket_type}"
)
# Validate credentials
if self.bucket_type == BlobType.R2:
if not all(
credentials.get(key)
for key in ["r2_access_key_id", "r2_secret_access_key", "account_id"]
):
raise ConnectorMissingCredentialError("Cloudflare R2")
elif self.bucket_type == BlobType.S3:
authentication_method = credentials.get("authentication_method", "access_key")
if authentication_method == "access_key":
if not all(
credentials.get(key)
for key in ["aws_access_key_id", "aws_secret_access_key"]
):
raise ConnectorMissingCredentialError("Amazon S3")
elif authentication_method == "iam_role":
if not credentials.get("aws_role_arn"):
raise ConnectorMissingCredentialError("Amazon S3 IAM role ARN is required")
elif authentication_method == "assume_role":
pass
else:
raise ConnectorMissingCredentialError("Unsupported S3 authentication method")
elif self.bucket_type == BlobType.GOOGLE_CLOUD_STORAGE:
if not all(
credentials.get(key) for key in ["access_key_id", "secret_access_key"]
):
raise ConnectorMissingCredentialError("Google Cloud Storage")
elif self.bucket_type == BlobType.OCI_STORAGE:
if not all(
credentials.get(key)
for key in ["namespace", "region", "access_key_id", "secret_access_key"]
):
raise ConnectorMissingCredentialError("Oracle Cloud Infrastructure")
elif self.bucket_type == BlobType.S3_COMPATIBLE:
if not all(
credentials.get(key)
for key in ["endpoint_url", "aws_access_key_id", "aws_secret_access_key", "addressing_style"]
):
raise ConnectorMissingCredentialError("S3 Compatible Storage")
else:
raise ValueError(f"Unsupported bucket type: {self.bucket_type}")
# Create S3 client
self.s3_client = create_s3_client(
self.bucket_type, credentials, self.european_residency
)
# Detect bucket region (only important for S3)
if self.bucket_type == BlobType.S3:
self.bucket_region = detect_bucket_region(self.s3_client, self.bucket_name)
return None
def _yield_blob_objects(
self,
start: datetime,
end: datetime,
) -> GenerateDocumentsOutput:
"""Generate bucket objects"""
if self.s3_client is None:
raise ConnectorMissingCredentialError("Blob storage")
paginator = self.s3_client.get_paginator("list_objects_v2")
pages = paginator.paginate(Bucket=self.bucket_name, Prefix=self.prefix)
# Collect all objects first to count filename occurrences
all_objects = []
for page in pages:
if "Contents" not in page:
continue
for obj in page["Contents"]:
if obj["Key"].endswith("/"):
continue
last_modified = obj["LastModified"].replace(tzinfo=timezone.utc)
if start < last_modified <= end:
all_objects.append(obj)
# Count filename occurrences to determine which need full paths
filename_counts: dict[str, int] = {}
for obj in all_objects:
file_name = os.path.basename(obj["Key"])
filename_counts[file_name] = filename_counts.get(file_name, 0) + 1
batch: list[Document] = []
for obj in all_objects:
last_modified = obj["LastModified"].replace(tzinfo=timezone.utc)
file_name = os.path.basename(obj["Key"])
key = obj["Key"]
size_bytes = extract_size_bytes(obj)
if (
self.size_threshold is not None
and isinstance(size_bytes, int)
and size_bytes > self.size_threshold
):
logging.warning(
f"{file_name} exceeds size threshold of {self.size_threshold}. Skipping."
)
continue
try:
blob = download_object(self.s3_client, self.bucket_name, key, self.size_threshold)
if blob is None:
continue
# Use full path only if filename appears multiple times
if filename_counts.get(file_name, 0) > 1:
relative_path = key
if self.prefix and key.startswith(self.prefix):
relative_path = key[len(self.prefix):]
semantic_id = relative_path.replace('/', ' / ') if relative_path else file_name
else:
semantic_id = file_name
batch.append(
Document(
id=f"{self.bucket_type}:{self.bucket_name}:{key}",
blob=blob,
source=DocumentSource(self.bucket_type.value),
semantic_identifier=semantic_id,
extension=get_file_ext(file_name),
doc_updated_at=last_modified,
size_bytes=size_bytes if size_bytes else 0
)
)
if len(batch) == self.batch_size:
yield batch
batch = []
except Exception:
logging.exception(f"Error decoding object {key}")
if batch:
yield batch
def load_from_state(self) -> GenerateDocumentsOutput:
"""Load documents from state"""
logging.debug("Loading blob objects")
return self._yield_blob_objects(
start=datetime(1970, 1, 1, tzinfo=timezone.utc),
end=datetime.now(timezone.utc),
)
def poll_source(
self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch
) -> GenerateDocumentsOutput:
"""Poll source to get documents"""
if self.s3_client is None:
raise ConnectorMissingCredentialError("Blob storage")
start_datetime = datetime.fromtimestamp(start, tz=timezone.utc)
end_datetime = datetime.fromtimestamp(end, tz=timezone.utc)
for batch in self._yield_blob_objects(start_datetime, end_datetime):
yield batch
def validate_connector_settings(self) -> None:
"""Validate connector settings"""
if self.s3_client is None:
raise ConnectorMissingCredentialError(
"Blob storage credentials not loaded."
)
if not self.bucket_name:
raise ConnectorValidationError(
"No bucket name was provided in connector settings."
)
try:
# Lightweight validation step
self.s3_client.list_objects_v2(
Bucket=self.bucket_name, Prefix=self.prefix, MaxKeys=1
)
except Exception as e:
error_code = getattr(e, 'response', {}).get('Error', {}).get('Code', '')
status_code = getattr(e, 'response', {}).get('ResponseMetadata', {}).get('HTTPStatusCode')
# Common S3 error scenarios
if error_code in [
"AccessDenied",
"InvalidAccessKeyId",
"SignatureDoesNotMatch",
]:
if status_code == 403 or error_code == "AccessDenied":
raise InsufficientPermissionsError(
f"Insufficient permissions to list objects in bucket '{self.bucket_name}'. "
"Please check your bucket policy and/or IAM policy."
)
if status_code == 401 or error_code == "SignatureDoesNotMatch":
raise CredentialExpiredError(
"Provided blob storage credentials appear invalid or expired."
)
raise CredentialExpiredError(
f"Credential issue encountered ({error_code})."
)
if error_code == "NoSuchBucket" or status_code == 404:
raise ConnectorValidationError(
f"Bucket '{self.bucket_name}' does not exist or cannot be found."
)
raise ConnectorValidationError(
f"Unexpected S3 client error (code={error_code}, status={status_code}): {e}"
)
if __name__ == "__main__":
# Example usage
credentials_dict = {
"aws_access_key_id": os.environ.get("AWS_ACCESS_KEY_ID"),
"aws_secret_access_key": os.environ.get("AWS_SECRET_ACCESS_KEY"),
}
# Initialize connector
connector = BlobStorageConnector(
bucket_type=os.environ.get("BUCKET_TYPE") or "s3",
bucket_name=os.environ.get("BUCKET_NAME") or "yyboombucket",
prefix="",
)
try:
connector.load_credentials(credentials_dict)
document_batch_generator = connector.load_from_state()
for document_batch in document_batch_generator:
print("First batch of documents:")
for doc in document_batch:
print(f"Document ID: {doc.id}")
print(f"Semantic Identifier: {doc.semantic_identifier}")
print(f"Source: {doc.source}")
print(f"Updated At: {doc.doc_updated_at}")
print("---")
break
except ConnectorMissingCredentialError as e:
print(f"Error: {e}")
except Exception as e:
print(f"An unexpected error occurred: {e}")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/blob_connector.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/config.py | """Configuration constants and enum definitions"""
import json
import os
from datetime import datetime, timezone
from enum import Enum
from typing import cast
def get_current_tz_offset() -> int:
# datetime now() gets local time, datetime.now(timezone.utc) gets UTC time.
# remove tzinfo to compare non-timezone-aware objects.
time_diff = datetime.now() - datetime.now(timezone.utc).replace(tzinfo=None)
return round(time_diff.total_seconds() / 3600)
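# Worked example (illustrative values): on a host running at UTC+08:00 the naive
# local/UTC difference above is roughly 28800 seconds, so the function returns
# round(28800 / 3600) == 8; on a UTC-05:00 host it returns -5, and on a UTC host 0.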
# Default request timeout, mostly used by connectors
REQUEST_TIMEOUT_SECONDS = int(os.environ.get("REQUEST_TIMEOUT_SECONDS") or 60)
ONE_MINUTE = 60
ONE_HOUR = 3600
ONE_DAY = ONE_HOUR * 24
# Slack API limits
_SLACK_LIMIT = 900
# Redis lock configuration
ONYX_SLACK_LOCK_TTL = 1800
ONYX_SLACK_LOCK_BLOCKING_TIMEOUT = 60
ONYX_SLACK_LOCK_TOTAL_BLOCKING_TIMEOUT = 3600
class BlobType(str, Enum):
"""Supported storage types"""
S3 = "s3"
R2 = "r2"
GOOGLE_CLOUD_STORAGE = "google_cloud_storage"
OCI_STORAGE = "oci_storage"
S3_COMPATIBLE = "s3_compatible"
class DocumentSource(str, Enum):
"""Document sources"""
S3 = "s3"
NOTION = "notion"
R2 = "r2"
GOOGLE_CLOUD_STORAGE = "google_cloud_storage"
OCI_STORAGE = "oci_storage"
SLACK = "slack"
CONFLUENCE = "confluence"
JIRA = "jira"
GOOGLE_DRIVE = "google_drive"
GMAIL = "gmail"
DISCORD = "discord"
WEBDAV = "webdav"
MOODLE = "moodle"
S3_COMPATIBLE = "s3_compatible"
DROPBOX = "dropbox"
BOX = "box"
AIRTABLE = "airtable"
ASANA = "asana"
GITHUB = "github"
GITLAB = "gitlab"
IMAP = "imap"
BITBUCKET = "bitbucket"
ZENDESK = "zendesk"
SEAFILE = "seafile"
MYSQL = "mysql"
POSTGRESQL = "postgresql"
class FileOrigin(str, Enum):
"""File origins"""
CONNECTOR = "connector"
# Standard image MIME types supported by most vision LLMs
IMAGE_MIME_TYPES = [
"image/png",
"image/jpeg",
"image/jpg",
"image/webp",
]
# Image types that should be excluded from processing
EXCLUDED_IMAGE_TYPES = [
"image/bmp",
"image/tiff",
"image/gif",
"image/svg+xml",
"image/avif",
]
_PAGE_EXPANSION_FIELDS = [
"body.storage.value",
"version",
"space",
"metadata.labels",
"history.lastUpdated",
"ancestors",
]
# Configuration constants
BLOB_STORAGE_SIZE_THRESHOLD = 20 * 1024 * 1024 # 20MB
INDEX_BATCH_SIZE = 2
SLACK_NUM_THREADS = 4
ENABLE_EXPENSIVE_EXPERT_CALLS = False
# Slack related constants
_SLACK_LIMIT = 900
FAST_TIMEOUT = 1
MAX_RETRIES = 7
MAX_CHANNELS_TO_LOG = 50
BOT_CHANNEL_MIN_BATCH_SIZE = 256
BOT_CHANNEL_PERCENTAGE_THRESHOLD = 0.95
# Download configuration
DOWNLOAD_CHUNK_SIZE = 1024 * 1024 # 1MB
SIZE_THRESHOLD_BUFFER = 64
NOTION_CONNECTOR_DISABLE_RECURSIVE_PAGE_LOOKUP = (
os.environ.get("NOTION_CONNECTOR_DISABLE_RECURSIVE_PAGE_LOOKUP", "").lower()
== "true"
)
SLIM_BATCH_SIZE = 100
# Notion API constants
_NOTION_PAGE_SIZE = 100
_NOTION_CALL_TIMEOUT = 30 # 30 seconds
_ITERATION_LIMIT = 100_000
#####
# Indexing Configs
#####
# NOTE: Currently only supported in the Confluence and Google Drive connectors +
# only handles some failures (Confluence = handles API call failures, Google
# Drive = handles failures pulling files / parsing them)
CONTINUE_ON_CONNECTOR_FAILURE = os.environ.get(
"CONTINUE_ON_CONNECTOR_FAILURE", ""
).lower() not in ["false", ""]
#####
# Confluence Connector Configs
#####
CONFLUENCE_CONNECTOR_LABELS_TO_SKIP = [
ignored_tag
for ignored_tag in os.environ.get("CONFLUENCE_CONNECTOR_LABELS_TO_SKIP", "").split(
","
)
if ignored_tag
]
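# Example (illustrative): with CONFLUENCE_CONNECTOR_LABELS_TO_SKIP="archived,draft,"
# the comprehension above yields ["archived", "draft"] (the empty fragment from the
# trailing comma is dropped), and with the variable unset it yields [].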
# Avoid fetching archived pages
CONFLUENCE_CONNECTOR_INDEX_ARCHIVED_PAGES = (
os.environ.get("CONFLUENCE_CONNECTOR_INDEX_ARCHIVED_PAGES", "").lower() == "true"
)
# Attachments exceeding this size will not be retrieved (in bytes)
CONFLUENCE_CONNECTOR_ATTACHMENT_SIZE_THRESHOLD = int(
os.environ.get("CONFLUENCE_CONNECTOR_ATTACHMENT_SIZE_THRESHOLD", 10 * 1024 * 1024)
)
# Attachments with more chars than this will not be indexed. This is to prevent extremely
# large files from freezing indexing. 200,000 is ~100 google doc pages.
CONFLUENCE_CONNECTOR_ATTACHMENT_CHAR_COUNT_THRESHOLD = int(
os.environ.get("CONFLUENCE_CONNECTOR_ATTACHMENT_CHAR_COUNT_THRESHOLD", 200_000)
)
_RAW_CONFLUENCE_CONNECTOR_USER_PROFILES_OVERRIDE = os.environ.get(
"CONFLUENCE_CONNECTOR_USER_PROFILES_OVERRIDE", ""
)
CONFLUENCE_CONNECTOR_USER_PROFILES_OVERRIDE = cast(
list[dict[str, str]] | None,
(
json.loads(_RAW_CONFLUENCE_CONNECTOR_USER_PROFILES_OVERRIDE)
if _RAW_CONFLUENCE_CONNECTOR_USER_PROFILES_OVERRIDE
else None
),
)
# enter as a floating point offset from UTC in hours (-24 < val < 24)
# this will be applied globally, so it probably makes sense to transition this to a
# per-connector setting at some point.
# For the default value, we assume that the user's local timezone is more likely to be
# correct (i.e. the configured user's timezone or the default server one) than UTC.
# https://developer.atlassian.com/cloud/confluence/cql-fields/#created
CONFLUENCE_TIMEZONE_OFFSET = float(
os.environ.get("CONFLUENCE_TIMEZONE_OFFSET", get_current_tz_offset())
)
CONFLUENCE_SYNC_TIME_BUFFER_SECONDS = int(
os.environ.get("CONFLUENCE_SYNC_TIME_BUFFER_SECONDS", ONE_DAY)
)
GOOGLE_DRIVE_CONNECTOR_SIZE_THRESHOLD = int(
os.environ.get("GOOGLE_DRIVE_CONNECTOR_SIZE_THRESHOLD", 10 * 1024 * 1024)
)
JIRA_CONNECTOR_LABELS_TO_SKIP = [
ignored_tag
for ignored_tag in os.environ.get("JIRA_CONNECTOR_LABELS_TO_SKIP", "").split(",")
if ignored_tag
]
JIRA_CONNECTOR_MAX_TICKET_SIZE = int(
os.environ.get("JIRA_CONNECTOR_MAX_TICKET_SIZE", 100 * 1024)
)
JIRA_SYNC_TIME_BUFFER_SECONDS = int(
os.environ.get("JIRA_SYNC_TIME_BUFFER_SECONDS", ONE_MINUTE)
)
JIRA_TIMEZONE_OFFSET = float(
os.environ.get("JIRA_TIMEZONE_OFFSET", get_current_tz_offset())
)
OAUTH_SLACK_CLIENT_ID = os.environ.get("OAUTH_SLACK_CLIENT_ID", "")
OAUTH_SLACK_CLIENT_SECRET = os.environ.get("OAUTH_SLACK_CLIENT_SECRET", "")
OAUTH_CONFLUENCE_CLOUD_CLIENT_ID = os.environ.get(
"OAUTH_CONFLUENCE_CLOUD_CLIENT_ID", ""
)
OAUTH_CONFLUENCE_CLOUD_CLIENT_SECRET = os.environ.get(
"OAUTH_CONFLUENCE_CLOUD_CLIENT_SECRET", ""
)
OAUTH_JIRA_CLOUD_CLIENT_ID = os.environ.get("OAUTH_JIRA_CLOUD_CLIENT_ID", "")
OAUTH_JIRA_CLOUD_CLIENT_SECRET = os.environ.get("OAUTH_JIRA_CLOUD_CLIENT_SECRET", "")
OAUTH_GOOGLE_DRIVE_CLIENT_ID = os.environ.get("OAUTH_GOOGLE_DRIVE_CLIENT_ID", "")
OAUTH_GOOGLE_DRIVE_CLIENT_SECRET = os.environ.get(
"OAUTH_GOOGLE_DRIVE_CLIENT_SECRET", ""
)
GOOGLE_DRIVE_WEB_OAUTH_REDIRECT_URI = os.environ.get("GOOGLE_DRIVE_WEB_OAUTH_REDIRECT_URI", "http://localhost:9380/v1/connector/google-drive/oauth/web/callback")
GMAIL_WEB_OAUTH_REDIRECT_URI = os.environ.get("GMAIL_WEB_OAUTH_REDIRECT_URI", "http://localhost:9380/v1/connector/gmail/oauth/web/callback")
CONFLUENCE_OAUTH_TOKEN_URL = "https://auth.atlassian.com/oauth/token"
RATE_LIMIT_MESSAGE_LOWERCASE = "Rate limit exceeded".lower()
_DEFAULT_PAGINATION_LIMIT = 1000
_PROBLEMATIC_EXPANSIONS = "body.storage.value"
_REPLACEMENT_EXPANSIONS = "body.view.value"
BOX_WEB_OAUTH_REDIRECT_URI = os.environ.get("BOX_WEB_OAUTH_REDIRECT_URI", "http://localhost:9380/v1/connector/box/oauth/web/callback")
GITHUB_CONNECTOR_BASE_URL = os.environ.get("GITHUB_CONNECTOR_BASE_URL") or None
class HtmlBasedConnectorTransformLinksStrategy(str, Enum):
# remove links entirely
STRIP = "strip"
# turn HTML links into markdown links
MARKDOWN = "markdown"
HTML_BASED_CONNECTOR_TRANSFORM_LINKS_STRATEGY = os.environ.get(
"HTML_BASED_CONNECTOR_TRANSFORM_LINKS_STRATEGY",
HtmlBasedConnectorTransformLinksStrategy.STRIP,
)
PARSE_WITH_TRAFILATURA = os.environ.get("PARSE_WITH_TRAFILATURA", "").lower() == "true"
WEB_CONNECTOR_IGNORED_CLASSES = os.environ.get(
"WEB_CONNECTOR_IGNORED_CLASSES", "sidebar,footer"
).split(",")
WEB_CONNECTOR_IGNORED_ELEMENTS = os.environ.get(
"WEB_CONNECTOR_IGNORED_ELEMENTS", "nav,footer,meta,script,style,symbol,aside"
).split(",")
AIRTABLE_CONNECTOR_SIZE_THRESHOLD = int(
os.environ.get("AIRTABLE_CONNECTOR_SIZE_THRESHOLD", 10 * 1024 * 1024)
)
ASANA_CONNECTOR_SIZE_THRESHOLD = int(
os.environ.get("ASANA_CONNECTOR_SIZE_THRESHOLD", 10 * 1024 * 1024)
)
IMAP_CONNECTOR_SIZE_THRESHOLD = int(
os.environ.get("IMAP_CONNECTOR_SIZE_THRESHOLD", 10 * 1024 * 1024)
)
ZENDESK_CONNECTOR_SKIP_ARTICLE_LABELS = os.environ.get(
"ZENDESK_CONNECTOR_SKIP_ARTICLE_LABELS", ""
).split(",")
_USER_NOT_FOUND = "Unknown Confluence User"
_COMMENT_EXPANSION_FIELDS = ["body.storage.value"]
_ATTACHMENT_EXPANSION_FIELDS = [
"version",
"space",
"metadata.labels",
]
_RESTRICTIONS_EXPANSION_FIELDS = [
"space",
"restrictions.read.restrictions.user",
"restrictions.read.restrictions.group",
"ancestors.restrictions.read.restrictions.user",
"ancestors.restrictions.read.restrictions.group",
]
_SLIM_DOC_BATCH_SIZE = 5000
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/config.py",
"license": "Apache License 2.0",
"lines": 245,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:common/data_source/confluence_connector.py |
"""Confluence connector"""
import copy
import json
import logging
import time
from datetime import datetime, timezone, timedelta
from pathlib import Path
from typing import Any, cast, Iterator, Callable, Generator
import requests
from typing_extensions import override
from urllib.parse import quote
import bs4
from atlassian.errors import ApiError
from atlassian import Confluence
from requests.exceptions import HTTPError
from common.data_source.config import INDEX_BATCH_SIZE, DocumentSource, CONTINUE_ON_CONNECTOR_FAILURE, \
CONFLUENCE_CONNECTOR_LABELS_TO_SKIP, CONFLUENCE_TIMEZONE_OFFSET, CONFLUENCE_CONNECTOR_USER_PROFILES_OVERRIDE, \
CONFLUENCE_SYNC_TIME_BUFFER_SECONDS, \
OAUTH_CONFLUENCE_CLOUD_CLIENT_ID, OAUTH_CONFLUENCE_CLOUD_CLIENT_SECRET, _DEFAULT_PAGINATION_LIMIT, \
_PROBLEMATIC_EXPANSIONS, _REPLACEMENT_EXPANSIONS, _USER_NOT_FOUND, _COMMENT_EXPANSION_FIELDS, \
_ATTACHMENT_EXPANSION_FIELDS, _PAGE_EXPANSION_FIELDS, ONE_DAY, ONE_HOUR, _RESTRICTIONS_EXPANSION_FIELDS, \
_SLIM_DOC_BATCH_SIZE, CONFLUENCE_CONNECTOR_ATTACHMENT_SIZE_THRESHOLD
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
InsufficientPermissionsError,
UnexpectedValidationError, CredentialExpiredError
)
from common.data_source.html_utils import format_document_soup
from common.data_source.interfaces import (
ConnectorCheckpoint,
CredentialsConnector,
SecondsSinceUnixEpoch,
SlimConnectorWithPermSync, StaticCredentialsProvider, CheckpointedConnector, SlimConnector,
CredentialsProviderInterface, ConfluenceUser, IndexingHeartbeatInterface, AttachmentProcessingResult,
CheckpointOutput
)
from common.data_source.models import ConnectorFailure, Document, TextSection, ImageSection, BasicExpertInfo, \
DocumentFailure, GenerateSlimDocumentOutput, SlimDocument, ExternalAccess
from common.data_source.utils import load_all_docs_from_checkpoint_connector, scoped_url, \
process_confluence_user_profiles_override, confluence_refresh_tokens, run_with_timeout, _handle_http_error, \
update_param_in_path, get_start_param_from_url, build_confluence_document_id, datetime_from_string, \
is_atlassian_date_error, validate_attachment_filetype
from rag.utils.redis_conn import RedisDB, REDIS_CONN
_USER_ID_TO_DISPLAY_NAME_CACHE: dict[str, str | None] = {}
_USER_EMAIL_CACHE: dict[str, str | None] = {}
class ConfluenceCheckpoint(ConnectorCheckpoint):
next_page_url: str | None
class ConfluenceRateLimitError(Exception):
pass
class OnyxConfluence:
"""
This is a custom Confluence class that:
A. overrides the default Confluence class to add a custom CQL method.
B.
This is necessary because the default Confluence class does not properly support cql expansions.
All methods are automatically wrapped with handle_confluence_rate_limit.
"""
CREDENTIAL_PREFIX = "connector:confluence:credential"
CREDENTIAL_TTL = 300 # 5 min
PROBE_TIMEOUT = 5 # 5 seconds
def __init__(
self,
is_cloud: bool,
url: str,
credentials_provider: CredentialsProviderInterface,
timeout: int | None = None,
scoped_token: bool = False,
# should generally not be passed in, but making it overridable for
# easier testing
confluence_user_profiles_override: list[dict[str, str]] | None = (
CONFLUENCE_CONNECTOR_USER_PROFILES_OVERRIDE
),
) -> None:
self.base_url = url #'/'.join(url.rstrip("/").split("/")[:-1])
url = scoped_url(url, "confluence") if scoped_token else url
self._is_cloud = is_cloud
self._url = url.rstrip("/")
self._credentials_provider = credentials_provider
self.scoped_token = scoped_token
self.redis_client: RedisDB | None = None
self.static_credentials: dict[str, Any] | None = None
if self._credentials_provider.is_dynamic():
self.redis_client = REDIS_CONN
else:
self.static_credentials = self._credentials_provider.get_credentials()
self._confluence = Confluence(url)
self.credential_key: str = (
self.CREDENTIAL_PREFIX
+ f":credential_{self._credentials_provider.get_provider_key()}"
)
self._kwargs: Any = None
self.shared_base_kwargs: dict[str, str | int | bool] = {
"api_version": "cloud" if is_cloud else "latest",
"backoff_and_retry": True,
"cloud": is_cloud,
}
if timeout:
self.shared_base_kwargs["timeout"] = timeout
self._confluence_user_profiles_override = (
process_confluence_user_profiles_override(confluence_user_profiles_override)
if confluence_user_profiles_override
else None
)
def _renew_credentials(self) -> tuple[dict[str, Any], bool]:
"""credential_json - the current json credentials
Returns a tuple
1. The up-to-date credentials
2. True if the credentials were updated
This method is intended to be used within a distributed lock.
Lock, call this, update credentials if the tokens were refreshed, then release
"""
# static credentials are preloaded, so no locking/redis required
if self.static_credentials:
return self.static_credentials, False
if not self.redis_client:
raise RuntimeError("self.redis_client is None")
# dynamic credentials need locking
# check redis first, then fallback to the DB
credential_raw = self.redis_client.get(self.credential_key)
if credential_raw is not None:
credential_bytes = cast(bytes, credential_raw)
credential_str = credential_bytes.decode("utf-8")
credential_json: dict[str, Any] = json.loads(credential_str)
else:
credential_json = self._credentials_provider.get_credentials()
if "confluence_refresh_token" not in credential_json:
# static credentials ... cache them permanently and return
self.static_credentials = credential_json
return credential_json, False
if not OAUTH_CONFLUENCE_CLOUD_CLIENT_ID:
raise RuntimeError("OAUTH_CONFLUENCE_CLOUD_CLIENT_ID must be set!")
if not OAUTH_CONFLUENCE_CLOUD_CLIENT_SECRET:
raise RuntimeError("OAUTH_CONFLUENCE_CLOUD_CLIENT_SECRET must be set!")
# check if we should refresh tokens. we're deciding to refresh halfway
# to expiration
now = datetime.now(timezone.utc)
created_at = datetime.fromisoformat(credential_json["created_at"])
expires_in: int = credential_json["expires_in"]
renew_at = created_at + timedelta(seconds=expires_in // 2)
if now <= renew_at:
# cached/current credentials are reasonably up to date
return credential_json, False
# we need to refresh
logging.info("Renewing Confluence Cloud credentials...")
new_credentials = confluence_refresh_tokens(
OAUTH_CONFLUENCE_CLOUD_CLIENT_ID,
OAUTH_CONFLUENCE_CLOUD_CLIENT_SECRET,
credential_json["cloud_id"],
credential_json["confluence_refresh_token"],
)
# store the new credentials to redis and to the db through the provider
# redis: we use a 5 min TTL because we are given a 10 minutes grace period
# when keys are rotated. it's easier to expire the cached credentials
# reasonably frequently rather than trying to handle strong synchronization
# between the db and redis everywhere the credentials might be updated
new_credential_str = json.dumps(new_credentials)
self.redis_client.set(
self.credential_key, new_credential_str, exp=self.CREDENTIAL_TTL
)
self._credentials_provider.set_credentials(new_credentials)
return new_credentials, True
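    # Intended call pattern (sketch; it mirrors _initialize_connection and the
    # rate-limited wrapper below): take the provider's lock, renew, and rebuild the
    # underlying client only when the tokens actually changed.
    #     with self._credentials_provider:
    #         credentials, renewed = self._renew_credentials()
    #         if renewed:
    #             self._confluence = self._initialize_connection_helper(credentials, **self._kwargs)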
@staticmethod
def _make_oauth2_dict(credentials: dict[str, Any]) -> dict[str, Any]:
oauth2_dict: dict[str, Any] = {}
if "confluence_refresh_token" in credentials:
oauth2_dict["client_id"] = OAUTH_CONFLUENCE_CLOUD_CLIENT_ID
oauth2_dict["token"] = {}
oauth2_dict["token"]["access_token"] = credentials[
"confluence_access_token"
]
return oauth2_dict
def _probe_connection(
self,
**kwargs: Any,
) -> None:
merged_kwargs = {**self.shared_base_kwargs, **kwargs}
# add special timeout to make sure that we don't hang indefinitely
merged_kwargs["timeout"] = self.PROBE_TIMEOUT
with self._credentials_provider:
credentials, _ = self._renew_credentials()
if self.scoped_token:
# v2 endpoint doesn't always work with scoped tokens, use v1
token = credentials["confluence_access_token"]
probe_url = f"{self.base_url}/rest/api/space?limit=1"
import requests
logging.info(f"First and Last 5 of token: {token[:5]}...{token[-5:]}")
try:
r = requests.get(
probe_url,
headers={"Authorization": f"Bearer {token}"},
timeout=10,
)
r.raise_for_status()
except HTTPError as e:
if e.response.status_code == 403:
logging.warning(
"scoped token authenticated but not valid for probe endpoint (spaces)"
)
else:
if "WWW-Authenticate" in e.response.headers:
logging.warning(
f"WWW-Authenticate: {e.response.headers['WWW-Authenticate']}"
)
logging.warning(f"Full error: {e.response.text}")
raise e
return
# probe connection with direct client, no retries
if "confluence_refresh_token" in credentials:
logging.info("Probing Confluence with OAuth Access Token.")
oauth2_dict: dict[str, Any] = OnyxConfluence._make_oauth2_dict(
credentials
)
url = (
f"https://api.atlassian.com/ex/confluence/{credentials['cloud_id']}"
)
confluence_client_with_minimal_retries = Confluence(
url=url, oauth2=oauth2_dict, **merged_kwargs
)
else:
logging.info("Probing Confluence with Personal Access Token.")
url = self._url
if self._is_cloud:
logging.info("running with cloud client")
confluence_client_with_minimal_retries = Confluence(
url=url,
username=credentials["confluence_username"],
password=credentials["confluence_access_token"],
**merged_kwargs,
)
else:
confluence_client_with_minimal_retries = Confluence(
url=url,
token=credentials["confluence_access_token"],
**merged_kwargs,
)
# This call sometimes hangs indefinitely, so we run it in a timeout
spaces = run_with_timeout(
timeout=10,
func=confluence_client_with_minimal_retries.get_all_spaces,
limit=1,
)
# uncomment the following for testing
# the following is an attempt to retrieve the user's timezone
            # Unfortunately, all data is returned in UTC regardless of the user's time zone,
            # even though CQL parses incoming times based on the user's time zone
# space_key = spaces["results"][0]["key"]
# space_details = confluence_client_with_minimal_retries.cql(f"space.key={space_key}+AND+type=space")
if not spaces:
raise RuntimeError(
f"No spaces found at {url}! "
"Check your credentials and wiki_base and make sure "
"is_cloud is set correctly."
)
logging.info("Confluence probe succeeded.")
def _initialize_connection(
self,
**kwargs: Any,
) -> None:
"""Called externally to init the connection in a thread safe manner."""
merged_kwargs = {**self.shared_base_kwargs, **kwargs}
with self._credentials_provider:
credentials, _ = self._renew_credentials()
self._confluence = self._initialize_connection_helper(
credentials, **merged_kwargs
)
self._kwargs = merged_kwargs
def _initialize_connection_helper(
self,
credentials: dict[str, Any],
**kwargs: Any,
) -> Confluence:
"""Called internally to init the connection. Distributed locking
to prevent multiple threads from modifying the credentials
must be handled around this function."""
confluence = None
# probe connection with direct client, no retries
if "confluence_refresh_token" in credentials:
logging.info("Connecting to Confluence Cloud with OAuth Access Token.")
oauth2_dict: dict[str, Any] = OnyxConfluence._make_oauth2_dict(credentials)
url = f"https://api.atlassian.com/ex/confluence/{credentials['cloud_id']}"
confluence = Confluence(url=url, oauth2=oauth2_dict, **kwargs)
else:
logging.info(
f"Connecting to Confluence with Personal Access Token as user: {credentials['confluence_username']}"
)
if self._is_cloud:
confluence = Confluence(
url=self._url,
username=credentials["confluence_username"],
password=credentials["confluence_access_token"],
**kwargs,
)
else:
confluence = Confluence(
url=self._url,
token=credentials["confluence_access_token"],
**kwargs,
)
return confluence
# https://developer.atlassian.com/cloud/confluence/rate-limiting/
# This uses the native rate limiting option provided by the
# confluence client and otherwise applies a simpler set of error handling.
def _make_rate_limited_confluence_method(
self, name: str, credential_provider: CredentialsProviderInterface | None
) -> Callable[..., Any]:
def wrapped_call(*args: list[Any], **kwargs: Any) -> Any:
MAX_RETRIES = 5
TIMEOUT = 600
timeout_at = time.monotonic() + TIMEOUT
for attempt in range(MAX_RETRIES):
if time.monotonic() > timeout_at:
raise TimeoutError(
f"Confluence call attempts took longer than {TIMEOUT} seconds."
)
# we're relying more on the client to rate limit itself
# and applying our own retries in a more specific set of circumstances
try:
if credential_provider:
with credential_provider:
credentials, renewed = self._renew_credentials()
if renewed:
self._confluence = self._initialize_connection_helper(
credentials, **self._kwargs
)
attr = getattr(self._confluence, name, None)
if attr is None:
# The underlying Confluence client doesn't have this attribute
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{name}'"
)
return attr(*args, **kwargs)
else:
attr = getattr(self._confluence, name, None)
if attr is None:
# The underlying Confluence client doesn't have this attribute
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{name}'"
)
return attr(*args, **kwargs)
except HTTPError as e:
delay_until = _handle_http_error(e, attempt)
logging.warning(
f"HTTPError in confluence call. "
f"Retrying in {delay_until} seconds..."
)
while time.monotonic() < delay_until:
# in the future, check a signal here to exit
time.sleep(1)
except AttributeError as e:
# Some error within the Confluence library, unclear why it fails.
# Users reported it to be intermittent, so just retry
if attempt == MAX_RETRIES - 1:
raise e
logging.exception(
"Confluence Client raised an AttributeError. Retrying..."
)
time.sleep(5)
return wrapped_call
def __getattr__(self, name: str) -> Any:
"""Dynamically intercept attribute/method access."""
attr = getattr(self._confluence, name, None)
if attr is None:
# The underlying Confluence client doesn't have this attribute
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{name}'"
)
# If it's not a method, just return it after ensuring token validity
if not callable(attr):
return attr
# skip methods that start with "_"
if name.startswith("_"):
return attr
# wrap the method with our retry handler
rate_limited_method: Callable[..., Any] = (
self._make_rate_limited_confluence_method(name, self._credentials_provider)
)
return rate_limited_method
def _try_one_by_one_for_paginated_url(
self,
url_suffix: str,
initial_start: int,
limit: int,
) -> Generator[dict[str, Any], None, str | None]:
"""
Go through `limit` items, starting at `initial_start` one by one (e.g. using
`limit=1` for each call).
If we encounter an error, we skip the item and try the next one. We will return
the items we were able to retrieve successfully.
Returns the expected next url_suffix. Returns None if it thinks we've hit the end.
TODO (chris): make this yield failures as well as successes.
TODO (chris): make this work for confluence cloud somehow.
"""
if self._is_cloud:
raise RuntimeError("This method is not implemented for Confluence Cloud.")
found_empty_page = False
temp_url_suffix = url_suffix
for ind in range(limit):
try:
temp_url_suffix = update_param_in_path(
url_suffix, "start", str(initial_start + ind)
)
temp_url_suffix = update_param_in_path(temp_url_suffix, "limit", "1")
logging.info(f"Making recovery confluence call to {temp_url_suffix}")
raw_response = self.get(path=temp_url_suffix, advanced_mode=True)
raw_response.raise_for_status()
latest_results = raw_response.json().get("results", [])
yield from latest_results
if not latest_results:
# no more results, break out of the loop
logging.info(
f"No results found for call '{temp_url_suffix}'"
"Stopping pagination."
)
found_empty_page = True
break
except Exception:
logging.exception(
f"Error in confluence call to {temp_url_suffix}. Continuing."
)
if found_empty_page:
return None
# if we got here, we successfully tried `limit` items
return update_param_in_path(url_suffix, "start", str(initial_start + limit))
def _paginate_url(
self,
url_suffix: str,
limit: int | None = None,
# Called with the next url to use to get the next page
next_page_callback: Callable[[str], None] | None = None,
force_offset_pagination: bool = False,
) -> Iterator[dict[str, Any]]:
"""
This will paginate through the top level query.
"""
if not limit:
limit = _DEFAULT_PAGINATION_LIMIT
url_suffix = update_param_in_path(url_suffix, "limit", str(limit))
while url_suffix:
logging.debug(f"Making confluence call to {url_suffix}")
try:
raw_response = self.get(
path=url_suffix,
advanced_mode=True,
params={
"body-format": "atlas_doc_format",
"expand": "body.atlas_doc_format",
},
)
except Exception as e:
logging.exception(f"Error in confluence call to {url_suffix}")
raise e
try:
raw_response.raise_for_status()
except Exception as e:
logging.warning(f"Error in confluence call to {url_suffix}")
# If the problematic expansion is in the url, replace it
# with the replacement expansion and try again
# If that fails, raise the error
if _PROBLEMATIC_EXPANSIONS in url_suffix:
logging.warning(
f"Replacing {_PROBLEMATIC_EXPANSIONS} with {_REPLACEMENT_EXPANSIONS}"
" and trying again."
)
url_suffix = url_suffix.replace(
_PROBLEMATIC_EXPANSIONS,
_REPLACEMENT_EXPANSIONS,
)
continue
# If we fail due to a 500, try one by one.
# NOTE: this iterative approach only works for server, since cloud uses cursor-based
# pagination
if raw_response.status_code == 500 and not self._is_cloud:
initial_start = get_start_param_from_url(url_suffix)
if initial_start is None:
# can't handle this if we don't have offset-based pagination
raise
# this will just yield the successful items from the batch
new_url_suffix = yield from self._try_one_by_one_for_paginated_url(
url_suffix,
initial_start=initial_start,
limit=limit,
)
# this means we ran into an empty page
if new_url_suffix is None:
if next_page_callback:
next_page_callback("")
break
url_suffix = new_url_suffix
continue
else:
logging.exception(
f"Error in confluence call to {url_suffix} \n"
f"Raw Response Text: {raw_response.text} \n"
f"Full Response: {raw_response.__dict__} \n"
f"Error: {e} \n"
)
raise
try:
next_response = raw_response.json()
except Exception as e:
logging.exception(
f"Failed to parse response as JSON. Response: {raw_response.__dict__}"
)
raise e
# Yield the results individually.
results = cast(list[dict[str, Any]], next_response.get("results", []))
# Note 1:
# Make sure we don't update the start by more than the amount
# of results we were able to retrieve. The Confluence API has a
# weird behavior where if you pass in a limit that is too large for
# the configured server, it will artificially limit the amount of
# results returned BUT will not apply this to the start parameter.
# This will cause us to miss results.
#
# Note 2:
# We specifically perform manual yielding (i.e., `for x in xs: yield x`) as opposed to using a `yield from xs`
# because we *have to call the `next_page_callback`* prior to yielding the last element!
#
# If we did:
#
# ```py
# yield from results
# if next_page_callback:
# next_page_callback(url_suffix)
# ```
#
# then the logic would fail since the iterator would finish (and the calling scope would exit out of its driving
# loop) prior to the callback being called.
old_url_suffix = url_suffix
updated_start = get_start_param_from_url(old_url_suffix)
url_suffix = cast(str, next_response.get("_links", {}).get("next", ""))
for i, result in enumerate(results):
updated_start += 1
if url_suffix and next_page_callback and i == len(results) - 1:
# update the url if we're on the last result in the page
if not self._is_cloud:
# If confluence claims there are more results, we update the start param
# based on how many results were returned and try again.
url_suffix = update_param_in_path(
url_suffix, "start", str(updated_start)
)
# notify the caller of the new url
next_page_callback(url_suffix)
elif force_offset_pagination and i == len(results) - 1:
url_suffix = update_param_in_path(
old_url_suffix, "start", str(updated_start)
)
yield result
# we've observed that Confluence sometimes returns a next link despite giving
# 0 results. This is a bug with Confluence, so we need to check for it and
# stop paginating.
if url_suffix and not results:
logging.info(
f"No results found for call '{old_url_suffix}' despite next link "
"being present. Stopping pagination."
)
break
def build_cql_url(self, cql: str, expand: str | None = None) -> str:
expand_string = f"&expand={expand}" if expand else ""
return f"rest/api/content/search?cql={cql}{expand_string}"
def paginated_cql_retrieval(
self,
cql: str,
expand: str | None = None,
limit: int | None = None,
) -> Iterator[dict[str, Any]]:
"""
The content/search endpoint can be used to fetch pages, attachments, and comments.
"""
cql_url = self.build_cql_url(cql, expand)
yield from self._paginate_url(cql_url, limit)
def paginated_page_retrieval(
self,
cql_url: str,
limit: int,
# Called with the next url to use to get the next page
next_page_callback: Callable[[str], None] | None = None,
) -> Iterator[dict[str, Any]]:
"""
Error handling (and testing) wrapper for _paginate_url,
because the current approach to page retrieval involves handling the
next page links manually.
"""
try:
yield from self._paginate_url(
cql_url, limit=limit, next_page_callback=next_page_callback
)
except Exception as e:
logging.exception(f"Error in paginated_page_retrieval: {e}")
raise e
def cql_paginate_all_expansions(
self,
cql: str,
expand: str | None = None,
limit: int | None = None,
) -> Iterator[dict[str, Any]]:
"""
This function will paginate through the top level query first, then
paginate through all the expansions.
"""
def _traverse_and_update(data: dict | list) -> None:
if isinstance(data, dict):
next_url = data.get("_links", {}).get("next")
if next_url and "results" in data:
data["results"].extend(self._paginate_url(next_url, limit=limit))
for value in data.values():
_traverse_and_update(value)
elif isinstance(data, list):
for item in data:
_traverse_and_update(item)
for confluence_object in self.paginated_cql_retrieval(cql, expand, limit):
_traverse_and_update(confluence_object)
yield confluence_object
def paginated_cql_user_retrieval(
self,
expand: str | None = None,
limit: int | None = None,
) -> Iterator[ConfluenceUser]:
"""
The search/user endpoint can be used to fetch users.
It's a separate endpoint from the content/search endpoint used only for users.
It's very similar to the content/search endpoint.
"""
# this is needed since there is a live bug with Confluence Server/Data Center
# where not all users are returned by the APIs. This is a workaround needed until
# that is patched.
if self._confluence_user_profiles_override:
yield from self._confluence_user_profiles_override
elif self._is_cloud:
cql = "type=user"
url = "rest/api/search/user"
expand_string = f"&expand={expand}" if expand else ""
url += f"?cql={cql}{expand_string}"
for user_result in self._paginate_url(
url, limit, force_offset_pagination=True
):
user = user_result["user"]
yield ConfluenceUser(
user_id=user["accountId"],
username=None,
display_name=user["displayName"],
email=user.get("email"),
type=user["accountType"],
)
else:
for user in self._paginate_url("rest/api/user/list", limit):
yield ConfluenceUser(
user_id=user["userKey"],
username=user["username"],
display_name=user["displayName"],
email=None,
type=user.get("type", "user"),
)
def paginated_groups_by_user_retrieval(
self,
user_id: str, # accountId in Cloud, userKey in Server
limit: int | None = None,
) -> Iterator[dict[str, Any]]:
"""
        This is not an SQL-like query.
        It's a Confluence-specific endpoint that can be used to fetch groups.
"""
user_field = "accountId" if self._is_cloud else "key"
user_value = user_id
# Server uses userKey (but calls it key during the API call), Cloud uses accountId
user_query = f"{user_field}={quote(user_value)}"
url = f"rest/api/user/memberof?{user_query}"
yield from self._paginate_url(url, limit, force_offset_pagination=True)
def paginated_groups_retrieval(
self,
limit: int | None = None,
) -> Iterator[dict[str, Any]]:
"""
        This is not an SQL-like query.
        It's a Confluence-specific endpoint that can be used to fetch groups.
"""
yield from self._paginate_url("rest/api/group", limit)
def paginated_group_members_retrieval(
self,
group_name: str,
limit: int | None = None,
) -> Iterator[dict[str, Any]]:
"""
        This is not an SQL-like query.
        It's a Confluence-specific endpoint that can be used to fetch the members of a group.
THIS DOESN'T WORK FOR SERVER because it breaks when there is a slash in the group name.
E.g. neither "test/group" nor "test%2Fgroup" works for confluence.
"""
group_name = quote(group_name)
yield from self._paginate_url(f"rest/api/group/{group_name}/member", limit)
def get_all_space_permissions_server(
self,
space_key: str,
) -> list[dict[str, Any]]:
"""
        This is a Confluence Server-specific method that can be used to
        fetch the permissions of a space.
        It provides better logging than calling the get_space_permissions method
        because it returns a jsonrpc response.
TODO: Make this call these endpoints for newer confluence versions:
- /rest/api/space/{spaceKey}/permissions
- /rest/api/space/{spaceKey}/permissions/anonymous
"""
url = "rpc/json-rpc/confluenceservice-v2"
data = {
"jsonrpc": "2.0",
"method": "getSpacePermissionSets",
"id": 7,
"params": [space_key],
}
response = self.post(url, data=data)
logging.debug(f"jsonrpc response: {response}")
if not response.get("result"):
logging.warning(
f"No jsonrpc response for space permissions for space {space_key}"
f"\nResponse: {response}"
)
return response.get("result", [])
def get_current_user(self, expand: str | None = None) -> Any:
"""
Implements a method that isn't in the third party client.
Get information about the current user
:param expand: OPTIONAL expand for get status of user.
Possible param is "status". Results are "Active, Deactivated"
:return: Returns the user details
"""
from atlassian.errors import ApiPermissionError # type:ignore
url = "rest/api/user/current"
params = {}
if expand:
params["expand"] = expand
try:
response = self.get(url, params=params)
except HTTPError as e:
if e.response.status_code == 403:
raise ApiPermissionError(
"The calling user does not have permission", reason=e
)
raise
return response
def get_user_email_from_username__server(
confluence_client: OnyxConfluence, user_name: str
) -> str | None:
global _USER_EMAIL_CACHE
if _USER_EMAIL_CACHE.get(user_name) is None:
try:
response = confluence_client.get_mobile_parameters(user_name)
email = response.get("email")
except Exception:
logging.warning(f"failed to get confluence email for {user_name}")
# For now, we'll just return None and log a warning. This means
# we will keep retrying to get the email every group sync.
email = None
# We may want to just return a string that indicates failure so we don't
# keep retrying
# email = f"FAILED TO GET CONFLUENCE EMAIL FOR {user_name}"
_USER_EMAIL_CACHE[user_name] = email
return _USER_EMAIL_CACHE[user_name]
def _get_user(confluence_client: OnyxConfluence, user_id: str) -> str:
"""Get Confluence Display Name based on the account-id or userkey value
Args:
user_id (str): The user id (i.e: the account-id or userkey)
confluence_client (Confluence): The Confluence Client
Returns:
str: The User Display Name. 'Unknown User' if the user is deactivated or not found
"""
global _USER_ID_TO_DISPLAY_NAME_CACHE
if _USER_ID_TO_DISPLAY_NAME_CACHE.get(user_id) is None:
try:
result = confluence_client.get_user_details_by_userkey(user_id)
found_display_name = result.get("displayName")
except Exception:
found_display_name = None
if not found_display_name:
try:
result = confluence_client.get_user_details_by_accountid(user_id)
found_display_name = result.get("displayName")
except Exception:
found_display_name = None
_USER_ID_TO_DISPLAY_NAME_CACHE[user_id] = found_display_name
return _USER_ID_TO_DISPLAY_NAME_CACHE.get(user_id) or _USER_NOT_FOUND
def sanitize_attachment_title(title: str) -> str:
"""
Sanitize the attachment title to be a valid HTML attribute.
"""
return title.replace("<", "_").replace(">", "_").replace(" ", "_").replace(":", "_")
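# Illustrative sketch (not from the original source): the replacement chain above
# maps, e.g., sanitize_attachment_title("design spec: v2") -> "design_spec__v2",
# so the result can safely be embedded in the <attachment> placeholder tag below.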
def extract_text_from_confluence_html(
confluence_client: OnyxConfluence,
confluence_object: dict[str, Any],
fetched_titles: set[str],
) -> str:
"""Parse a Confluence html page and replace the 'user id' by the real
User Display Name
Args:
confluence_object (dict): The confluence object as a dict
confluence_client (Confluence): Confluence client
fetched_titles (set[str]): The titles of the pages that have already been fetched
Returns:
str: The loaded and formatted Confluence page
"""
body = confluence_object["body"]
object_html = body.get("storage", body.get("view", {})).get("value")
soup = bs4.BeautifulSoup(object_html, "html.parser")
_remove_macro_stylings(soup=soup)
for user in soup.findAll("ri:user"):
user_id = (
user.attrs["ri:account-id"]
if "ri:account-id" in user.attrs
else user.get("ri:userkey")
)
if not user_id:
logging.warning(
"ri:userkey not found in ri:user element. " f"Found attrs: {user.attrs}"
)
continue
# Include @ sign for tagging, more clear for LLM
user.replaceWith("@" + _get_user(confluence_client, user_id))
for html_page_reference in soup.findAll("ac:structured-macro"):
# Here, we only want to process page within page macros
if html_page_reference.attrs.get("ac:name") != "include":
continue
page_data = html_page_reference.find("ri:page")
if not page_data:
logging.warning(
f"Skipping retrieval of {html_page_reference} because because page data is missing"
)
continue
page_title = page_data.attrs.get("ri:content-title")
if not page_title:
# only fetch pages that have a title
logging.warning(
f"Skipping retrieval of {html_page_reference} because it has no title"
)
continue
if page_title in fetched_titles:
# prevent recursive fetching of pages
logging.debug(f"Skipping {page_title} because it has already been fetched")
continue
fetched_titles.add(page_title)
# Wrap this in a try-except because there are some pages that might not exist
try:
page_query = f"type=page and title='{quote(page_title)}'"
page_contents: dict[str, Any] | None = None
# Confluence enforces title uniqueness, so we should only get one result here
for page in confluence_client.paginated_cql_retrieval(
cql=page_query,
expand="body.storage.value",
limit=1,
):
page_contents = page
break
except Exception as e:
logging.warning(
f"Error getting page contents for object {confluence_object}: {e}"
)
continue
if not page_contents:
continue
text_from_page = extract_text_from_confluence_html(
confluence_client=confluence_client,
confluence_object=page_contents,
fetched_titles=fetched_titles,
)
html_page_reference.replaceWith(text_from_page)
for html_link_body in soup.findAll("ac:link-body"):
# This extracts the text from inline links in the page so they can be
# represented in the document text as plain text
try:
text_from_link = html_link_body.text
html_link_body.replaceWith(f"(LINK TEXT: {text_from_link})")
except Exception as e:
logging.warning(f"Error processing ac:link-body: {e}")
for html_attachment in soup.findAll("ri:attachment"):
# This extracts the text from inline attachments in the page so they can be
# represented in the document text as plain text
try:
html_attachment.replaceWith(
f"<attachment>{sanitize_attachment_title(html_attachment.attrs['ri:filename'])}</attachment>"
) # to be replaced later
except Exception as e:
logging.warning(f"Error processing ac:attachment: {e}")
return format_document_soup(soup)
def _remove_macro_stylings(soup: bs4.BeautifulSoup) -> None:
for macro_root in soup.findAll("ac:structured-macro"):
if not isinstance(macro_root, bs4.Tag):
continue
macro_styling = macro_root.find(name="ac:parameter", attrs={"ac:name": "page"})
if not macro_styling or not isinstance(macro_styling, bs4.Tag):
continue
macro_styling.extract()
def get_page_restrictions(
confluence_client: OnyxConfluence,
page_id: str,
page_restrictions: dict[str, Any],
ancestors: list[dict[str, Any]],
) -> ExternalAccess | None:
"""
Get page access restrictions for a Confluence page.
This functionality requires Enterprise Edition.
Args:
confluence_client: OnyxConfluence client instance
page_id: The ID of the page
page_restrictions: Dictionary containing page restriction data
ancestors: List of ancestor pages with their restriction data
Returns:
ExternalAccess object for the page. None if EE is not enabled or no restrictions found.
"""
# Fetch the EE implementation
"""
ee_get_all_page_restrictions = cast(
Callable[
[OnyxConfluence, str, dict[str, Any], list[dict[str, Any]]],
ExternalAccess | None,
],
fetch_versioned_implementation(
"onyx.external_permissions.confluence.page_access", "get_page_restrictions"
),
)
return ee_get_all_page_restrictions(
confluence_client, page_id, page_restrictions, ancestors
)"""
return None
def get_all_space_permissions(
confluence_client: OnyxConfluence,
is_cloud: bool,
) -> dict[str, ExternalAccess]:
"""
Get access permissions for all spaces in Confluence.
This functionality requires Enterprise Edition.
Args:
confluence_client: OnyxConfluence client instance
is_cloud: Whether this is a Confluence Cloud instance
Returns:
Dictionary mapping space keys to ExternalAccess objects. Empty dict if EE is not enabled.
"""
"""
# Fetch the EE implementation
ee_get_all_space_permissions = cast(
Callable[
[OnyxConfluence, bool],
dict[str, ExternalAccess],
],
fetch_versioned_implementation(
"onyx.external_permissions.confluence.space_access",
"get_all_space_permissions",
),
)
return ee_get_all_space_permissions(confluence_client, is_cloud)"""
return {}
def _make_attachment_link(
confluence_client: "OnyxConfluence",
attachment: dict[str, Any],
parent_content_id: str | None = None,
) -> str | None:
download_link = ""
from urllib.parse import urlparse
netloc = urlparse(confluence_client.url).hostname
if netloc == "api.atlassian.com" or (netloc and netloc.endswith(".api.atlassian.com")):
# if "api.atlassian.com" in confluence_client.url:
# https://developer.atlassian.com/cloud/confluence/rest/v1/api-group-content---attachments/#api-wiki-rest-api-content-id-child-attachment-attachmentid-download-get
if not parent_content_id:
logging.warning(
"parent_content_id is required to download attachments from Confluence Cloud!"
)
return None
download_link = (
confluence_client.url
+ f"/rest/api/content/{parent_content_id}/child/attachment/{attachment['id']}/download"
)
else:
download_link = confluence_client.url + attachment["_links"]["download"]
return download_link
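# Illustrative only (hypothetical IDs): when the client URL points at api.atlassian.com
# (Confluence Cloud), the link has the shape
# <base>/rest/api/content/12345/child/attachment/67890/download,
# whereas Server simply appends attachment["_links"]["download"] to the base URL.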
def _process_image_attachment(
confluence_client: "OnyxConfluence",
attachment: dict[str, Any],
raw_bytes: bytes,
media_type: str,
) -> AttachmentProcessingResult:
"""Process an image attachment by saving it without generating a summary."""
return AttachmentProcessingResult(text="", file_blob=raw_bytes, file_name=attachment.get("title", "unknown_title"), error=None)
def process_attachment(
confluence_client: "OnyxConfluence",
attachment: dict[str, Any],
parent_content_id: str | None,
allow_images: bool,
) -> AttachmentProcessingResult:
"""
Processes a Confluence attachment. If it's a document, extracts text,
or if it's an image, stores it for later analysis. Returns a structured result.
"""
try:
# Get the media type from the attachment metadata
media_type: str = attachment.get("metadata", {}).get("mediaType", "")
# Validate the attachment type
if not validate_attachment_filetype(attachment):
return AttachmentProcessingResult(
text=None,
file_blob=None,
file_name=None,
error=f"Unsupported file type: {media_type}",
)
attachment_link = _make_attachment_link(
confluence_client, attachment, parent_content_id
)
if not attachment_link:
return AttachmentProcessingResult(
text=None, file_blob=None, file_name=None, error="Failed to make attachment link"
)
attachment_size = attachment["extensions"]["fileSize"]
if media_type.startswith("image/"):
if not allow_images:
return AttachmentProcessingResult(
text=None,
file_blob=None,
file_name=None,
error="Image downloading is not enabled",
)
else:
if attachment_size > CONFLUENCE_CONNECTOR_ATTACHMENT_SIZE_THRESHOLD:
logging.warning(
f"Skipping {attachment_link} due to size. "
f"size={attachment_size} "
f"threshold={CONFLUENCE_CONNECTOR_ATTACHMENT_SIZE_THRESHOLD}"
)
return AttachmentProcessingResult(
text=None,
file_blob=None,
file_name=None,
error=f"Attachment text too long: {attachment_size} chars",
)
logging.info(
f"Downloading attachment: "
f"title={attachment['title']} "
f"length={attachment_size} "
f"link={attachment_link}"
)
# Download the attachment
resp: requests.Response = confluence_client._session.get(attachment_link)
if resp.status_code != 200:
logging.warning(
f"Failed to fetch {attachment_link} with status code {resp.status_code}"
)
return AttachmentProcessingResult(
text=None,
file_blob=None,
file_name=None,
error=f"Attachment download status code is {resp.status_code}",
)
raw_bytes = resp.content
if not raw_bytes:
return AttachmentProcessingResult(
text=None, file_blob=None, file_name=None, error="attachment.content is None"
)
# Process image attachments
if media_type.startswith("image/"):
return _process_image_attachment(
confluence_client, attachment, raw_bytes, media_type
)
# Process document attachments
try:
return AttachmentProcessingResult(text="",file_blob=raw_bytes, file_name=attachment.get("title", "unknown_title"), error=None)
except Exception as e:
logging.exception(e)
return AttachmentProcessingResult(
text=None, file_blob=None, file_name=None, error=f"Failed to extract text: {e}"
)
except Exception as e:
return AttachmentProcessingResult(
text=None, file_blob=None, file_name=None, error=f"Failed to process attachment: {e}"
)
def convert_attachment_to_content(
confluence_client: "OnyxConfluence",
attachment: dict[str, Any],
page_id: str,
allow_images: bool,
) -> tuple[str | None, bytes | bytearray | None] | None:
"""
Facade function which:
1. Validates attachment type
2. Extracts content or stores image for later processing
3. Returns (content_text, stored_file_name) or None if we should skip it
"""
media_type = attachment.get("metadata", {}).get("mediaType", "")
# Quick check for unsupported types:
if media_type.startswith("video/") or media_type == "application/gliffy+json":
logging.warning(
f"Skipping unsupported attachment type: '{media_type}' for {attachment['title']}"
)
return None
result = process_attachment(confluence_client, attachment, page_id, allow_images)
if result.error is not None:
logging.warning(
f"Attachment {attachment['title']} encountered error: {result.error}"
)
return None
return result.file_name, result.file_blob
class ConfluenceConnector(
CheckpointedConnector[ConfluenceCheckpoint],
SlimConnector,
SlimConnectorWithPermSync,
CredentialsConnector,
):
def __init__(
self,
wiki_base: str,
is_cloud: bool,
space: str = "",
page_id: str = "",
index_recursively: bool = False,
cql_query: str | None = None,
batch_size: int = INDEX_BATCH_SIZE,
continue_on_failure: bool = CONTINUE_ON_CONNECTOR_FAILURE,
# if a page has one of the labels specified in this list, we will just
# skip it. This is generally used to avoid indexing extra sensitive
# pages.
labels_to_skip: list[str] = CONFLUENCE_CONNECTOR_LABELS_TO_SKIP,
timezone_offset: float = CONFLUENCE_TIMEZONE_OFFSET,
time_buffer_seconds: int = CONFLUENCE_SYNC_TIME_BUFFER_SECONDS,
scoped_token: bool = False,
) -> None:
self.wiki_base = wiki_base
self.is_cloud = is_cloud
self.space = space
self.page_id = page_id
self.index_recursively = index_recursively
self.cql_query = cql_query
self.batch_size = batch_size
self.labels_to_skip = labels_to_skip
self.timezone_offset = timezone_offset
self.time_buffer_seconds = max(0, time_buffer_seconds)
self.scoped_token = scoped_token
self._confluence_client: OnyxConfluence | None = None
self._low_timeout_confluence_client: OnyxConfluence | None = None
self._fetched_titles: set[str] = set()
self.allow_images = False
# Track document names to detect duplicates
self._document_name_counts: dict[str, int] = {}
self._document_name_paths: dict[str, list[str]] = {}
# Remove trailing slash from wiki_base if present
self.wiki_base = wiki_base.rstrip("/")
"""
If nothing is provided, we default to fetching all pages.
Only one (or none) of the following options should be specified, so
the order shouldn't matter.
However, we use elif to ensure that only one of them is enforced.
"""
base_cql_page_query = "type=page"
if cql_query:
base_cql_page_query = cql_query
elif page_id:
if index_recursively:
base_cql_page_query += f" and (ancestor='{page_id}' or id='{page_id}')"
else:
base_cql_page_query += f" and id='{page_id}'"
elif space:
uri_safe_space = quote(space)
base_cql_page_query += f" and space='{uri_safe_space}'"
self.base_cql_page_query = base_cql_page_query
self.cql_label_filter = ""
if labels_to_skip:
labels_to_skip = list(set(labels_to_skip))
comma_separated_labels = ",".join(
f"'{quote(label)}'" for label in labels_to_skip
)
self.cql_label_filter = f" and label not in ({comma_separated_labels})"
self.timezone: timezone = timezone(offset=timedelta(hours=timezone_offset))
self.credentials_provider: CredentialsProviderInterface | None = None
self.probe_kwargs = {
"max_backoff_retries": 6,
"max_backoff_seconds": 10,
}
self.final_kwargs = {
"max_backoff_retries": 10,
"max_backoff_seconds": 60,
}
# deprecated
self.continue_on_failure = continue_on_failure
def set_allow_images(self, value: bool) -> None:
logging.info(f"Setting allow_images to {value}.")
self.allow_images = value
def _adjust_start_for_query(
self, start: SecondsSinceUnixEpoch | None
) -> SecondsSinceUnixEpoch | None:
if not start or start <= 0:
return start
if self.time_buffer_seconds <= 0:
return start
return max(0.0, start - self.time_buffer_seconds)
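# Illustrative only: with time_buffer_seconds=600 and start=1_700_000_000, the adjusted
# query start becomes 1_699_999_400, i.e. the CQL window is widened slightly so pages
# modified right around the boundary are not missed on incremental runs.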
def _is_newer_than_start(
self, doc_time: datetime | None, start: SecondsSinceUnixEpoch | None
) -> bool:
if not start or start <= 0:
return True
if doc_time is None:
return True
return doc_time.timestamp() > start
@property
def confluence_client(self) -> OnyxConfluence:
if self._confluence_client is None:
raise ConnectorMissingCredentialError("Confluence")
return self._confluence_client
@property
def low_timeout_confluence_client(self) -> OnyxConfluence:
if self._low_timeout_confluence_client is None:
raise ConnectorMissingCredentialError("Confluence")
return self._low_timeout_confluence_client
def set_credentials_provider(
self, credentials_provider: CredentialsProviderInterface
) -> None:
self.credentials_provider = credentials_provider
# raises exception if there's a problem
confluence_client = OnyxConfluence(
is_cloud=self.is_cloud,
url=self.wiki_base,
credentials_provider=credentials_provider,
scoped_token=self.scoped_token,
)
confluence_client._probe_connection(**self.probe_kwargs)
confluence_client._initialize_connection(**self.final_kwargs)
self._confluence_client = confluence_client
# create a low timeout confluence client for sync flows
low_timeout_confluence_client = OnyxConfluence(
is_cloud=self.is_cloud,
url=self.wiki_base,
credentials_provider=credentials_provider,
timeout=3,
scoped_token=self.scoped_token,
)
low_timeout_confluence_client._probe_connection(**self.probe_kwargs)
low_timeout_confluence_client._initialize_connection(**self.final_kwargs)
self._low_timeout_confluence_client = low_timeout_confluence_client
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
raise NotImplementedError("Use set_credentials_provider with this connector.")
def _construct_page_cql_query(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> str:
"""
Constructs a CQL query for use in the confluence API. See
https://developer.atlassian.com/server/confluence/advanced-searching-using-cql/
for more information. This is JUST the CQL, not the full URL used to hit the API.
Use _build_page_retrieval_url to get the full URL.
"""
page_query = self.base_cql_page_query + self.cql_label_filter
# Add time filters
query_start = self._adjust_start_for_query(start)
if query_start:
formatted_start_time = datetime.fromtimestamp(
query_start, tz=self.timezone
).strftime("%Y-%m-%d %H:%M")
page_query += f" and lastmodified >= '{formatted_start_time}'"
if end:
formatted_end_time = datetime.fromtimestamp(end, tz=self.timezone).strftime(
"%Y-%m-%d %H:%M"
)
page_query += f" and lastmodified <= '{formatted_end_time}'"
page_query += " order by lastmodified asc"
return page_query
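# Illustrative only (hypothetical space key and dates): a resulting CQL string looks like
# "type=page and space='ENG' and lastmodified >= '2024-01-01 00:00'
#  and lastmodified <= '2024-02-01 00:00' order by lastmodified asc",
# optionally with the label-exclusion filter appended before the time clauses.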
def _construct_attachment_query(
self,
confluence_page_id: str,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> str:
attachment_query = f"type=attachment and container='{confluence_page_id}'"
attachment_query += self.cql_label_filter
# Add time filters to avoid reprocessing unchanged attachments during refresh
query_start = self._adjust_start_for_query(start)
if query_start:
formatted_start_time = datetime.fromtimestamp(
query_start, tz=self.timezone
).strftime("%Y-%m-%d %H:%M")
attachment_query += f" and lastmodified >= '{formatted_start_time}'"
if end:
formatted_end_time = datetime.fromtimestamp(end, tz=self.timezone).strftime(
"%Y-%m-%d %H:%M"
)
attachment_query += f" and lastmodified <= '{formatted_end_time}'"
attachment_query += " order by lastmodified asc"
return attachment_query
def _get_comment_string_for_page_id(self, page_id: str) -> str:
comment_string = ""
comment_cql = f"type=comment and container='{page_id}'"
comment_cql += self.cql_label_filter
expand = ",".join(_COMMENT_EXPANSION_FIELDS)
for comment in self.confluence_client.paginated_cql_retrieval(
cql=comment_cql,
expand=expand,
):
comment_string += "\nComment:\n"
comment_string += extract_text_from_confluence_html(
confluence_client=self.confluence_client,
confluence_object=comment,
fetched_titles=set(),
)
return comment_string
def _convert_page_to_document(
self, page: dict[str, Any]
) -> Document | ConnectorFailure:
"""
Converts a Confluence page to a Document object.
Includes the page content, comments, and attachments.
"""
page_id = page_url = ""
try:
# Extract basic page information
page_id = page["id"]
page_title = page["title"]
logging.info(f"Converting page {page_title} to document")
page_url = build_confluence_document_id(
self.wiki_base, page["_links"]["webui"], self.is_cloud
)
# Build hierarchical path for semantic identifier
space_name = page.get("space", {}).get("name", "")
# Build path from ancestors
path_parts = []
if space_name:
path_parts.append(space_name)
# Add ancestor pages to path if available
if "ancestors" in page and page["ancestors"]:
for ancestor in page["ancestors"]:
ancestor_title = ancestor.get("title", "")
if ancestor_title:
path_parts.append(ancestor_title)
# Add current page title
path_parts.append(page_title)
# Track page names for duplicate detection
full_path = " / ".join(path_parts) if len(path_parts) > 1 else page_title
# Count occurrences of this page title
if page_title not in self._document_name_counts:
self._document_name_counts[page_title] = 0
self._document_name_paths[page_title] = []
self._document_name_counts[page_title] += 1
self._document_name_paths[page_title].append(full_path)
# Use simple name if no duplicates, otherwise use full path
if self._document_name_counts[page_title] == 1:
semantic_identifier = page_title
else:
semantic_identifier = full_path
# Get the page content
page_content = extract_text_from_confluence_html(
self.confluence_client, page, self._fetched_titles
)
# Create the main section for the page content
sections: list[TextSection | ImageSection] = [
TextSection(text=page_content, link=page_url)
]
# Process comments if available
comment_text = self._get_comment_string_for_page_id(page_id)
if comment_text:
sections.append(
TextSection(text=comment_text, link=f"{page_url}#comments")
)
# Note: attachments are no longer merged into the page document.
# They are indexed as separate documents downstream.
# Extract metadata
metadata = {}
if "space" in page:
metadata["space"] = page["space"].get("name", "")
# Extract labels
labels = []
if "metadata" in page and "labels" in page["metadata"]:
for label in page["metadata"]["labels"].get("results", []):
labels.append(label.get("name", ""))
if labels:
metadata["labels"] = labels
# Extract owners
primary_owners = []
if "version" in page and "by" in page["version"]:
author = page["version"]["by"]
display_name = author.get("displayName", "Unknown")
email = author.get("email", "unknown@domain.invalid")
primary_owners.append(
BasicExpertInfo(display_name=display_name, email=email)
)
# Create the document
return Document(
id=page_url,
source=DocumentSource.CONFLUENCE,
semantic_identifier=semantic_identifier,
extension=".html", # Confluence pages are HTML
blob=page_content.encode("utf-8"), # Encode page content as bytes
doc_updated_at=datetime_from_string(page["version"]["when"]),
size_bytes=len(page_content.encode("utf-8")), # Calculate size in bytes
primary_owners=primary_owners if primary_owners else None,
metadata=metadata if metadata else None,
)
except Exception as e:
logging.error(f"Error converting page {page.get('id', 'unknown')}: {e}")
if is_atlassian_date_error(e): # propagate error to be caught and retried
raise
return ConnectorFailure(
failed_document=DocumentFailure(
document_id=page_id,
document_link=page_url,
),
failure_message=f"Error converting page {page.get('id', 'unknown')}: {e}",
exception=e,
)
def _fetch_page_attachments(
self,
page: dict[str, Any],
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> tuple[list[Document], list[ConnectorFailure]]:
"""
Fetches the attachments on the given page and converts each one into its own
Document. Returns the attachment Documents along with any ConnectorFailures
encountered while processing them.
"""
attachment_query = self._construct_attachment_query(page["id"], start, end)
attachment_failures: list[ConnectorFailure] = []
attachment_docs: list[Document] = []
page_url = ""
for attachment in self.confluence_client.paginated_cql_retrieval(
cql=attachment_query,
expand=",".join(_ATTACHMENT_EXPANSION_FIELDS),
):
media_type: str = attachment.get("metadata", {}).get("mediaType", "")
# TODO(rkuo): this check is partially redundant with validate_attachment_filetype
# and checks in convert_attachment_to_content/process_attachment
# but doing the check here avoids an unnecessary download. Due for refactoring.
if not self.allow_images:
if media_type.startswith("image/"):
logging.info(
f"Skipping attachment because allow images is False: {attachment['title']}"
)
continue
if not validate_attachment_filetype(
attachment,
):
logging.info(
f"Skipping attachment because it is not an accepted file type: {attachment['title']}"
)
continue
logging.info(
f"Processing attachment: {attachment['title']} attached to page {page['title']}"
)
# Attachment document id: use the download URL for stable identity
try:
object_url = build_confluence_document_id(
self.wiki_base, attachment["_links"]["download"], self.is_cloud
)
except Exception as e:
logging.warning(
f"Invalid attachment url for id {attachment['id']}, skipping"
)
logging.debug(f"Error building attachment url: {e}")
continue
try:
response = convert_attachment_to_content(
confluence_client=self.confluence_client,
attachment=attachment,
page_id=page["id"],
allow_images=self.allow_images,
)
if response is None:
continue
file_storage_name, file_blob = response
if not file_blob:
logging.info("Skipping attachment because it is no blob fetched")
continue
# Build attachment-specific metadata
attachment_metadata: dict[str, str | list[str]] = {}
if "space" in attachment:
attachment_metadata["space"] = attachment["space"].get("name", "")
labels: list[str] = []
if "metadata" in attachment and "labels" in attachment["metadata"]:
for label in attachment["metadata"]["labels"].get("results", []):
labels.append(label.get("name", ""))
if labels:
attachment_metadata["labels"] = labels
page_url = page_url or build_confluence_document_id(
self.wiki_base, page["_links"]["webui"], self.is_cloud
)
attachment_metadata["parent_page_id"] = page_url
attachment_id = build_confluence_document_id(
self.wiki_base, attachment["_links"]["webui"], self.is_cloud
)
# Build semantic identifier with space and page context
attachment_title = attachment.get("title", object_url)
space_name = page.get("space", {}).get("name", "")
page_title = page.get("title", "")
# Create hierarchical name: Space / Page / Attachment
attachment_path_parts = []
if space_name:
attachment_path_parts.append(space_name)
if page_title:
attachment_path_parts.append(page_title)
attachment_path_parts.append(attachment_title)
full_attachment_path = " / ".join(attachment_path_parts) if len(attachment_path_parts) > 1 else attachment_title
# Track attachment names for duplicate detection
if attachment_title not in self._document_name_counts:
self._document_name_counts[attachment_title] = 0
self._document_name_paths[attachment_title] = []
self._document_name_counts[attachment_title] += 1
self._document_name_paths[attachment_title].append(full_attachment_path)
# Use simple name if no duplicates, otherwise use full path
if self._document_name_counts[attachment_title] == 1:
attachment_semantic_identifier = attachment_title
else:
attachment_semantic_identifier = full_attachment_path
primary_owners: list[BasicExpertInfo] | None = None
if "version" in attachment and "by" in attachment["version"]:
author = attachment["version"]["by"]
display_name = author.get("displayName", "Unknown")
email = author.get("email", "unknown@domain.invalid")
primary_owners = [
BasicExpertInfo(display_name=display_name, email=email)
]
extension = Path(attachment.get("title", "")).suffix or ".unknown"
attachment_doc = Document(
id=attachment_id,
# sections=sections,
source=DocumentSource.CONFLUENCE,
semantic_identifier=attachment_semantic_identifier,
extension=extension,
blob=file_blob,
size_bytes=len(file_blob),
metadata=attachment_metadata,
doc_updated_at=(
datetime_from_string(attachment["version"]["when"])
if attachment.get("version")
and attachment["version"].get("when")
else None
),
primary_owners=primary_owners,
)
if self._is_newer_than_start(attachment_doc.doc_updated_at, start):
attachment_docs.append(attachment_doc)
except Exception as e:
logging.error(
f"Failed to extract/summarize attachment {attachment['title']}",
exc_info=e,
)
if is_atlassian_date_error(e):
# propagate error to be caught and retried
raise
attachment_failures.append(
ConnectorFailure(
failed_document=DocumentFailure(
document_id=object_url,
document_link=object_url,
),
failure_message=f"Failed to extract/summarize attachment {attachment['title']} for doc {object_url}",
exception=e,
)
)
return attachment_docs, attachment_failures
def _fetch_document_batches(
self,
checkpoint: ConfluenceCheckpoint,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> CheckpointOutput[ConfluenceCheckpoint]:
"""
Yields Documents (and ConnectorFailures). For each page:
- Create a Document for the page text/comments
- Then fetch attachments. For each attachment:
- Attempt to convert it with convert_attachment_to_content(...)
- If successful, yield the attachment as its own Document.
"""
checkpoint = copy.deepcopy(checkpoint)
# use "start" when last_updated is 0 or for confluence server
start_ts = start
page_query_url = checkpoint.next_page_url or self._build_page_retrieval_url(
start_ts, end, self.batch_size
)
logging.debug(f"page_query_url: {page_query_url}")
# store the next page start for confluence server, cursor for confluence cloud
def store_next_page_url(next_page_url: str) -> None:
checkpoint.next_page_url = next_page_url
for page in self.confluence_client.paginated_page_retrieval(
cql_url=page_query_url,
limit=self.batch_size,
next_page_callback=store_next_page_url,
):
# Build doc from page
doc_or_failure = self._convert_page_to_document(page)
if isinstance(doc_or_failure, ConnectorFailure):
yield doc_or_failure
continue
# yield completed document (or failure)
if self._is_newer_than_start(doc_or_failure.doc_updated_at, start):
yield doc_or_failure
# Now get attachments for that page:
attachment_docs, attachment_failures = self._fetch_page_attachments(
page, start, end
)
# yield attached docs and failures
yield from attachment_docs
# yield from attachment_failures
# Create checkpoint once a full page of results is returned
if checkpoint.next_page_url and checkpoint.next_page_url != page_query_url:
return checkpoint
checkpoint.has_more = False
return checkpoint
def _build_page_retrieval_url(
self,
start: SecondsSinceUnixEpoch | None,
end: SecondsSinceUnixEpoch | None,
limit: int,
) -> str:
"""
Builds the full URL used to retrieve pages from the confluence API.
This can be used as input to the confluence client's _paginate_url
or paginated_page_retrieval methods.
"""
page_query = self._construct_page_cql_query(start, end)
cql_url = self.confluence_client.build_cql_url(
page_query, expand=",".join(_PAGE_EXPANSION_FIELDS)
)
logging.info(f"[Confluence Connector] Building CQL URL {cql_url}")
return update_param_in_path(cql_url, "limit", str(limit))
@override
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ConfluenceCheckpoint,
) -> CheckpointOutput[ConfluenceCheckpoint]:
end += ONE_DAY # handle time zone weirdness
try:
return self._fetch_document_batches(checkpoint, start, end)
except Exception as e:
if is_atlassian_date_error(e) and start is not None:
logging.warning(
"Confluence says we provided an invalid 'updated' field. This may indicate"
"a real issue, but can also appear during edge cases like daylight"
f"savings time changes. Retrying with a 1 hour offset. Error: {e}"
)
return self._fetch_document_batches(checkpoint, start - ONE_HOUR, end)
raise
@override
def build_dummy_checkpoint(self) -> ConfluenceCheckpoint:
return ConfluenceCheckpoint(has_more=True, next_page_url=None)
@override
def validate_checkpoint_json(self, checkpoint_json: str) -> ConfluenceCheckpoint:
return ConfluenceCheckpoint.model_validate_json(checkpoint_json)
@override
def retrieve_all_slim_docs(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: IndexingHeartbeatInterface | None = None,
) -> GenerateSlimDocumentOutput:
return self._retrieve_all_slim_docs(
start=start,
end=end,
callback=callback,
include_permissions=False,
)
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: IndexingHeartbeatInterface | None = None,
) -> GenerateSlimDocumentOutput:
"""
Return 'slim' docs (IDs + minimal permission data).
Does not fetch actual text. Used primarily for incremental permission sync.
"""
return self._retrieve_all_slim_docs(
start=start,
end=end,
callback=callback,
include_permissions=True,
)
def _retrieve_all_slim_docs(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: IndexingHeartbeatInterface | None = None,
include_permissions: bool = True,
) -> GenerateSlimDocumentOutput:
doc_metadata_list: list[SlimDocument] = []
restrictions_expand = ",".join(_RESTRICTIONS_EXPANSION_FIELDS)
space_level_access_info: dict[str, ExternalAccess] = {}
if include_permissions:
space_level_access_info = get_all_space_permissions(
self.confluence_client, self.is_cloud
)
def get_external_access(
doc_id: str, restrictions: dict[str, Any], ancestors: list[dict[str, Any]]
) -> ExternalAccess | None:
return get_page_restrictions(
self.confluence_client, doc_id, restrictions, ancestors
) or space_level_access_info.get(page_space_key)
# Query pages
page_query = self.base_cql_page_query + self.cql_label_filter
for page in self.confluence_client.cql_paginate_all_expansions(
cql=page_query,
expand=restrictions_expand,
limit=_SLIM_DOC_BATCH_SIZE,
):
page_id = page["id"]
page_restrictions = page.get("restrictions") or {}
page_space_key = page.get("space", {}).get("key")
page_ancestors = page.get("ancestors", [])
page_id = build_confluence_document_id(
self.wiki_base, page["_links"]["webui"], self.is_cloud
)
doc_metadata_list.append(
SlimDocument(
id=page_id,
external_access=(
get_external_access(page_id, page_restrictions, page_ancestors)
if include_permissions
else None
),
)
)
# Query attachments for each page
attachment_query = self._construct_attachment_query(page["id"])
for attachment in self.confluence_client.cql_paginate_all_expansions(
cql=attachment_query,
expand=restrictions_expand,
limit=_SLIM_DOC_BATCH_SIZE,
):
# If you skip images, you'll skip them in the permission sync
attachment["metadata"].get("mediaType", "")
if not validate_attachment_filetype(
attachment,
):
continue
attachment_restrictions = attachment.get("restrictions", {})
if not attachment_restrictions:
attachment_restrictions = page_restrictions or {}
attachment_space_key = attachment.get("space", {}).get("key")
if not attachment_space_key:
attachment_space_key = page_space_key
attachment_id = build_confluence_document_id(
self.wiki_base,
attachment["_links"]["webui"],
self.is_cloud,
)
doc_metadata_list.append(
SlimDocument(
id=attachment_id,
external_access=(
get_external_access(
attachment_id, attachment_restrictions, []
)
if include_permissions
else None
),
)
)
if len(doc_metadata_list) > _SLIM_DOC_BATCH_SIZE:
yield doc_metadata_list[:_SLIM_DOC_BATCH_SIZE]
doc_metadata_list = doc_metadata_list[_SLIM_DOC_BATCH_SIZE:]
if callback and callback.should_stop():
raise RuntimeError(
"retrieve_all_slim_docs_perm_sync: Stop signal detected"
)
if callback:
callback.progress("retrieve_all_slim_docs_perm_sync", 1)
yield doc_metadata_list
def validate_connector_settings(self) -> None:
try:
spaces = self.low_timeout_confluence_client.get_all_spaces(limit=1)
except HTTPError as e:
status_code = e.response.status_code if e.response else None
if status_code == 401:
raise CredentialExpiredError(
"Invalid or expired Confluence credentials (HTTP 401)."
)
elif status_code == 403:
raise InsufficientPermissionsError(
"Insufficient permissions to access Confluence resources (HTTP 403)."
)
raise UnexpectedValidationError(
f"Unexpected Confluence error (status={status_code}): {e}"
)
except Exception as e:
raise UnexpectedValidationError(
f"Unexpected error while validating Confluence settings: {e}"
)
if self.space:
try:
self.low_timeout_confluence_client.get_space(self.space)
except ApiError as e:
raise ConnectorValidationError(
"Invalid Confluence space key provided"
) from e
if not spaces or not spaces.get("results"):
raise ConnectorValidationError(
"No Confluence spaces found. Either your credentials lack permissions, or "
"there truly are no spaces in this Confluence instance."
)
if __name__ == "__main__":
import os
# base url
wiki_base = os.environ["CONFLUENCE_URL"]
# auth stuff
username = os.environ["CONFLUENCE_USERNAME"]
access_token = os.environ["CONFLUENCE_ACCESS_TOKEN"]
is_cloud = os.environ["CONFLUENCE_IS_CLOUD"].lower() == "true"
# space + page
space = os.environ["CONFLUENCE_SPACE_KEY"]
# page_id = os.environ["CONFLUENCE_PAGE_ID"]
confluence_connector = ConfluenceConnector(
wiki_base=wiki_base,
space=space,
is_cloud=is_cloud,
# page_id=page_id,
)
credentials_provider = StaticCredentialsProvider(
None,
DocumentSource.CONFLUENCE,
{
"confluence_username": username,
"confluence_access_token": access_token,
},
)
confluence_connector.set_credentials_provider(credentials_provider)
start = 0.0
end = datetime.now().timestamp()
# Fetch all `SlimDocuments`.
for slim_doc in confluence_connector.retrieve_all_slim_docs_perm_sync():
print(slim_doc)
# Fetch all `Documents`.
for doc in load_all_docs_from_checkpoint_connector(
connector=confluence_connector,
start=start,
end=end,
):
print(doc)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/confluence_connector.py",
"license": "Apache License 2.0",
"lines": 1830,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/discord_connector.py | """Discord connector"""
import asyncio
import logging
import os
from datetime import datetime, timezone
from typing import Any, AsyncIterable, Iterable
from discord import Client, MessageType
from discord.channel import TextChannel, Thread
from discord.flags import Intents
from discord.message import Message as DiscordMessage
from common.data_source.config import INDEX_BATCH_SIZE, DocumentSource
from common.data_source.exceptions import ConnectorMissingCredentialError
from common.data_source.interfaces import LoadConnector, PollConnector, SecondsSinceUnixEpoch
from common.data_source.models import Document, GenerateDocumentsOutput, TextSection
_DISCORD_DOC_ID_PREFIX = "DISCORD_"
_SNIPPET_LENGTH = 30
def _convert_message_to_document(
message: DiscordMessage,
sections: list[TextSection],
) -> Document:
"""
Convert a discord message to a document
Sections are collected before calling this function because it relies on async
calls to fetch the thread history if there is one
"""
metadata: dict[str, str | list[str]] = {}
semantic_substring = ""
# Only messages from TextChannels will make it here, but we have to check for it anyway
if isinstance(message.channel, TextChannel) and (channel_name := message.channel.name):
metadata["Channel"] = channel_name
semantic_substring += f" in Channel: #{channel_name}"
# If there is a thread, add more detail to the metadata, title, and semantic identifier
if isinstance(message.channel, Thread):
# Threads do have a title
title = message.channel.name
# Add more detail to the semantic identifier if available
semantic_substring += f" in Thread: {title}"
snippet: str = message.content[:_SNIPPET_LENGTH].rstrip() + "..." if len(message.content) > _SNIPPET_LENGTH else message.content
semantic_identifier = f"{message.author.name} said{semantic_substring}: {snippet}"
# fallback to created_at
doc_updated_at = message.edited_at if message.edited_at else message.created_at
if doc_updated_at and doc_updated_at.tzinfo is None:
doc_updated_at = doc_updated_at.replace(tzinfo=timezone.utc)
elif doc_updated_at:
doc_updated_at = doc_updated_at.astimezone(timezone.utc)
return Document(
id=f"{_DISCORD_DOC_ID_PREFIX}{message.id}",
source=DocumentSource.DISCORD,
semantic_identifier=semantic_identifier,
doc_updated_at=doc_updated_at,
blob=message.content.encode("utf-8"),
extension=".txt",
size_bytes=len(message.content.encode("utf-8")),
metadata=metadata if metadata else None,
)
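# Illustrative only (hypothetical author/channel): a long message from "alice" posted in
# the #general channel produces a semantic identifier roughly of the form
# "alice said in Channel: #general: <first ~30 characters of the message>...".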
async def _fetch_filtered_channels(
discord_client: Client,
server_ids: list[int] | None,
channel_names: list[str] | None,
) -> list[TextChannel]:
filtered_channels: list[TextChannel] = []
for channel in discord_client.get_all_channels():
if not channel.permissions_for(channel.guild.me).read_message_history:
continue
if not isinstance(channel, TextChannel):
continue
if server_ids and len(server_ids) > 0 and channel.guild.id not in server_ids:
continue
if channel_names and channel.name not in channel_names:
continue
filtered_channels.append(channel)
logging.info(f"Found {len(filtered_channels)} channels for the authenticated user")
return filtered_channels
async def _fetch_documents_from_channel(
channel: TextChannel,
start_time: datetime | None,
end_time: datetime | None,
) -> AsyncIterable[Document]:
# Discord's epoch starts at 2015-01-01
discord_epoch = datetime(2015, 1, 1, tzinfo=timezone.utc)
if start_time and start_time < discord_epoch:
start_time = discord_epoch
# NOTE: limit=None is the correct way to fetch all messages and threads with pagination
# The discord package erroneously uses limit for both pagination AND number of results
# This causes the history and archived_threads methods to return 100 results even if there are more results within the filters
# Pagination is handled automatically (100 results at a time) when limit=None
async for channel_message in channel.history(
limit=None,
after=start_time,
before=end_time,
):
# Skip messages that are not the default type
if channel_message.type != MessageType.default:
continue
sections: list[TextSection] = [
TextSection(
text=channel_message.content,
link=channel_message.jump_url,
)
]
yield _convert_message_to_document(channel_message, sections)
for active_thread in channel.threads:
async for thread_message in active_thread.history(
limit=None,
after=start_time,
before=end_time,
):
# Skip messages that are not the default type
if thread_message.type != MessageType.default:
continue
sections = [
TextSection(
text=thread_message.content,
link=thread_message.jump_url,
)
]
yield _convert_message_to_document(thread_message, sections)
async for archived_thread in channel.archived_threads(
limit=None,
):
async for thread_message in archived_thread.history(
limit=None,
after=start_time,
before=end_time,
):
# Skip messages that are not the default type
if thread_message.type != MessageType.default:
continue
sections = [
TextSection(
text=thread_message.content,
link=thread_message.jump_url,
)
]
yield _convert_message_to_document(thread_message, sections)
def _manage_async_retrieval(
token: str,
requested_start_date_string: str,
channel_names: list[str],
server_ids: list[int],
start: datetime | None = None,
end: datetime | None = None,
) -> Iterable[Document]:
# parse requested_start_date_string to datetime
pull_date: datetime | None = datetime.strptime(requested_start_date_string, "%Y-%m-%d").replace(tzinfo=timezone.utc) if requested_start_date_string else None
# Set start_time to the most recent of start and pull_date, or whichever is provided
start_time = max(filter(None, [start, pull_date])) if start or pull_date else None
end_time: datetime | None = end
proxy_url: str | None = os.environ.get("https_proxy") or os.environ.get("http_proxy")
if proxy_url:
logging.info(f"Using proxy for Discord: {proxy_url}")
async def _async_fetch() -> AsyncIterable[Document]:
intents = Intents.default()
intents.message_content = True
async with Client(intents=intents, proxy=proxy_url) as cli:
asyncio.create_task(coro=cli.start(token))
await cli.wait_until_ready()
filtered_channels: list[TextChannel] = await _fetch_filtered_channels(
discord_client=cli,
server_ids=server_ids,
channel_names=channel_names,
)
for channel in filtered_channels:
async for doc in _fetch_documents_from_channel(
channel=channel,
start_time=start_time,
end_time=end_time,
):
logging.debug("Fetched Discord document %s", doc.id)
yield doc
def run_and_yield() -> Iterable[Document]:
loop = asyncio.new_event_loop()
try:
# Get the async generator
async_gen = _async_fetch()
# Convert to AsyncIterator
async_iter = async_gen.__aiter__()
while True:
try:
# Create a coroutine by calling anext with the async iterator
next_coro = anext(async_iter)
# Run the coroutine to get the next document
doc = loop.run_until_complete(next_coro)
yield doc
except StopAsyncIteration:
break
finally:
loop.close()
return run_and_yield()
class DiscordConnector(LoadConnector, PollConnector):
"""Discord connector for accessing Discord messages and channels"""
def __init__(
self,
server_ids: list[str] | None = None,
channel_names: list[str] | None = None,
# YYYY-MM-DD
start_date: str | None = None,
batch_size: int = INDEX_BATCH_SIZE,
):
self.batch_size = batch_size
self.channel_names: list[str] = channel_names if channel_names else []
self.server_ids: list[int] = [int(server_id) for server_id in server_ids] if server_ids else []
self._discord_bot_token: str | None = None
self.requested_start_date_string: str = start_date or ""
@property
def discord_bot_token(self) -> str:
if self._discord_bot_token is None:
raise ConnectorMissingCredentialError("Discord")
return self._discord_bot_token
def _manage_doc_batching(
self,
start: datetime | None = None,
end: datetime | None = None,
) -> GenerateDocumentsOutput:
doc_batch = []
def merge_batch():
nonlocal doc_batch
id = doc_batch[0].id
min_updated_at = doc_batch[0].doc_updated_at
max_updated_at = doc_batch[-1].doc_updated_at
blob = b''
size_bytes = 0
for d in doc_batch:
min_updated_at = min(min_updated_at, d.doc_updated_at)
max_updated_at = max(max_updated_at, d.doc_updated_at)
blob += b'\n\n' + d.blob
size_bytes += d.size_bytes
return Document(
id=id,
source=DocumentSource.DISCORD,
semantic_identifier=f"{min_updated_at} -> {max_updated_at}",
doc_updated_at=max_updated_at,
blob=blob,
extension=".txt",
size_bytes=size_bytes,
)
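# Illustrative only: with batch_size=3, three fetched messages collapse into a single
# Document whose blob is the concatenated message bodies (separated by blank lines) and
# whose semantic identifier spans "<oldest doc_updated_at> -> <newest doc_updated_at>".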
for doc in _manage_async_retrieval(
token=self.discord_bot_token,
requested_start_date_string=self.requested_start_date_string,
channel_names=self.channel_names,
server_ids=self.server_ids,
start=start,
end=end,
):
doc_batch.append(doc)
if len(doc_batch) >= self.batch_size:
yield [merge_batch()]
doc_batch = []
if doc_batch:
yield [merge_batch()]
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
self._discord_bot_token = credentials["discord_bot_token"]
return None
def validate_connector_settings(self) -> None:
"""Validate Discord connector settings"""
if not self._discord_bot_token:
raise ConnectorMissingCredentialError("Discord")
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> Any:
"""Poll Discord for recent messages"""
return self._manage_doc_batching(
datetime.fromtimestamp(start, tz=timezone.utc),
datetime.fromtimestamp(end, tz=timezone.utc),
)
def load_from_state(self) -> Any:
"""Load messages from Discord state"""
return self._manage_doc_batching(None, None)
if __name__ == "__main__":
import os
import time
end = time.time()
# 1 day
start = end - 24 * 60 * 60 * 1
# "1,2,3"
server_ids: str | None = os.environ.get("server_ids", None)
# "channel1,channel2"
channel_names: str | None = os.environ.get("channel_names", None)
connector = DiscordConnector(
server_ids=server_ids.split(",") if server_ids else [],
channel_names=channel_names.split(",") if channel_names else [],
start_date=os.environ.get("start_date", None),
)
connector.load_credentials({"discord_bot_token": os.environ.get("discord_bot_token")})
for doc_batch in connector.poll_source(start, end):
for doc in doc_batch:
print(doc)
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/discord_connector.py",
"license": "Apache License 2.0",
"lines": 285,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/dropbox_connector.py | """Dropbox connector"""
import logging
from datetime import timezone
from typing import Any
from dropbox import Dropbox
from dropbox.exceptions import ApiError, AuthError
from dropbox.files import FileMetadata, FolderMetadata
from common.data_source.config import INDEX_BATCH_SIZE, DocumentSource
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
InsufficientPermissionsError,
)
from common.data_source.interfaces import LoadConnector, PollConnector, SecondsSinceUnixEpoch
from common.data_source.models import Document, GenerateDocumentsOutput
from common.data_source.utils import get_file_ext
logger = logging.getLogger(__name__)
class DropboxConnector(LoadConnector, PollConnector):
"""Dropbox connector for accessing Dropbox files and folders"""
def __init__(self, batch_size: int = INDEX_BATCH_SIZE) -> None:
self.batch_size = batch_size
self.dropbox_client: Dropbox | None = None
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Load Dropbox credentials"""
access_token = credentials.get("dropbox_access_token")
if not access_token:
raise ConnectorMissingCredentialError("Dropbox access token is required")
self.dropbox_client = Dropbox(access_token)
return None
def validate_connector_settings(self) -> None:
"""Validate Dropbox connector settings"""
if self.dropbox_client is None:
raise ConnectorMissingCredentialError("Dropbox")
try:
self.dropbox_client.files_list_folder(path="", limit=1)
except AuthError as e:
logger.exception("[Dropbox]: Failed to validate Dropbox credentials")
raise ConnectorValidationError(f"Dropbox credential is invalid: {e}")
except ApiError as e:
if e.error is not None and "insufficient_permissions" in str(e.error).lower():
raise InsufficientPermissionsError("Your Dropbox token does not have sufficient permissions.")
raise ConnectorValidationError(f"Unexpected Dropbox error during validation: {e.user_message_text or e}")
except Exception as e:
raise ConnectorValidationError(f"Unexpected error during Dropbox settings validation: {e}")
def _download_file(self, path: str) -> bytes:
"""Download a single file from Dropbox."""
if self.dropbox_client is None:
raise ConnectorMissingCredentialError("Dropbox")
_, resp = self.dropbox_client.files_download(path)
return resp.content
def _get_shared_link(self, path: str) -> str:
"""Create a shared link for a file in Dropbox."""
if self.dropbox_client is None:
raise ConnectorMissingCredentialError("Dropbox")
try:
shared_links = self.dropbox_client.sharing_list_shared_links(path=path)
if shared_links.links:
return shared_links.links[0].url
link_metadata = self.dropbox_client.sharing_create_shared_link_with_settings(path)
return link_metadata.url
except ApiError as err:
logger.exception(f"[Dropbox]: Failed to create a shared link for {path}: {err}")
return ""
def _yield_files_recursive(
self,
path: str,
start: SecondsSinceUnixEpoch | None,
end: SecondsSinceUnixEpoch | None,
) -> GenerateDocumentsOutput:
"""Yield files in batches from a specified Dropbox folder, including subfolders."""
if self.dropbox_client is None:
raise ConnectorMissingCredentialError("Dropbox")
# Collect all files first to count filename occurrences
all_files = []
self._collect_files_recursive(path, start, end, all_files)
# Count filename occurrences
filename_counts: dict[str, int] = {}
for entry, _ in all_files:
filename_counts[entry.name] = filename_counts.get(entry.name, 0) + 1
# Process files in batches
batch: list[Document] = []
for entry, downloaded_file in all_files:
modified_time = entry.client_modified
if modified_time.tzinfo is None:
modified_time = modified_time.replace(tzinfo=timezone.utc)
else:
modified_time = modified_time.astimezone(timezone.utc)
# Use full path only if filename appears multiple times
if filename_counts.get(entry.name, 0) > 1:
# Remove leading slash and replace slashes with ' / '
relative_path = entry.path_display.lstrip('/')
semantic_id = relative_path.replace('/', ' / ') if relative_path else entry.name
else:
semantic_id = entry.name
batch.append(
Document(
id=f"dropbox:{entry.id}",
blob=downloaded_file,
source=DocumentSource.DROPBOX,
semantic_identifier=semantic_id,
extension=get_file_ext(entry.name),
doc_updated_at=modified_time,
size_bytes=entry.size if getattr(entry, "size", None) is not None else len(downloaded_file),
)
)
if len(batch) == self.batch_size:
yield batch
batch = []
if batch:
yield batch
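# Illustrative only (hypothetical paths): if "notes.txt" exists both at /team/notes.txt
# and /archive/notes.txt, the duplicate-name handling above yields semantic identifiers
# "team / notes.txt" and "archive / notes.txt"; a unique filename keeps its plain name.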
def _collect_files_recursive(
self,
path: str,
start: SecondsSinceUnixEpoch | None,
end: SecondsSinceUnixEpoch | None,
all_files: list,
) -> None:
"""Recursively collect all files matching time criteria."""
if self.dropbox_client is None:
raise ConnectorMissingCredentialError("Dropbox")
result = self.dropbox_client.files_list_folder(
path,
recursive=False,
include_non_downloadable_files=False,
)
while True:
for entry in result.entries:
if isinstance(entry, FileMetadata):
modified_time = entry.client_modified
if modified_time.tzinfo is None:
modified_time = modified_time.replace(tzinfo=timezone.utc)
else:
modified_time = modified_time.astimezone(timezone.utc)
time_as_seconds = modified_time.timestamp()
if start is not None and time_as_seconds <= start:
continue
if end is not None and time_as_seconds > end:
continue
try:
downloaded_file = self._download_file(entry.path_display)
all_files.append((entry, downloaded_file))
except Exception:
logger.exception(f"[Dropbox]: Error downloading file {entry.path_display}")
continue
elif isinstance(entry, FolderMetadata):
self._collect_files_recursive(entry.path_lower, start, end, all_files)
if not result.has_more:
break
result = self.dropbox_client.files_list_folder_continue(result.cursor)
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> GenerateDocumentsOutput:
"""Poll Dropbox for recent file changes"""
if self.dropbox_client is None:
raise ConnectorMissingCredentialError("Dropbox")
for batch in self._yield_files_recursive("", start, end):
yield batch
def load_from_state(self) -> GenerateDocumentsOutput:
"""Load files from Dropbox state"""
return self._yield_files_recursive("", None, None)
if __name__ == "__main__":
import os
logging.basicConfig(level=logging.DEBUG)
connector = DropboxConnector()
connector.load_credentials({"dropbox_access_token": os.environ.get("DROPBOX_ACCESS_TOKEN")})
connector.validate_connector_settings()
document_batches = connector.load_from_state()
try:
first_batch = next(document_batches)
print(f"Loaded {len(first_batch)} documents in first batch.")
for doc in first_batch:
print(f"- {doc.semantic_identifier} ({doc.size_bytes} bytes)")
except StopIteration:
print("No documents available in Dropbox.")
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/dropbox_connector.py",
"license": "Apache License 2.0",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/exceptions.py | """Exception class definitions"""
class ConnectorMissingCredentialError(Exception):
"""Missing credentials exception"""
def __init__(self, connector_name: str):
super().__init__(f"Missing credentials for {connector_name}")
class ConnectorValidationError(Exception):
"""Connector validation exception"""
pass
class CredentialExpiredError(Exception):
"""Credential expired exception"""
pass
class InsufficientPermissionsError(Exception):
"""Insufficient permissions exception"""
pass
class UnexpectedValidationError(Exception):
"""Unexpected validation exception"""
pass
class RateLimitTriedTooManyTimesError(Exception):
pass | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/exceptions.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:common/data_source/file_types.py | PRESENTATION_MIME_TYPE = (
"application/vnd.openxmlformats-officedocument.presentationml.presentation"
)
SPREADSHEET_MIME_TYPE = (
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
)
WORD_PROCESSING_MIME_TYPE = (
"application/vnd.openxmlformats-officedocument.wordprocessingml.document"
)
PDF_MIME_TYPE = "application/pdf"
class UploadMimeTypes:
IMAGE_MIME_TYPES = {"image/jpeg", "image/png", "image/webp"}
CSV_MIME_TYPES = {"text/csv"}
TEXT_MIME_TYPES = {
"text/plain",
"text/markdown",
"text/x-markdown",
"text/mdx",
"text/x-config",
"text/tab-separated-values",
"application/json",
"application/xml",
"text/xml",
"application/x-yaml",
}
DOCUMENT_MIME_TYPES = {
PDF_MIME_TYPE,
WORD_PROCESSING_MIME_TYPE,
PRESENTATION_MIME_TYPE,
SPREADSHEET_MIME_TYPE,
"message/rfc822",
"application/epub+zip",
}
ALLOWED_MIME_TYPES = IMAGE_MIME_TYPES.union(
TEXT_MIME_TYPES, DOCUMENT_MIME_TYPES, CSV_MIME_TYPES
)
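# Usage sketch (illustrative; mime_type, raw_bytes and process_upload are hypothetical):
# gate an upload by MIME type before any further processing.
# if mime_type in UploadMimeTypes.ALLOWED_MIME_TYPES:
#     process_upload(raw_bytes, mime_type)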
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/file_types.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:common/data_source/gmail_connector.py | import logging
from typing import Any
from google.oauth2.credentials import Credentials as OAuthCredentials
from google.oauth2.service_account import Credentials as ServiceAccountCredentials
from googleapiclient.errors import HttpError
from common.data_source.config import INDEX_BATCH_SIZE, SLIM_BATCH_SIZE, DocumentSource
from common.data_source.google_util.auth import get_google_creds
from common.data_source.google_util.constant import DB_CREDENTIALS_PRIMARY_ADMIN_KEY, MISSING_SCOPES_ERROR_STR, SCOPE_INSTRUCTIONS, USER_FIELDS
from common.data_source.google_util.resource import get_admin_service, get_gmail_service
from common.data_source.google_util.util import _execute_single_retrieval, execute_paginated_retrieval, clean_string
from common.data_source.interfaces import LoadConnector, PollConnector, SecondsSinceUnixEpoch, SlimConnectorWithPermSync
from common.data_source.models import BasicExpertInfo, Document, ExternalAccess, GenerateDocumentsOutput, GenerateSlimDocumentOutput, SlimDocument, TextSection
from common.data_source.utils import build_time_range_query, clean_email_and_extract_name, get_message_body, is_mail_service_disabled_error, gmail_time_str_to_utc, sanitize_filename
# Constants for Gmail API fields
THREAD_LIST_FIELDS = "nextPageToken, threads(id)"
PARTS_FIELDS = "parts(body(data), mimeType)"
PAYLOAD_FIELDS = f"payload(headers, {PARTS_FIELDS})"
MESSAGES_FIELDS = f"messages(id, {PAYLOAD_FIELDS})"
THREADS_FIELDS = f"threads(id, {MESSAGES_FIELDS})"
THREAD_FIELDS = f"id, {MESSAGES_FIELDS}"
EMAIL_FIELDS = ["cc", "bcc", "from", "to"]
def _get_owners_from_emails(emails: dict[str, str | None]) -> list[BasicExpertInfo]:
"""Convert email dictionary to list of BasicExpertInfo objects."""
owners = []
for email, names in emails.items():
if names:
name_parts = names.split(" ")
first_name = " ".join(name_parts[:-1])
last_name = name_parts[-1]
else:
first_name = None
last_name = None
owners.append(BasicExpertInfo(email=email, first_name=first_name, last_name=last_name))
return owners
def message_to_section(message: dict[str, Any]) -> tuple[TextSection, dict[str, str]]:
"""Convert Gmail message to text section and metadata."""
link = f"https://mail.google.com/mail/u/0/#inbox/{message['id']}"
payload = message.get("payload", {})
headers = payload.get("headers", [])
metadata: dict[str, Any] = {}
for header in headers:
name = header.get("name", "").lower()
value = header.get("value", "")
if name in EMAIL_FIELDS:
metadata[name] = value
if name == "subject":
metadata["subject"] = value
if name == "date":
metadata["updated_at"] = value
if labels := message.get("labelIds"):
metadata["labels"] = labels
message_data = ""
for name, value in metadata.items():
if name != "updated_at":
message_data += f"{name}: {value}\n"
message_body_text: str = get_message_body(payload)
return TextSection(link=link, text=message_body_text + message_data), metadata
def thread_to_document(full_thread: dict[str, Any], email_used_to_fetch_thread: str) -> Document | None:
"""Convert Gmail thread to Document object."""
all_messages = full_thread.get("messages", [])
if not all_messages:
return None
sections = []
semantic_identifier = ""
updated_at = None
from_emails: dict[str, str | None] = {}
other_emails: dict[str, str | None] = {}
for message in all_messages:
section, message_metadata = message_to_section(message)
sections.append(section)
for name, value in message_metadata.items():
if name in EMAIL_FIELDS:
email, display_name = clean_email_and_extract_name(value)
if name == "from":
from_emails[email] = display_name if not from_emails.get(email) else None
else:
other_emails[email] = display_name if not other_emails.get(email) else None
if not semantic_identifier:
semantic_identifier = message_metadata.get("subject", "")
semantic_identifier = clean_string(semantic_identifier)
semantic_identifier = sanitize_filename(semantic_identifier)
if message_metadata.get("updated_at"):
updated_at = message_metadata.get("updated_at")
updated_at_datetime = None
if updated_at:
updated_at_datetime = gmail_time_str_to_utc(updated_at)
thread_id = full_thread.get("id")
if not thread_id:
raise ValueError("Thread ID is required")
primary_owners = _get_owners_from_emails(from_emails)
secondary_owners = _get_owners_from_emails(other_emails)
if not semantic_identifier:
semantic_identifier = "(no subject)"
combined_sections = "\n\n".join(
sec.text for sec in sections if hasattr(sec, "text")
)
# Encode the assembled thread text so the blob is bytes, matching the Document model
blob = combined_sections.encode("utf-8")
size_bytes = len(blob)
extension = '.txt'
return Document(
id=thread_id,
semantic_identifier=semantic_identifier,
blob=blob,
size_bytes=size_bytes,
extension=extension,
source=DocumentSource.GMAIL,
primary_owners=primary_owners,
secondary_owners=secondary_owners,
doc_updated_at=updated_at_datetime,
metadata=message_metadata,
external_access=ExternalAccess(
external_user_emails={email_used_to_fetch_thread},
external_user_group_ids=set(),
is_public=False,
),
)
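# Illustrative sketch (never executed on import): feeds a minimal, hand-crafted
# thread payload to thread_to_document to show the structure it expects. All ids,
# headers, and addresses below are made-up sample values.
def _demo_thread_to_document() -> Document | None:
    fake_thread = {
        "id": "thread-123",
        "messages": [
            {
                "id": "msg-1",
                "payload": {
                    "headers": [
                        {"name": "From", "value": "Alice Example <alice@example.com>"},
                        {"name": "Subject", "value": "Quarterly report"},
                        {"name": "Date", "value": "Mon, 06 Jan 2025 10:00:00 +0000"},
                    ],
                    "parts": [],
                },
            }
        ],
    }
    return thread_to_document(fake_thread, email_used_to_fetch_thread="admin@example.com")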
class GmailConnector(LoadConnector, PollConnector, SlimConnectorWithPermSync):
"""Gmail connector for synchronizing emails from Gmail accounts."""
def __init__(self, batch_size: int = INDEX_BATCH_SIZE) -> None:
self.batch_size = batch_size
self._creds: OAuthCredentials | ServiceAccountCredentials | None = None
self._primary_admin_email: str | None = None
@property
def primary_admin_email(self) -> str:
"""Get primary admin email."""
if self._primary_admin_email is None:
raise RuntimeError("Primary admin email missing, should not call this property before calling load_credentials")
return self._primary_admin_email
@property
def google_domain(self) -> str:
"""Get Google domain from email."""
if self._primary_admin_email is None:
raise RuntimeError("Primary admin email missing, should not call this property before calling load_credentials")
return self._primary_admin_email.split("@")[-1]
@property
def creds(self) -> OAuthCredentials | ServiceAccountCredentials:
"""Get Google credentials."""
if self._creds is None:
raise RuntimeError("Creds missing, should not call this property before calling load_credentials")
return self._creds
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, str] | None:
"""Load Gmail credentials."""
primary_admin_email = credentials[DB_CREDENTIALS_PRIMARY_ADMIN_KEY]
self._primary_admin_email = primary_admin_email
self._creds, new_creds_dict = get_google_creds(
credentials=credentials,
source=DocumentSource.GMAIL,
)
return new_creds_dict
def _get_all_user_emails(self) -> list[str]:
"""Get all user emails for Google Workspace domain."""
try:
admin_service = get_admin_service(self.creds, self.primary_admin_email)
emails = []
for user in execute_paginated_retrieval(
retrieval_function=admin_service.users().list,
list_key="users",
fields=USER_FIELDS,
domain=self.google_domain,
):
if email := user.get("primaryEmail"):
emails.append(email)
return emails
except HttpError as e:
if e.resp.status == 404:
logging.warning("Received 404 from Admin SDK; this may indicate a personal Gmail account with no Workspace domain. Falling back to single user.")
return [self.primary_admin_email]
raise
except Exception:
raise
def _fetch_threads(
self,
time_range_start: SecondsSinceUnixEpoch | None = None,
time_range_end: SecondsSinceUnixEpoch | None = None,
) -> GenerateDocumentsOutput:
"""Fetch Gmail threads within time range."""
query = build_time_range_query(time_range_start, time_range_end)
doc_batch = []
for user_email in self._get_all_user_emails():
gmail_service = get_gmail_service(self.creds, user_email)
try:
for thread in execute_paginated_retrieval(
retrieval_function=gmail_service.users().threads().list,
list_key="threads",
userId=user_email,
fields=THREAD_LIST_FIELDS,
q=query,
continue_on_404_or_403=True,
):
full_thread = _execute_single_retrieval(
retrieval_function=gmail_service.users().threads().get,
userId=user_email,
fields=THREAD_FIELDS,
id=thread["id"],
continue_on_404_or_403=True,
)
doc = thread_to_document(full_thread, user_email)
if doc is None:
continue
doc_batch.append(doc)
if len(doc_batch) > self.batch_size:
yield doc_batch
doc_batch = []
except HttpError as e:
if is_mail_service_disabled_error(e):
logging.warning(
"Skipping Gmail sync for %s because the mailbox is disabled.",
user_email,
)
continue
raise
if doc_batch:
yield doc_batch
def load_from_state(self) -> GenerateDocumentsOutput:
"""Load all documents from Gmail."""
try:
yield from self._fetch_threads()
except Exception as e:
if MISSING_SCOPES_ERROR_STR in str(e):
raise PermissionError(SCOPE_INSTRUCTIONS) from e
raise e
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> GenerateDocumentsOutput:
"""Poll Gmail for documents within time range."""
try:
yield from self._fetch_threads(start, end)
except Exception as e:
if MISSING_SCOPES_ERROR_STR in str(e):
raise PermissionError(SCOPE_INSTRUCTIONS) from e
raise e
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback=None,
) -> GenerateSlimDocumentOutput:
"""Retrieve slim documents for permission synchronization."""
query = build_time_range_query(start, end)
doc_batch = []
for user_email in self._get_all_user_emails():
logging.info(f"Fetching slim threads for user: {user_email}")
gmail_service = get_gmail_service(self.creds, user_email)
try:
for thread in execute_paginated_retrieval(
retrieval_function=gmail_service.users().threads().list,
list_key="threads",
userId=user_email,
fields=THREAD_LIST_FIELDS,
q=query,
continue_on_404_or_403=True,
):
doc_batch.append(
SlimDocument(
id=thread["id"],
external_access=ExternalAccess(
external_user_emails={user_email},
external_user_group_ids=set(),
is_public=False,
),
)
)
if len(doc_batch) > SLIM_BATCH_SIZE:
yield doc_batch
doc_batch = []
except HttpError as e:
if is_mail_service_disabled_error(e):
logging.warning(
"Skipping slim Gmail sync for %s because the mailbox is disabled.",
user_email,
)
continue
raise
if doc_batch:
yield doc_batch
if __name__ == "__main__":
import time
import os
from common.data_source.google_util.util import get_credentials_from_env
logging.basicConfig(level=logging.INFO)
try:
email = os.environ.get("GMAIL_TEST_EMAIL", "newyorkupperbay@gmail.com")
creds = get_credentials_from_env(email, oauth=True, source="gmail")
print("Credentials loaded successfully")
print(f"{creds=}")
connector = GmailConnector(batch_size=2)
print("GmailConnector initialized")
connector.load_credentials(creds)
print("Credentials loaded into connector")
print("Gmail is ready to use")
for doc_batch in connector._fetch_threads(
int(time.time()) - 1 * 24 * 60 * 60,
int(time.time()),
):
print("new batch", "-" * 80)
for doc in doc_batch:
print(doc)
print("\n\n")
except Exception as e:
logging.exception(f"Error loading credentials: {e}") | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/gmail_connector.py",
"license": "Apache License 2.0",
"lines": 298,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/html_utils.py | import logging
import re
from copy import copy
from dataclasses import dataclass
from io import BytesIO
from typing import IO
import bs4
from common.data_source.config import HTML_BASED_CONNECTOR_TRANSFORM_LINKS_STRATEGY, \
HtmlBasedConnectorTransformLinksStrategy, WEB_CONNECTOR_IGNORED_CLASSES, WEB_CONNECTOR_IGNORED_ELEMENTS, \
PARSE_WITH_TRAFILATURA
MINTLIFY_UNWANTED = ["sticky", "hidden"]
@dataclass
class ParsedHTML:
title: str | None
cleaned_text: str
def strip_excessive_newlines_and_spaces(document: str) -> str:
# collapse repeated spaces into one
document = re.sub(r" +", " ", document)
# remove trailing spaces
document = re.sub(r" +[\n\r]", "\n", document)
# remove repeated newlines
document = re.sub(r"[\n\r]+", "\n", document)
return document.strip()
def strip_newlines(document: str) -> str:
# HTML might contain newlines which are just whitespaces to a browser
return re.sub(r"[\n\r]+", " ", document)
def format_element_text(element_text: str, link_href: str | None) -> str:
element_text_no_newlines = strip_newlines(element_text)
if (
not link_href
or HTML_BASED_CONNECTOR_TRANSFORM_LINKS_STRATEGY
== HtmlBasedConnectorTransformLinksStrategy.STRIP
):
return element_text_no_newlines
return f"[{element_text_no_newlines}]({link_href})"
def parse_html_with_trafilatura(html_content: str) -> str:
"""Parse HTML content using trafilatura."""
import trafilatura # type: ignore
from trafilatura.settings import use_config # type: ignore
config = use_config()
config.set("DEFAULT", "include_links", "True")
config.set("DEFAULT", "include_tables", "True")
config.set("DEFAULT", "include_images", "True")
config.set("DEFAULT", "include_formatting", "True")
extracted_text = trafilatura.extract(html_content, config=config)
return strip_excessive_newlines_and_spaces(extracted_text) if extracted_text else ""
def format_document_soup(
document: bs4.BeautifulSoup, table_cell_separator: str = "\t"
) -> str:
"""Format html to a flat text document.
The following goals:
- Newlines from within the HTML are removed (as browser would ignore them as well).
- Repeated newlines/spaces are removed (as browsers would ignore them).
- Newlines only before and after headlines and paragraphs or when explicit (br or pre tag)
- Table columns/rows are separated by newline
- List elements are separated by newline and start with a hyphen
"""
text = ""
list_element_start = False
verbatim_output = 0
in_table = False
last_added_newline = False
link_href: str | None = None
for e in document.descendants:
verbatim_output -= 1
if isinstance(e, bs4.element.NavigableString):
if isinstance(e, (bs4.element.Comment, bs4.element.Doctype)):
continue
element_text = e.text
if in_table:
# Tables are represented in natural language with rows separated by newlines
# Can't have newlines then in the table elements
element_text = element_text.replace("\n", " ").strip()
# Some tags are translated to spaces but in the logic underneath this section, we
# translate them to newlines as a browser should render them such as with br
# This logic here avoids a space after newline when it shouldn't be there.
if last_added_newline and element_text.startswith(" "):
element_text = element_text[1:]
last_added_newline = False
if element_text:
content_to_add = (
element_text
if verbatim_output > 0
else format_element_text(element_text, link_href)
)
# Don't join separate elements without any spacing
if (text and not text[-1].isspace()) and (
content_to_add and not content_to_add[0].isspace()
):
text += " "
text += content_to_add
list_element_start = False
elif isinstance(e, bs4.element.Tag):
# table is standard HTML element
if e.name == "table":
in_table = True
# TR is for rows
elif e.name == "tr" and in_table:
text += "\n"
# td for data cell, th for header
elif e.name in ["td", "th"] and in_table:
text += table_cell_separator
elif e.name == "/table":
in_table = False
elif in_table:
# don't handle other cases while in table
pass
elif e.name == "a":
href_value = e.get("href", None)
# mostly for typing, having multiple hrefs is not valid HTML
link_href = (
href_value[0] if isinstance(href_value, list) else href_value
)
elif e.name == "/a":
link_href = None
elif e.name in ["p", "div"]:
if not list_element_start:
text += "\n"
elif e.name in ["h1", "h2", "h3", "h4"]:
text += "\n"
list_element_start = False
last_added_newline = True
elif e.name == "br":
text += "\n"
list_element_start = False
last_added_newline = True
elif e.name == "li":
text += "\n- "
list_element_start = True
elif e.name == "pre":
if verbatim_output <= 0:
verbatim_output = len(list(e.childGenerator()))
return strip_excessive_newlines_and_spaces(text)
def parse_html_page_basic(text: str | BytesIO | IO[bytes]) -> str:
soup = bs4.BeautifulSoup(text, "html.parser")
return format_document_soup(soup)
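# Quick usage sketch: parse_html_page_basic applies the flattening rules described
# in format_document_soup (paragraph newlines, "- " list items, tab-separated cells).
# The HTML snippet below is an arbitrary example and the expected output is approximate.
def _demo_parse_html_page_basic() -> str:
    example_html = "<html><body><h1>Title</h1><p>First paragraph</p><ul><li>item one</li><li>item two</li></ul></body></html>"
    # Expected result is roughly: "Title\nFirst paragraph\n- item one\n- item two"
    return parse_html_page_basic(example_html)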
def web_html_cleanup(
page_content: str | bs4.BeautifulSoup,
mintlify_cleanup_enabled: bool = True,
additional_element_types_to_discard: list[str] | None = None,
) -> ParsedHTML:
if isinstance(page_content, str):
soup = bs4.BeautifulSoup(page_content, "html.parser")
else:
soup = page_content
title_tag = soup.find("title")
title = None
if title_tag and title_tag.text:
title = title_tag.text
title_tag.extract()
# Heuristics based cleaning of elements based on css classes
unwanted_classes = copy(WEB_CONNECTOR_IGNORED_CLASSES)
if mintlify_cleanup_enabled:
unwanted_classes.extend(MINTLIFY_UNWANTED)
for undesired_element in unwanted_classes:
[
tag.extract()
for tag in soup.find_all(
class_=lambda x: x and undesired_element in x.split()
)
]
for undesired_tag in WEB_CONNECTOR_IGNORED_ELEMENTS:
[tag.extract() for tag in soup.find_all(undesired_tag)]
if additional_element_types_to_discard:
for undesired_tag in additional_element_types_to_discard:
[tag.extract() for tag in soup.find_all(undesired_tag)]
soup_string = str(soup)
page_text = ""
if PARSE_WITH_TRAFILATURA:
try:
page_text = parse_html_with_trafilatura(soup_string)
if not page_text:
raise ValueError("Empty content returned by trafilatura.")
except Exception as e:
logging.info(f"Trafilatura parsing failed: {e}. Falling back on bs4.")
page_text = format_document_soup(soup)
else:
page_text = format_document_soup(soup)
# 200B is ZeroWidthSpace which we don't care for
cleaned_text = page_text.replace("\u200b", "")
return ParsedHTML(title=title, cleaned_text=cleaned_text)
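# Illustrative sketch: web_html_cleanup drops ignored classes/elements, pulls the
# <title> out separately, and flattens the rest. The snippet and the extra
# "footer" discard below are arbitrary example inputs.
def _demo_web_html_cleanup() -> ParsedHTML:
    raw_html = (
        "<html><head><title>Docs</title></head>"
        "<body><p>Hello world</p><footer>ignored</footer></body></html>"
    )
    return web_html_cleanup(raw_html, additional_element_types_to_discard=["footer"])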
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/html_utils.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/interfaces.py | """Interface definitions"""
import abc
import uuid
from abc import ABC, abstractmethod
from enum import IntFlag, auto
from types import TracebackType
from typing import Any, Dict, Generator, TypeVar, Generic, Callable, TypeAlias
from collections.abc import Iterator
from pydantic import BaseModel
from common.data_source.models import (
Document,
SlimDocument,
ConnectorCheckpoint,
ConnectorFailure,
SecondsSinceUnixEpoch, GenerateSlimDocumentOutput
)
GenerateDocumentsOutput = Iterator[list[Document]]
class LoadConnector(ABC):
"""Load connector interface"""
@abstractmethod
def load_credentials(self, credentials: Dict[str, Any]) -> Dict[str, Any] | None:
"""Load credentials"""
pass
@abstractmethod
def load_from_state(self) -> Generator[list[Document], None, None]:
"""Load documents from state"""
pass
def validate_connector_settings(self) -> None:
"""Validate connector settings"""
pass
class PollConnector(ABC):
"""Poll connector interface"""
@abstractmethod
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> Generator[list[Document], None, None]:
"""Poll source to get documents"""
pass
class CredentialsConnector(ABC):
"""Credentials connector interface"""
@abstractmethod
def load_credentials(self, credentials: Dict[str, Any]) -> Dict[str, Any] | None:
"""Load credentials"""
pass
class SlimConnectorWithPermSync(ABC):
"""Simplified connector interface (with permission sync)"""
@abstractmethod
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: Any = None,
) -> Generator[list[SlimDocument], None, None]:
"""Retrieve all simplified documents (with permission sync)"""
pass
class CheckpointedConnectorWithPermSync(ABC):
"""Checkpoint connector interface (with permission sync)"""
@abstractmethod
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ConnectorCheckpoint,
) -> Generator[Document | ConnectorFailure, None, ConnectorCheckpoint]:
"""Load documents from checkpoint"""
pass
@abstractmethod
def load_from_checkpoint_with_perm_sync(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ConnectorCheckpoint,
) -> Generator[Document | ConnectorFailure, None, ConnectorCheckpoint]:
"""Load documents from checkpoint (with permission sync)"""
pass
@abstractmethod
def build_dummy_checkpoint(self) -> ConnectorCheckpoint:
"""Build dummy checkpoint"""
pass
@abstractmethod
def validate_checkpoint_json(self, checkpoint_json: str) -> ConnectorCheckpoint:
"""Validate checkpoint JSON"""
pass
T = TypeVar("T", bound="CredentialsProviderInterface")
class CredentialsProviderInterface(abc.ABC, Generic[T]):
@abc.abstractmethod
def __enter__(self) -> T:
raise NotImplementedError
@abc.abstractmethod
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
raise NotImplementedError
@abc.abstractmethod
def get_tenant_id(self) -> str | None:
raise NotImplementedError
@abc.abstractmethod
def get_provider_key(self) -> str:
"""a unique key that the connector can use to lock around a credential
that might be used simultaneously.
Will typically be the credential id, but can also just be something random
in cases when there is nothing to lock (aka static credentials)
"""
raise NotImplementedError
@abc.abstractmethod
def get_credentials(self) -> dict[str, Any]:
raise NotImplementedError
@abc.abstractmethod
def set_credentials(self, credential_json: dict[str, Any]) -> None:
raise NotImplementedError
@abc.abstractmethod
def is_dynamic(self) -> bool:
"""If dynamic, the credentials may change during usage ... meaning the client
needs to use the locking features of the credentials provider to operate
correctly.
If static, the client can simply reference the credentials once and use them
through the entire indexing run.
"""
raise NotImplementedError
class StaticCredentialsProvider(
CredentialsProviderInterface["StaticCredentialsProvider"]
):
"""Implementation (a very simple one!) to handle static credentials."""
def __init__(
self,
tenant_id: str | None,
connector_name: str,
credential_json: dict[str, Any],
):
self._tenant_id = tenant_id
self._connector_name = connector_name
self._credential_json = credential_json
self._provider_key = str(uuid.uuid4())
def __enter__(self) -> "StaticCredentialsProvider":
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
pass
def get_tenant_id(self) -> str | None:
return self._tenant_id
def get_provider_key(self) -> str:
return self._provider_key
def get_credentials(self) -> dict[str, Any]:
return self._credential_json
def set_credentials(self, credential_json: dict[str, Any]) -> None:
self._credential_json = credential_json
def is_dynamic(self) -> bool:
return False
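# Usage sketch: static credentials go through the same provider interface but need
# no locking, since is_dynamic() is False. The names and token below are placeholders.
def _demo_static_credentials() -> dict[str, Any]:
    provider = StaticCredentialsProvider(
        tenant_id=None,
        connector_name="example_connector",
        credential_json={"api_token": "<placeholder>"},
    )
    with provider as active:
        # static providers can be read once and reused for the whole indexing run
        assert not active.is_dynamic()
        return active.get_credentials()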
CT = TypeVar("CT", bound=ConnectorCheckpoint)
class BaseConnector(abc.ABC, Generic[CT]):
REDIS_KEY_PREFIX = "da_connector_data:"
# Common image file extensions supported across connectors
IMAGE_EXTENSIONS = {".jpg", ".jpeg", ".png", ".webp", ".gif"}
@abc.abstractmethod
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
raise NotImplementedError
@staticmethod
def parse_metadata(metadata: dict[str, Any]) -> list[str]:
"""Parse the metadata for a document/chunk into a string to pass to Generative AI as additional context"""
custom_parser_req_msg = (
"Specific metadata parsing required, connector has not implemented it."
)
metadata_lines = []
for metadata_key, metadata_value in metadata.items():
if isinstance(metadata_value, str):
metadata_lines.append(f"{metadata_key}: {metadata_value}")
elif isinstance(metadata_value, list):
if not all([isinstance(val, str) for val in metadata_value]):
raise RuntimeError(custom_parser_req_msg)
metadata_lines.append(f'{metadata_key}: {", ".join(metadata_value)}')
else:
raise RuntimeError(custom_parser_req_msg)
return metadata_lines
def validate_connector_settings(self) -> None:
"""
Override this if your connector needs to validate credentials or settings.
Raise an exception if invalid, otherwise do nothing.
Default is a no-op (always successful).
"""
def validate_perm_sync(self) -> None:
"""
Permission-sync validation hook.
RAGFlow doesn't ship the Onyx EE permission-sync validation package.
Connectors that support permission sync should override
`validate_connector_settings()` as needed.
"""
return None
def set_allow_images(self, value: bool) -> None:
"""Implement if the underlying connector wants to skip/allow image downloading
based on the application level image analysis setting."""
def build_dummy_checkpoint(self) -> CT:
# TODO: find a way to make this work without type: ignore
return ConnectorCheckpoint(has_more=True) # type: ignore
CheckpointOutput: TypeAlias = Generator[Document | ConnectorFailure, None, CT]
LoadFunction = Callable[[CT], CheckpointOutput[CT]]
class CheckpointedConnector(BaseConnector[CT]):
@abc.abstractmethod
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: CT,
) -> CheckpointOutput[CT]:
"""Yields back documents or failures. Final return is the new checkpoint.
Final return can be access via either:
```
try:
for document_or_failure in connector.load_from_checkpoint(start, end, checkpoint):
print(document_or_failure)
except StopIteration as e:
checkpoint = e.value # Extracting the return value
print(checkpoint)
```
OR
```
checkpoint = yield from connector.load_from_checkpoint(start, end, checkpoint)
```
"""
raise NotImplementedError
@abc.abstractmethod
def build_dummy_checkpoint(self) -> CT:
raise NotImplementedError
@abc.abstractmethod
def validate_checkpoint_json(self, checkpoint_json: str) -> CT:
"""Validate the checkpoint json and return the checkpoint object"""
raise NotImplementedError
class CheckpointOutputWrapper(Generic[CT]):
"""
Wraps a CheckpointOutput generator to give things back in a more digestible format,
specifically for Document outputs.
The connector format is easier for the connector implementor (e.g. it enforces exactly
one new checkpoint is returned AND that the checkpoint is at the end), thus the different
formats.
"""
def __init__(self) -> None:
self.next_checkpoint: CT | None = None
def __call__(
self,
checkpoint_connector_generator: CheckpointOutput[CT],
) -> Generator[
tuple[Document | None, ConnectorFailure | None, CT | None],
None,
None,
]:
# grabs the final return value and stores it in the `next_checkpoint` variable
def _inner_wrapper(
checkpoint_connector_generator: CheckpointOutput[CT],
) -> CheckpointOutput[CT]:
self.next_checkpoint = yield from checkpoint_connector_generator
return self.next_checkpoint # not used
for document_or_failure in _inner_wrapper(checkpoint_connector_generator):
if isinstance(document_or_failure, Document):
yield document_or_failure, None, None
elif isinstance(document_or_failure, ConnectorFailure):
yield None, document_or_failure, None
else:
raise ValueError(
f"Invalid document_or_failure type: {type(document_or_failure)}"
)
if self.next_checkpoint is None:
raise RuntimeError(
"Checkpoint is None. This should never happen - the connector should always return a checkpoint."
)
yield None, None, self.next_checkpoint
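# Hypothetical driver loop (not part of the original module): shows how
# CheckpointOutputWrapper converts the generator-with-return protocol of
# load_from_checkpoint into plain (document, failure, checkpoint) tuples.
def _drive_checkpointed_connector(
    connector: "CheckpointedConnector",
    start: SecondsSinceUnixEpoch,
    end: SecondsSinceUnixEpoch,
) -> None:
    checkpoint = connector.build_dummy_checkpoint()
    wrapper = CheckpointOutputWrapper()
    for document, failure, next_checkpoint in wrapper(connector.load_from_checkpoint(start, end, checkpoint)):
        if document is not None:
            print(f"indexed document {document.id}")
        elif failure is not None:
            print(f"connector failure: {failure.failure_message}")
        elif next_checkpoint is not None:
            print(f"run finished, has_more={next_checkpoint.has_more}")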
class CheckpointedConnectorWithPermSyncGH(CheckpointedConnector[CT]):
@abc.abstractmethod
def load_from_checkpoint_with_perm_sync(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: CT,
) -> CheckpointOutput[CT]:
raise NotImplementedError
# Slim connectors retrieve just the ids of documents
class SlimConnector(BaseConnector):
@abc.abstractmethod
def retrieve_all_slim_docs(
self,
) -> GenerateSlimDocumentOutput:
raise NotImplementedError
class ConfluenceUser(BaseModel):
user_id: str # accountId in Cloud, userKey in Server
username: str | None # Confluence Cloud doesn't give usernames
display_name: str
# Confluence Data Center doesn't give email back by default,
# have to fetch it with a different endpoint
email: str | None
type: str
class TokenResponse(BaseModel):
access_token: str
expires_in: int
token_type: str
refresh_token: str
scope: str
class OnyxExtensionType(IntFlag):
Plain = auto()
Document = auto()
Multimedia = auto()
All = Plain | Document | Multimedia
class AttachmentProcessingResult(BaseModel):
"""
A container for results after processing a Confluence attachment.
'text' is the textual content of the attachment.
'file_name' is the final file name used in FileStore to store the content.
'error' holds an exception or string if something failed.
"""
text: str | None
file_blob: bytes | bytearray | None
file_name: str | None
error: str | None = None
model_config = {"arbitrary_types_allowed": True}
class IndexingHeartbeatInterface(ABC):
"""Defines a callback interface to be passed to run_indexing_entrypoint."""
@abstractmethod
def should_stop(self) -> bool:
"""Signal to stop the looping function in flight."""
@abstractmethod
def progress(self, tag: str, amount: int) -> None:
"""Send progress updates to the caller.
Amount can be a positive number to indicate progress or <= 0
just to act as a keep-alive.
"""
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/interfaces.py",
"license": "Apache License 2.0",
"lines": 327,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/models.py | """Data model definitions for all connectors"""
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Optional, List, Sequence, NamedTuple
from typing_extensions import TypedDict, NotRequired
from pydantic import BaseModel
from enum import Enum
@dataclass(frozen=True)
class ExternalAccess:
# arbitrary limit to prevent excessively large permissions sets
# not internally enforced ... the caller can check this before using the instance
MAX_NUM_ENTRIES = 5000
# Emails of external users with access to the doc externally
external_user_emails: set[str]
# Names or external IDs of groups with access to the doc
external_user_group_ids: set[str]
# Whether the document is public in the external system or Onyx
is_public: bool
def __str__(self) -> str:
"""Prevent extremely long logs"""
def truncate_set(s: set[str], max_len: int = 100) -> str:
s_str = str(s)
if len(s_str) > max_len:
return f"{s_str[:max_len]}... ({len(s)} items)"
return s_str
return (
f"ExternalAccess("
f"external_user_emails={truncate_set(self.external_user_emails)}, "
f"external_user_group_ids={truncate_set(self.external_user_group_ids)}, "
f"is_public={self.is_public})"
)
@property
def num_entries(self) -> int:
return len(self.external_user_emails) + len(self.external_user_group_ids)
@classmethod
def public(cls) -> "ExternalAccess":
return cls(
external_user_emails=set(),
external_user_group_ids=set(),
is_public=True,
)
@classmethod
def empty(cls) -> "ExternalAccess":
"""
A helper function that returns an *empty* set of external user-emails and group-ids, and sets `is_public` to `False`.
This effectively makes the document in question "private" or inaccessible to anyone else.
This is especially helpful to use when you are performing permission-syncing, and some document's permissions can't
be determined (for whatever reason). Setting its `ExternalAccess` to "private" is a feasible fallback.
"""
return cls(
external_user_emails=set(),
external_user_group_ids=set(),
is_public=False,
)
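# Quick sketch of the two fallbacks described above: a fully-public grant versus a
# private placeholder used when a document's permissions cannot be determined.
_PUBLIC_ACCESS_EXAMPLE = ExternalAccess.public()    # visible to everyone in the external system
_PRIVATE_FALLBACK_EXAMPLE = ExternalAccess.empty()  # visible to no one until permissions sync succeeds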
class ExtractionResult(NamedTuple):
"""Structured result from text and image extraction from various file types."""
text_content: str
embedded_images: Sequence[tuple[bytes, str]]
metadata: dict[str, Any]
class TextSection(BaseModel):
"""Text section model"""
link: str
text: str
class ImageSection(BaseModel):
"""Image section model"""
link: str
image_file_id: str
class Document(BaseModel):
"""Document model"""
id: str
source: str
semantic_identifier: str
extension: str
blob: bytes
doc_updated_at: datetime
size_bytes: int
external_access: Optional[ExternalAccess] = None
primary_owners: Optional[list] = None
metadata: Optional[dict[str, Any]] = None
doc_metadata: Optional[dict[str, Any]] = None
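# Minimal construction sketch for the Document model above; every value is a
# made-up placeholder illustrating the expected field types (bytes blob, datetime, etc.).
def _example_document() -> Document:
    payload = b"example contents"
    return Document(
        id="example-doc-1",
        source="web",
        semantic_identifier="Example page",
        extension=".txt",
        blob=payload,
        doc_updated_at=datetime.now(),
        size_bytes=len(payload),
        external_access=ExternalAccess.empty(),
        metadata={"origin": "example"},
    )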
class BasicExpertInfo(BaseModel):
"""Expert information model"""
display_name: Optional[str] = None
first_name: Optional[str] = None
last_name: Optional[str] = None
email: Optional[str] = None
def get_semantic_name(self) -> str:
"""Get semantic name for display"""
if self.display_name:
return self.display_name
elif self.first_name and self.last_name:
return f"{self.first_name} {self.last_name}"
elif self.first_name:
return self.first_name
elif self.last_name:
return self.last_name
else:
return "Unknown"
class SlimDocument(BaseModel):
"""Simplified document model (contains only ID and permission info)"""
id: str
external_access: Optional[Any] = None
class ConnectorCheckpoint(BaseModel):
"""Connector checkpoint model"""
has_more: bool = True
class DocumentFailure(BaseModel):
"""Document processing failure information"""
document_id: str
document_link: str
class EntityFailure(BaseModel):
"""Entity processing failure information"""
entity_id: str
missed_time_range: tuple[datetime, datetime]
class ConnectorFailure(BaseModel):
"""Connector failure information"""
failed_document: Optional[DocumentFailure] = None
failed_entity: Optional[EntityFailure] = None
failure_message: str
exception: Optional[Exception] = None
model_config = {"arbitrary_types_allowed": True}
# Gmail Models
class GmailCredentials(BaseModel):
"""Gmail authentication credentials model"""
primary_admin_email: str
credentials: dict[str, Any]
class GmailThread(BaseModel):
"""Gmail thread data model"""
id: str
messages: list[dict[str, Any]]
class GmailMessage(BaseModel):
"""Gmail message data model"""
id: str
payload: dict[str, Any]
label_ids: Optional[list[str]] = None
# Notion Models
class NotionPage(BaseModel):
"""Represents a Notion Page object"""
id: str
created_time: str
last_edited_time: str
archived: bool
properties: dict[str, Any]
url: str
parent: Optional[dict[str, Any]] = None # Parent reference for path reconstruction
database_name: Optional[str] = None # Only applicable to database type pages
class NotionBlock(BaseModel):
"""Represents a Notion Block object"""
id: str # Used for the URL
text: str
prefix: str # How this block should be joined with existing text
class NotionSearchResponse(BaseModel):
"""Represents the response from the Notion Search API"""
results: list[dict[str, Any]]
next_cursor: Optional[str]
has_more: bool = False
class NotionCredentials(BaseModel):
"""Notion authentication credentials model"""
integration_token: str
# Slack Models
class ChannelTopicPurposeType(TypedDict):
"""Slack channel topic or purpose"""
value: str
creator: str
last_set: int
class ChannelType(TypedDict):
"""Slack channel"""
id: str
name: str
is_channel: bool
is_group: bool
is_im: bool
created: int
creator: str
is_archived: bool
is_general: bool
unlinked: int
name_normalized: str
is_shared: bool
is_ext_shared: bool
is_org_shared: bool
pending_shared: List[str]
is_pending_ext_shared: bool
is_member: bool
is_private: bool
is_mpim: bool
updated: int
topic: ChannelTopicPurposeType
purpose: ChannelTopicPurposeType
previous_names: List[str]
num_members: int
class AttachmentType(TypedDict):
"""Slack message attachment"""
service_name: NotRequired[str]
text: NotRequired[str]
fallback: NotRequired[str]
thumb_url: NotRequired[str]
thumb_width: NotRequired[int]
thumb_height: NotRequired[int]
id: NotRequired[int]
class BotProfileType(TypedDict):
"""Slack bot profile"""
id: NotRequired[str]
deleted: NotRequired[bool]
name: NotRequired[str]
updated: NotRequired[int]
app_id: NotRequired[str]
team_id: NotRequired[str]
class MessageType(TypedDict):
"""Slack message"""
type: str
user: str
text: str
ts: str
attachments: NotRequired[List[AttachmentType]]
bot_id: NotRequired[str]
app_id: NotRequired[str]
bot_profile: NotRequired[BotProfileType]
thread_ts: NotRequired[str]
subtype: NotRequired[str]
# Thread message list
ThreadType = List[MessageType]
class SlackCheckpoint(TypedDict):
"""Slack checkpoint"""
channel_ids: List[str] | None
channel_completion_map: dict[str, str]
current_channel: ChannelType | None
current_channel_access: Any | None
seen_thread_ts: List[str]
has_more: bool
class SlackMessageFilterReason(str, Enum):
"""Slack message filter reason"""
BOT = "bot"
DISALLOWED = "disallowed"
class ProcessedSlackMessage:
"""Processed Slack message"""
def __init__(self, doc=None, thread_or_message_ts=None, filter_reason=None, failure=None):
self.doc = doc
self.thread_or_message_ts = thread_or_message_ts
self.filter_reason = filter_reason
self.failure = failure
class SeafileSyncScope(str, Enum):
"""Defines how much of SeaFile to synchronise."""
ACCOUNT = "account" # All libraries the token can see
LIBRARY = "library" # A single library (repo)
DIRECTORY = "directory" # A single directory inside a library
# Type aliases for type hints
SecondsSinceUnixEpoch = float
GenerateDocumentsOutput = Any
GenerateSlimDocumentOutput = Any
CheckpointOutput = Any
| {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/models.py",
"license": "Apache License 2.0",
"lines": 250,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/notion_connector.py | import html
import logging
from collections.abc import Generator
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Optional
from urllib.parse import urlparse
from retry import retry
from common.data_source.config import (
INDEX_BATCH_SIZE,
NOTION_CONNECTOR_DISABLE_RECURSIVE_PAGE_LOOKUP,
DocumentSource,
)
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
CredentialExpiredError,
InsufficientPermissionsError,
UnexpectedValidationError,
)
from common.data_source.interfaces import (
LoadConnector,
PollConnector,
SecondsSinceUnixEpoch,
)
from common.data_source.models import (
Document,
GenerateDocumentsOutput,
NotionBlock,
NotionPage,
NotionSearchResponse,
TextSection,
)
from common.data_source.utils import (
batch_generator,
datetime_from_string,
fetch_notion_data,
filter_pages_by_time,
properties_to_str,
rl_requests,
)
class NotionConnector(LoadConnector, PollConnector):
"""Notion Page connector that reads all Notion pages this integration has access to.
Arguments:
batch_size (int): Number of objects to index in a batch
recursive_index_enabled (bool): Whether to recursively index child pages
root_page_id (str | None): Specific root page ID to start indexing from
"""
def __init__(
self,
batch_size: int = INDEX_BATCH_SIZE,
recursive_index_enabled: bool = not NOTION_CONNECTOR_DISABLE_RECURSIVE_PAGE_LOOKUP,
root_page_id: Optional[str] = None,
) -> None:
self.batch_size = batch_size
self.headers = {
"Content-Type": "application/json",
"Notion-Version": "2022-06-28",
}
self.indexed_pages: set[str] = set()
self.root_page_id = root_page_id
self.recursive_index_enabled = recursive_index_enabled or bool(root_page_id)
self.page_path_cache: dict[str, str] = {}
@retry(tries=3, delay=1, backoff=2)
def _fetch_child_blocks(self, block_id: str, cursor: Optional[str] = None) -> dict[str, Any] | None:
"""Fetch all child blocks via the Notion API."""
logging.debug(f"[Notion]: Fetching children of block with ID {block_id}")
block_url = f"https://api.notion.com/v1/blocks/{block_id}/children"
query_params = {"start_cursor": cursor} if cursor else None
try:
response = rl_requests.get(
block_url,
headers=self.headers,
params=query_params,
timeout=30,
)
response.raise_for_status()
return response.json()
except Exception as e:
if hasattr(e, "response") and e.response.status_code == 404:
logging.error(f"[Notion]: Unable to access block with ID {block_id}. This is likely due to the block not being shared with the integration.")
return None
else:
logging.exception(f"[Notion]: Error fetching blocks: {e}")
raise
@retry(tries=3, delay=1, backoff=2)
def _fetch_page(self, page_id: str) -> NotionPage:
"""Fetch a page from its ID via the Notion API."""
logging.debug(f"[Notion]: Fetching page for ID {page_id}")
page_url = f"https://api.notion.com/v1/pages/{page_id}"
try:
data = fetch_notion_data(page_url, self.headers, "GET")
return NotionPage(**data)
except Exception as e:
logging.warning(f"[Notion]: Failed to fetch page, trying database for ID {page_id}: {e}")
return self._fetch_database_as_page(page_id)
@retry(tries=3, delay=1, backoff=2)
def _fetch_database_as_page(self, database_id: str) -> NotionPage:
"""Attempt to fetch a database as a page."""
logging.debug(f"[Notion]: Fetching database for ID {database_id} as a page")
database_url = f"https://api.notion.com/v1/databases/{database_id}"
data = fetch_notion_data(database_url, self.headers, "GET")
database_name = data.get("title")
database_name = database_name[0].get("text", {}).get("content") if database_name else None
return NotionPage(**data, database_name=database_name)
@retry(tries=3, delay=1, backoff=2)
def _fetch_database(self, database_id: str, cursor: Optional[str] = None) -> dict[str, Any]:
"""Fetch a database from its ID via the Notion API."""
logging.debug(f"[Notion]: Fetching database for ID {database_id}")
block_url = f"https://api.notion.com/v1/databases/{database_id}/query"
body = {"start_cursor": cursor} if cursor else None
try:
data = fetch_notion_data(block_url, self.headers, "POST", body)
return data
except Exception as e:
if hasattr(e, "response") and e.response.status_code in [404, 400]:
logging.error(f"[Notion]: Unable to access database with ID {database_id}. This is likely due to the database not being shared with the integration.")
return {"results": [], "next_cursor": None}
raise
def _read_pages_from_database(self, database_id: str) -> tuple[list[NotionBlock], list[str]]:
"""Returns a list of top level blocks and all page IDs in the database."""
result_blocks: list[NotionBlock] = []
result_pages: list[str] = []
cursor = None
while True:
data = self._fetch_database(database_id, cursor)
for result in data["results"]:
obj_id = result["id"]
obj_type = result["object"]
text = properties_to_str(result.get("properties", {}))
if text:
result_blocks.append(NotionBlock(id=obj_id, text=text, prefix="\n"))
if self.recursive_index_enabled:
if obj_type == "page":
logging.debug(f"[Notion]: Found page with ID {obj_id} in database {database_id}")
result_pages.append(result["id"])
elif obj_type == "database":
logging.debug(f"[Notion]: Found database with ID {obj_id} in database {database_id}")
_, child_pages = self._read_pages_from_database(obj_id)
result_pages.extend(child_pages)
if data["next_cursor"] is None:
break
cursor = data["next_cursor"]
return result_blocks, result_pages
def _extract_rich_text(self, rich_text_array: list[dict[str, Any]]) -> str:
collected_text: list[str] = []
for rich_text in rich_text_array:
content = ""
r_type = rich_text.get("type")
if r_type == "equation":
expr = rich_text.get("equation", {}).get("expression")
if expr:
content = expr
elif r_type == "mention":
mention = rich_text.get("mention", {}) or {}
mention_type = mention.get("type")
mention_value = mention.get(mention_type, {}) if mention_type else {}
if mention_type == "date":
start = mention_value.get("start")
end = mention_value.get("end")
if start and end:
content = f"{start} - {end}"
elif start:
content = start
elif mention_type in {"page", "database"}:
content = mention_value.get("id", rich_text.get("plain_text", ""))
elif mention_type == "link_preview":
content = mention_value.get("url", rich_text.get("plain_text", ""))
else:
content = rich_text.get("plain_text", "") or str(mention_value)
else:
if rich_text.get("plain_text"):
content = rich_text["plain_text"]
elif "text" in rich_text and rich_text["text"].get("content"):
content = rich_text["text"]["content"]
href = rich_text.get("href")
if content and href:
content = f"{content} ({href})"
if content:
collected_text.append(content)
return "".join(collected_text).strip()
def _build_table_html(self, table_block_id: str) -> str | None:
rows: list[str] = []
cursor = None
while True:
data = self._fetch_child_blocks(table_block_id, cursor)
if data is None:
break
for result in data["results"]:
if result.get("type") != "table_row":
continue
cells_html: list[str] = []
for cell in result["table_row"].get("cells", []):
cell_text = self._extract_rich_text(cell)
cell_html = html.escape(cell_text) if cell_text else ""
cells_html.append(f"<td>{cell_html}</td>")
rows.append(f"<tr>{''.join(cells_html)}</tr>")
if data.get("next_cursor") is None:
break
cursor = data["next_cursor"]
if not rows:
return None
return "<table>\n" + "\n".join(rows) + "\n</table>"
def _download_file(self, url: str) -> bytes | None:
try:
response = rl_requests.get(url, timeout=60)
response.raise_for_status()
return response.content
except Exception as exc:
logging.warning(f"[Notion]: Failed to download Notion file from {url}: {exc}")
return None
def _append_block_id_to_name(self, name: str, block_id: Optional[str]) -> str:
"""Append the Notion block ID to the filename while keeping the extension."""
if not block_id:
return name
path = Path(name)
stem = path.stem or name
suffix = path.suffix
if not stem:
return name
return f"{stem}_{block_id}{suffix}" if suffix else f"{stem}_{block_id}"
def _extract_file_metadata(self, result_obj: dict[str, Any], block_id: str) -> tuple[str | None, str, str | None]:
file_source_type = result_obj.get("type")
file_source = result_obj.get(file_source_type, {}) if file_source_type else {}
url = file_source.get("url")
name = result_obj.get("name") or file_source.get("name")
if url and not name:
parsed_name = Path(urlparse(url).path).name
name = parsed_name or f"notion_file_{block_id}"
elif not name:
name = f"notion_file_{block_id}"
name = self._append_block_id_to_name(name, block_id)
caption = self._extract_rich_text(result_obj.get("caption", [])) if "caption" in result_obj else None
return url, name, caption
def _build_attachment_document(
self,
block_id: str,
url: str,
name: str,
caption: Optional[str],
page_last_edited_time: Optional[str],
page_path: Optional[str],
) -> Document | None:
file_bytes = self._download_file(url)
if file_bytes is None:
return None
extension = Path(name).suffix or Path(urlparse(url).path).suffix or ".bin"
if extension and not extension.startswith("."):
extension = f".{extension}"
if not extension:
extension = ".bin"
updated_at = datetime_from_string(page_last_edited_time) if page_last_edited_time else datetime.now(timezone.utc)
base_identifier = name or caption or (f"Notion file {block_id}" if block_id else "Notion file")
semantic_identifier = f"{page_path} / {base_identifier}" if page_path else base_identifier
return Document(
id=block_id,
blob=file_bytes,
source=DocumentSource.NOTION,
semantic_identifier=semantic_identifier,
extension=extension,
size_bytes=len(file_bytes),
doc_updated_at=updated_at,
)
def _read_blocks(self, base_block_id: str, page_last_edited_time: Optional[str] = None, page_path: Optional[str] = None) -> tuple[list[NotionBlock], list[str], list[Document]]:
result_blocks: list[NotionBlock] = []
child_pages: list[str] = []
attachments: list[Document] = []
cursor = None
while True:
data = self._fetch_child_blocks(base_block_id, cursor)
if data is None:
return result_blocks, child_pages, attachments
for result in data["results"]:
logging.debug(f"[Notion]: Found child block for block with ID {base_block_id}: {result}")
result_block_id = result["id"]
result_type = result["type"]
result_obj = result[result_type]
if result_type in ["ai_block", "unsupported", "external_object_instance_page"]:
logging.warning(f"[Notion]: Skipping unsupported block type {result_type}")
continue
if result_type == "table":
table_html = self._build_table_html(result_block_id)
if table_html:
result_blocks.append(
NotionBlock(
id=result_block_id,
text=table_html,
prefix="\n\n",
)
)
continue
if result_type == "equation":
expr = result_obj.get("expression")
if expr:
result_blocks.append(
NotionBlock(
id=result_block_id,
text=expr,
prefix="\n",
)
)
continue
cur_result_text_arr = []
if "rich_text" in result_obj:
text = self._extract_rich_text(result_obj["rich_text"])
if text:
cur_result_text_arr.append(text)
if result_type == "bulleted_list_item":
if cur_result_text_arr:
cur_result_text_arr[0] = f"- {cur_result_text_arr[0]}"
else:
cur_result_text_arr = ["- "]
if result_type == "numbered_list_item":
if cur_result_text_arr:
cur_result_text_arr[0] = f"1. {cur_result_text_arr[0]}"
else:
cur_result_text_arr = ["1. "]
if result_type == "to_do":
checked = result_obj.get("checked")
checkbox_prefix = "[x]" if checked else "[ ]"
if cur_result_text_arr:
cur_result_text_arr = [f"{checkbox_prefix} {cur_result_text_arr[0]}"] + cur_result_text_arr[1:]
else:
cur_result_text_arr = [checkbox_prefix]
if result_type in {"file", "image", "pdf", "video", "audio"}:
file_url, file_name, caption = self._extract_file_metadata(result_obj, result_block_id)
if file_url:
attachment_doc = self._build_attachment_document(
block_id=result_block_id,
url=file_url,
name=file_name,
caption=caption,
page_last_edited_time=page_last_edited_time,
page_path=page_path,
)
if attachment_doc:
attachments.append(attachment_doc)
attachment_label = file_name
if caption:
attachment_label = f"{file_name} ({caption})"
if attachment_label:
cur_result_text_arr.append(f"{result_type.capitalize()}: {attachment_label}")
if result["has_children"]:
if result_type == "child_page":
child_pages.append(result_block_id)
else:
logging.debug(f"[Notion]: Entering sub-block: {result_block_id}")
subblocks, subblock_child_pages, subblock_attachments = self._read_blocks(result_block_id, page_last_edited_time, page_path)
logging.debug(f"[Notion]: Finished sub-block: {result_block_id}")
result_blocks.extend(subblocks)
child_pages.extend(subblock_child_pages)
attachments.extend(subblock_attachments)
if result_type == "child_database":
inner_blocks, inner_child_pages = self._read_pages_from_database(result_block_id)
result_blocks.extend(inner_blocks)
if self.recursive_index_enabled:
child_pages.extend(inner_child_pages)
if cur_result_text_arr:
new_block = NotionBlock(
id=result_block_id,
text="\n".join(cur_result_text_arr),
prefix="\n",
)
result_blocks.append(new_block)
if data["next_cursor"] is None:
break
cursor = data["next_cursor"]
return result_blocks, child_pages, attachments
def _read_page_title(self, page: NotionPage) -> Optional[str]:
"""Extracts the title from a Notion page."""
if hasattr(page, "database_name") and page.database_name:
return page.database_name
for _, prop in page.properties.items():
if prop["type"] == "title" and len(prop["title"]) > 0:
page_title = " ".join([t["plain_text"] for t in prop["title"]]).strip()
return page_title
return None
def _build_page_path(self, page: NotionPage, visited: Optional[set[str]] = None) -> Optional[str]:
"""Construct a hierarchical path for a page based on its parent chain."""
if page.id in self.page_path_cache:
return self.page_path_cache[page.id]
visited = visited or set()
if page.id in visited:
logging.warning(f"[Notion]: Detected cycle while building path for page {page.id}")
return self._read_page_title(page)
visited.add(page.id)
current_title = self._read_page_title(page) or f"Untitled Page {page.id}"
parent_info = getattr(page, "parent", None) or {}
parent_type = parent_info.get("type")
parent_id = parent_info.get(parent_type) if parent_type else None
parent_path = None
if parent_type in {"page_id", "database_id"} and isinstance(parent_id, str):
try:
parent_page = self._fetch_page(parent_id)
parent_path = self._build_page_path(parent_page, visited)
except Exception as exc:
logging.warning(f"[Notion]: Failed to resolve parent {parent_id} for page {page.id}: {exc}")
full_path = f"{parent_path} / {current_title}" if parent_path else current_title
self.page_path_cache[page.id] = full_path
return full_path
def _read_pages(self, pages: list[NotionPage], start: SecondsSinceUnixEpoch | None = None, end: SecondsSinceUnixEpoch | None = None) -> Generator[Document, None, None]:
"""Reads pages for rich text content and generates Documents."""
all_child_page_ids: list[str] = []
for page in pages:
if isinstance(page, dict):
page = NotionPage(**page)
if page.id in self.indexed_pages:
logging.debug(f"[Notion]: Already indexed page with ID {page.id}. Skipping.")
continue
if start is not None and end is not None:
page_ts = datetime_from_string(page.last_edited_time).timestamp()
if not (page_ts > start and page_ts <= end):
logging.debug(f"[Notion]: Skipping page {page.id} outside polling window.")
continue
logging.info(f"[Notion]: Reading page with ID {page.id}, with url {page.url}")
page_path = self._build_page_path(page)
page_blocks, child_page_ids, attachment_docs = self._read_blocks(page.id, page.last_edited_time, page_path)
all_child_page_ids.extend(child_page_ids)
self.indexed_pages.add(page.id)
raw_page_title = self._read_page_title(page)
page_title = raw_page_title or f"Untitled Page with ID {page.id}"
# Append the page id to help disambiguate duplicate names
base_identifier = page_path or page_title
semantic_identifier = f"{base_identifier}_{page.id}" if base_identifier else page.id
if not page_blocks:
if not raw_page_title:
logging.warning(f"[Notion]: No blocks OR title found for page with ID {page.id}. Skipping.")
continue
text = page_title
if page.properties:
text += "\n\n" + "\n".join([f"{key}: {value}" for key, value in page.properties.items()])
sections = [TextSection(link=page.url, text=text)]
else:
sections = [
TextSection(
link=f"{page.url}#{block.id.replace('-', '')}",
text=block.prefix + block.text,
)
for block in page_blocks
]
joined_text = "\n".join(sec.text for sec in sections)
blob = joined_text.encode("utf-8")
yield Document(
id=page.id, blob=blob, source=DocumentSource.NOTION, semantic_identifier=semantic_identifier, extension=".txt", size_bytes=len(blob), doc_updated_at=datetime_from_string(page.last_edited_time)
)
for attachment_doc in attachment_docs:
yield attachment_doc
if self.recursive_index_enabled and all_child_page_ids:
for child_page_batch_ids in batch_generator(all_child_page_ids, INDEX_BATCH_SIZE):
child_page_batch = [self._fetch_page(page_id) for page_id in child_page_batch_ids if page_id not in self.indexed_pages]
yield from self._read_pages(child_page_batch, start, end)
@retry(tries=3, delay=1, backoff=2)
def _search_notion(self, query_dict: dict[str, Any]) -> NotionSearchResponse:
"""Search for pages from a Notion database."""
logging.debug(f"[Notion]: Searching for pages in Notion with query_dict: {query_dict}")
data = fetch_notion_data("https://api.notion.com/v1/search", self.headers, "POST", query_dict)
return NotionSearchResponse(**data)
def _recursive_load(self, start: SecondsSinceUnixEpoch | None = None, end: SecondsSinceUnixEpoch | None = None) -> Generator[list[Document], None, None]:
"""Recursively load pages starting from root page ID."""
if self.root_page_id is None or not self.recursive_index_enabled:
raise RuntimeError("Recursive page lookup is not enabled")
logging.info(f"[Notion]: Recursively loading pages from Notion based on root page with ID: {self.root_page_id}")
pages = [self._fetch_page(page_id=self.root_page_id)]
yield from batch_generator(self._read_pages(pages, start, end), self.batch_size)
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Applies integration token to headers."""
self.headers["Authorization"] = f"Bearer {credentials['notion_integration_token']}"
return None
def load_from_state(self) -> GenerateDocumentsOutput:
"""Loads all page data from a Notion workspace."""
if self.recursive_index_enabled and self.root_page_id:
yield from self._recursive_load()
return
query_dict = {
"filter": {"property": "object", "value": "page"},
"page_size": 100,
}
while True:
db_res = self._search_notion(query_dict)
pages = [NotionPage(**page) for page in db_res.results]
yield from batch_generator(self._read_pages(pages), self.batch_size)
if db_res.has_more:
query_dict["start_cursor"] = db_res.next_cursor
else:
break
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> GenerateDocumentsOutput:
"""Poll Notion for updated pages within a time period."""
if self.recursive_index_enabled and self.root_page_id:
yield from self._recursive_load(start, end)
return
query_dict = {
"page_size": 100,
"sort": {"timestamp": "last_edited_time", "direction": "descending"},
"filter": {"property": "object", "value": "page"},
}
while True:
db_res = self._search_notion(query_dict)
pages = filter_pages_by_time(db_res.results, start, end, "last_edited_time")
if pages:
yield from batch_generator(self._read_pages(pages, start, end), self.batch_size)
if db_res.has_more:
query_dict["start_cursor"] = db_res.next_cursor
else:
break
else:
break
def validate_connector_settings(self) -> None:
"""Validate Notion connector settings and credentials."""
if not self.headers.get("Authorization"):
raise ConnectorMissingCredentialError("Notion credentials not loaded.")
try:
if self.root_page_id:
response = rl_requests.get(
f"https://api.notion.com/v1/pages/{self.root_page_id}",
headers=self.headers,
timeout=30,
)
else:
test_query = {"filter": {"property": "object", "value": "page"}, "page_size": 1}
response = rl_requests.post(
"https://api.notion.com/v1/search",
headers=self.headers,
json=test_query,
timeout=30,
)
response.raise_for_status()
except rl_requests.exceptions.HTTPError as http_err:
status_code = http_err.response.status_code if http_err.response else None
if status_code == 401:
raise CredentialExpiredError("Notion credential appears to be invalid or expired (HTTP 401).")
elif status_code == 403:
raise InsufficientPermissionsError("Your Notion token does not have sufficient permissions (HTTP 403).")
elif status_code == 404:
raise ConnectorValidationError("Notion resource not found or not shared with the integration (HTTP 404).")
elif status_code == 429:
raise ConnectorValidationError("Validation failed due to Notion rate-limits being exceeded (HTTP 429).")
else:
raise UnexpectedValidationError(f"Unexpected Notion HTTP error (status={status_code}): {http_err}")
except Exception as exc:
raise UnexpectedValidationError(f"Unexpected error during Notion settings validation: {exc}")
if __name__ == "__main__":
import os
root_page_id = os.environ.get("NOTION_ROOT_PAGE_ID")
connector = NotionConnector(root_page_id=root_page_id)
connector.load_credentials({"notion_integration_token": os.environ.get("NOTION_INTEGRATION_TOKEN")})
document_batches = connector.load_from_state()
for doc_batch in document_batches:
for doc in doc_batch:
print(doc) | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/notion_connector.py",
"license": "Apache License 2.0",
"lines": 549,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/sharepoint_connector.py | """SharePoint connector"""
from typing import Any
import msal
from office365.graph_client import GraphClient
from office365.runtime.client_request import ClientRequestException
from office365.sharepoint.client_context import ClientContext
from common.data_source.config import INDEX_BATCH_SIZE
from common.data_source.exceptions import ConnectorValidationError, ConnectorMissingCredentialError
from common.data_source.interfaces import (
CheckpointedConnectorWithPermSync,
SecondsSinceUnixEpoch,
SlimConnectorWithPermSync
)
from common.data_source.models import (
ConnectorCheckpoint
)
class SharePointConnector(CheckpointedConnectorWithPermSync, SlimConnectorWithPermSync):
"""SharePoint connector for accessing SharePoint sites and documents"""
def __init__(self, batch_size: int = INDEX_BATCH_SIZE) -> None:
self.batch_size = batch_size
self.sharepoint_client = None
self.graph_client = None
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Load SharePoint credentials"""
try:
tenant_id = credentials.get("tenant_id")
client_id = credentials.get("client_id")
client_secret = credentials.get("client_secret")
site_url = credentials.get("site_url")
if not all([tenant_id, client_id, client_secret, site_url]):
raise ConnectorMissingCredentialError("SharePoint credentials are incomplete")
# Create MSAL confidential client
app = msal.ConfidentialClientApplication(
client_id=client_id,
client_credential=client_secret,
authority=f"https://login.microsoftonline.com/{tenant_id}"
)
# Get access token
result = app.acquire_token_for_client(scopes=["https://graph.microsoft.com/.default"])
if "access_token" not in result:
raise ConnectorMissingCredentialError("Failed to acquire SharePoint access token")
# Create Graph client
self.graph_client = GraphClient(result["access_token"])
# Create SharePoint client context
self.sharepoint_client = ClientContext(site_url).with_access_token(result["access_token"])
return None
except Exception as e:
raise ConnectorMissingCredentialError(f"SharePoint: {e}")
def validate_connector_settings(self) -> None:
"""Validate SharePoint connector settings"""
if not self.sharepoint_client or not self.graph_client:
raise ConnectorMissingCredentialError("SharePoint")
try:
# Test connection by getting site info
site = self.sharepoint_client.site.get().execute_query()
if not site:
raise ConnectorValidationError("Failed to access SharePoint site")
except ClientRequestException as e:
if "401" in str(e) or "403" in str(e):
raise ConnectorValidationError("Invalid credentials or insufficient permissions")
else:
raise ConnectorValidationError(f"SharePoint validation error: {e}")
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> Any:
"""Poll SharePoint for recent documents"""
# Simplified implementation - in production this would handle actual polling
return []
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ConnectorCheckpoint,
) -> Any:
"""Load documents from checkpoint"""
# Simplified implementation
return []
def load_from_checkpoint_with_perm_sync(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ConnectorCheckpoint,
) -> Any:
"""Load documents from checkpoint with permission sync"""
# Simplified implementation
return []
def build_dummy_checkpoint(self) -> ConnectorCheckpoint:
"""Build dummy checkpoint"""
return ConnectorCheckpoint()
def validate_checkpoint_json(self, checkpoint_json: str) -> ConnectorCheckpoint:
"""Validate checkpoint JSON"""
# Simplified implementation
return ConnectorCheckpoint()
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: Any = None,
) -> Any:
"""Retrieve all simplified documents with permission sync"""
# Simplified implementation
return [] | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/sharepoint_connector.py",
"license": "Apache License 2.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/slack_connector.py | """Slack connector"""
import itertools
import logging
import re
from collections.abc import Callable, Generator
from datetime import datetime, timezone
from http.client import IncompleteRead, RemoteDisconnected
from typing import Any, cast
from urllib.error import URLError
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
from slack_sdk.http_retry import ConnectionErrorRetryHandler
from slack_sdk.http_retry.builtin_interval_calculators import FixedValueRetryIntervalCalculator
from common.data_source.config import (
INDEX_BATCH_SIZE, SLACK_NUM_THREADS, ENABLE_EXPENSIVE_EXPERT_CALLS,
_SLACK_LIMIT, FAST_TIMEOUT, MAX_RETRIES, MAX_CHANNELS_TO_LOG
)
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
CredentialExpiredError,
InsufficientPermissionsError,
UnexpectedValidationError
)
from common.data_source.interfaces import (
CheckpointedConnectorWithPermSync,
CredentialsConnector,
SlimConnectorWithPermSync
)
from common.data_source.models import (
BasicExpertInfo,
ConnectorCheckpoint,
ConnectorFailure,
Document,
DocumentFailure,
SlimDocument,
TextSection,
SecondsSinceUnixEpoch,
GenerateSlimDocumentOutput, MessageType, SlackMessageFilterReason, ChannelType, ThreadType, ProcessedSlackMessage,
CheckpointOutput
)
from common.data_source.utils import make_paginated_slack_api_call, SlackTextCleaner, expert_info_from_slack_id, \
get_message_link
# Disallowed message subtypes list
_DISALLOWED_MSG_SUBTYPES = {
    "channel_join", "channel_leave", "channel_archive", "channel_unarchive",
    "pinned_item", "unpinned_item", "ekm_access_denied", "channel_posting_permissions",
    "group_join", "group_leave", "group_archive", "group_unarchive",
    "channel_name",
}
def default_msg_filter(message: MessageType) -> SlackMessageFilterReason | None:
"""Default message filter"""
# Filter bot messages
if message.get("bot_id") or message.get("app_id"):
bot_profile_name = message.get("bot_profile", {}).get("name")
if bot_profile_name == "DanswerBot Testing":
return None
return SlackMessageFilterReason.BOT
# Filter non-informative content
if message.get("subtype", "") in _DISALLOWED_MSG_SUBTYPES:
return SlackMessageFilterReason.DISALLOWED
return None
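# Illustrative sketch (an assumption for demonstration, not part of the original
# connector): how default_msg_filter is expected to classify typical Slack
# message payloads. The payload dicts below are made-up examples.
#
#   default_msg_filter({"bot_id": "B123", "text": "hi"})
#       -> SlackMessageFilterReason.BOT
#   default_msg_filter({"subtype": "channel_join"})
#       -> SlackMessageFilterReason.DISALLOWED
#   default_msg_filter({"user": "U123", "text": "hello"})
#       -> None  (message is kept)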
def _collect_paginated_channels(
client: WebClient,
exclude_archived: bool,
channel_types: list[str],
) -> list[ChannelType]:
"""收集分页的频道列表"""
channels: list[ChannelType] = []
for result in make_paginated_slack_api_call(
client.conversations_list,
exclude_archived=exclude_archived,
types=channel_types,
):
channels.extend(result["channels"])
return channels
def get_channels(
client: WebClient,
exclude_archived: bool = True,
get_public: bool = True,
get_private: bool = True,
) -> list[ChannelType]:
channel_types = []
if get_public:
channel_types.append("public_channel")
if get_private:
channel_types.append("private_channel")
# First try to get public and private channels
try:
channels = _collect_paginated_channels(
client=client,
exclude_archived=exclude_archived,
channel_types=channel_types,
)
except SlackApiError as e:
msg = f"Unable to fetch private channels due to: {e}."
if not get_public:
logging.warning(msg + " Public channels are not enabled.")
return []
logging.warning(msg + " Trying again with public channels only.")
channel_types = ["public_channel"]
channels = _collect_paginated_channels(
client=client,
exclude_archived=exclude_archived,
channel_types=channel_types,
)
return channels
def get_channel_messages(
client: WebClient,
channel: ChannelType,
oldest: str | None = None,
latest: str | None = None,
callback: Any = None,
) -> Generator[list[MessageType], None, None]:
"""Get all messages in a channel"""
# Join channel so bot can access messages
if not channel["is_member"]:
client.conversations_join(
channel=channel["id"],
is_private=channel["is_private"],
)
logging.info(f"Successfully joined '{channel['name']}'")
for result in make_paginated_slack_api_call(
client.conversations_history,
channel=channel["id"],
oldest=oldest,
latest=latest,
):
if callback:
if callback.should_stop():
raise RuntimeError("get_channel_messages: Stop signal detected")
callback.progress("get_channel_messages", 0)
yield cast(list[MessageType], result["messages"])
def get_thread(client: WebClient, channel_id: str, thread_id: str) -> ThreadType:
threads: list[MessageType] = []
for result in make_paginated_slack_api_call(
client.conversations_replies, channel=channel_id, ts=thread_id
):
threads.extend(result["messages"])
return threads
def get_latest_message_time(thread: ThreadType) -> datetime:
max_ts = max([float(msg.get("ts", 0)) for msg in thread])
return datetime.fromtimestamp(max_ts, tz=timezone.utc)
def _build_doc_id(channel_id: str, thread_ts: str) -> str:
return f"{channel_id}__{thread_ts}"
def thread_to_doc(
channel: ChannelType,
thread: ThreadType,
slack_cleaner: SlackTextCleaner,
client: WebClient,
user_cache: dict[str, BasicExpertInfo | None],
channel_access: Any | None,
) -> Document:
channel_id = channel["id"]
initial_sender_expert_info = expert_info_from_slack_id(
user_id=thread[0].get("user"), client=client, user_cache=user_cache
)
initial_sender_name = (
initial_sender_expert_info.get_semantic_name()
if initial_sender_expert_info
else "Unknown"
)
valid_experts = None
if ENABLE_EXPENSIVE_EXPERT_CALLS:
all_sender_ids = [m.get("user") for m in thread]
experts = [
expert_info_from_slack_id(
user_id=sender_id, client=client, user_cache=user_cache
)
for sender_id in all_sender_ids
if sender_id
]
valid_experts = [expert for expert in experts if expert]
first_message = slack_cleaner.index_clean(cast(str, thread[0]["text"]))
snippet = (
first_message[:50].rstrip() + "..."
if len(first_message) > 50
else first_message
)
doc_sem_id = f"{initial_sender_name} in #{channel['name']}: {snippet}".replace(
"\n", " "
)
return Document(
id=_build_doc_id(channel_id=channel_id, thread_ts=thread[0]["ts"]),
sections=[
TextSection(
link=get_message_link(event=m, client=client, channel_id=channel_id),
text=slack_cleaner.index_clean(cast(str, m["text"])),
)
for m in thread
],
source="slack",
semantic_identifier=doc_sem_id,
doc_updated_at=get_latest_message_time(thread),
primary_owners=valid_experts,
metadata={"Channel": channel["name"]},
external_access=channel_access,
)
def filter_channels(
all_channels: list[ChannelType],
channels_to_connect: list[str] | None,
regex_enabled: bool,
) -> list[ChannelType]:
if not channels_to_connect:
return all_channels
if regex_enabled:
return [
channel
for channel in all_channels
if any(
re.fullmatch(channel_to_connect, channel["name"])
for channel_to_connect in channels_to_connect
)
]
# Validate all specified channels are valid
all_channel_names = {channel["name"] for channel in all_channels}
for channel in channels_to_connect:
if channel not in all_channel_names:
            raise ValueError(
                f"Channel '{channel}' not found in workspace. "
                f"Available channels (showing "
                f"{min(len(all_channel_names), MAX_CHANNELS_TO_LOG)} of {len(all_channel_names)}): "
                f"{list(itertools.islice(all_channel_names, MAX_CHANNELS_TO_LOG))}"
            )
return [
channel for channel in all_channels if channel["name"] in channels_to_connect
]
def _get_channel_by_id(client: WebClient, channel_id: str) -> ChannelType:
response = client.conversations_info(
channel=channel_id,
)
return cast(ChannelType, response["channel"])
def _get_messages(
channel: ChannelType,
client: WebClient,
oldest: str | None = None,
latest: str | None = None,
limit: int = _SLACK_LIMIT,
) -> tuple[list[MessageType], bool]:
"""Get messages (Slack returns from newest to oldest)"""
# Must join channel to read messages
if not channel["is_member"]:
try:
client.conversations_join(
channel=channel["id"],
is_private=channel["is_private"],
)
except SlackApiError as e:
if e.response["error"] == "is_archived":
logging.warning(f"Channel {channel['name']} is archived. Skipping.")
return [], False
logging.exception(f"Error joining channel {channel['name']}")
raise
logging.info(f"Successfully joined '{channel['name']}'")
response = client.conversations_history(
channel=channel["id"],
oldest=oldest,
latest=latest,
limit=limit,
)
response.validate()
messages = cast(list[MessageType], response.get("messages", []))
cursor = cast(dict[str, Any], response.get("response_metadata", {})).get(
"next_cursor", ""
)
has_more = bool(cursor)
return messages, has_more
def _message_to_doc(
message: MessageType,
client: WebClient,
channel: ChannelType,
slack_cleaner: SlackTextCleaner,
user_cache: dict[str, BasicExpertInfo | None],
seen_thread_ts: set[str],
channel_access: Any | None,
msg_filter_func: Callable[
[MessageType], SlackMessageFilterReason | None
] = default_msg_filter,
) -> tuple[Document | None, SlackMessageFilterReason | None]:
"""Convert message to document"""
filtered_thread: ThreadType | None = None
filter_reason: SlackMessageFilterReason | None = None
thread_ts = message.get("thread_ts")
if thread_ts:
# If thread_ts exists, need to process thread
if thread_ts in seen_thread_ts:
return None, None
thread = get_thread(
client=client, channel_id=channel["id"], thread_id=thread_ts
)
        filtered_thread = []
        for thread_message in thread:
            filter_reason = msg_filter_func(thread_message)
            if filter_reason:
                continue
            filtered_thread.append(thread_message)
else:
filter_reason = msg_filter_func(message)
if filter_reason:
return None, filter_reason
filtered_thread = [message]
if not filtered_thread:
return None, filter_reason
doc = thread_to_doc(
channel=channel,
thread=filtered_thread,
slack_cleaner=slack_cleaner,
client=client,
user_cache=user_cache,
channel_access=channel_access,
)
return doc, None
def _process_message(
message: MessageType,
client: WebClient,
channel: ChannelType,
slack_cleaner: SlackTextCleaner,
user_cache: dict[str, BasicExpertInfo | None],
seen_thread_ts: set[str],
channel_access: Any | None,
msg_filter_func: Callable[
[MessageType], SlackMessageFilterReason | None
] = default_msg_filter,
) -> ProcessedSlackMessage:
thread_ts = message.get("thread_ts")
thread_or_message_ts = thread_ts or message["ts"]
try:
doc, filter_reason = _message_to_doc(
message=message,
client=client,
channel=channel,
slack_cleaner=slack_cleaner,
user_cache=user_cache,
seen_thread_ts=seen_thread_ts,
channel_access=channel_access,
msg_filter_func=msg_filter_func,
)
return ProcessedSlackMessage(
doc=doc,
thread_or_message_ts=thread_or_message_ts,
filter_reason=filter_reason,
failure=None,
)
except Exception as e:
(logging.exception(f"Error processing message {message['ts']}"))
return ProcessedSlackMessage(
doc=None,
thread_or_message_ts=thread_or_message_ts,
filter_reason=None,
failure=ConnectorFailure(
failed_document=DocumentFailure(
document_id=_build_doc_id(
channel_id=channel["id"], thread_ts=thread_or_message_ts
),
document_link=get_message_link(message, client, channel["id"]),
),
failure_message=str(e),
exception=e,
),
)
def _get_all_doc_ids(
client: WebClient,
channels: list[str] | None = None,
channel_name_regex_enabled: bool = False,
msg_filter_func: Callable[
[MessageType], SlackMessageFilterReason | None
] = default_msg_filter,
callback: Any = None,
) -> GenerateSlimDocumentOutput:
all_channels = get_channels(client)
filtered_channels = filter_channels(
all_channels, channels, channel_name_regex_enabled
)
for channel in filtered_channels:
channel_id = channel["id"]
external_access = None # Simplified version, not handling permissions
channel_message_batches = get_channel_messages(
client=client,
channel=channel,
callback=callback,
)
for message_batch in channel_message_batches:
slim_doc_batch: list[SlimDocument] = []
for message in message_batch:
filter_reason = msg_filter_func(message)
if filter_reason:
continue
slim_doc_batch.append(
SlimDocument(
id=_build_doc_id(
channel_id=channel_id, thread_ts=message["ts"]
),
external_access=external_access,
)
)
yield slim_doc_batch
class SlackConnector(
SlimConnectorWithPermSync,
CredentialsConnector,
CheckpointedConnectorWithPermSync,
):
"""Slack connector"""
def __init__(
self,
channels: list[str] | None = None,
channel_regex_enabled: bool = False,
batch_size: int = INDEX_BATCH_SIZE,
num_threads: int = SLACK_NUM_THREADS,
use_redis: bool = False, # Simplified version, not using Redis
) -> None:
self.channels = channels
self.channel_regex_enabled = channel_regex_enabled
self.batch_size = batch_size
self.num_threads = num_threads
self.client: WebClient | None = None
self.fast_client: WebClient | None = None
self.text_cleaner: SlackTextCleaner | None = None
self.user_cache: dict[str, BasicExpertInfo | None] = {}
self.credentials_provider: Any = None
self.use_redis = use_redis
@property
def channels(self) -> list[str] | None:
return self._channels
@channels.setter
def channels(self, channels: list[str] | None) -> None:
self._channels = (
[channel.removeprefix("#") for channel in channels] if channels else None
)
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Load credentials"""
raise NotImplementedError("Use set_credentials_provider with this connector.")
def set_credentials_provider(self, credentials_provider: Any) -> None:
"""Set credentials provider"""
credentials = credentials_provider.get_credentials()
bot_token = credentials["slack_bot_token"]
# Simplified version, not using Redis
connection_error_retry_handler = ConnectionErrorRetryHandler(
max_retry_count=MAX_RETRIES,
interval_calculator=FixedValueRetryIntervalCalculator(),
error_types=[
URLError,
ConnectionResetError,
RemoteDisconnected,
IncompleteRead,
],
)
self.client = WebClient(
token=bot_token, retry_handlers=[connection_error_retry_handler]
)
# For fast response requests
self.fast_client = WebClient(
token=bot_token, timeout=FAST_TIMEOUT
)
self.text_cleaner = SlackTextCleaner(client=self.client)
self.credentials_provider = credentials_provider
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: Any = None,
) -> GenerateSlimDocumentOutput:
if self.client is None:
raise ConnectorMissingCredentialError("Slack")
return _get_all_doc_ids(
client=self.client,
channels=self.channels,
channel_name_regex_enabled=self.channel_regex_enabled,
callback=callback,
)
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ConnectorCheckpoint,
) -> CheckpointOutput:
"""Load documents from checkpoint"""
# Simplified version, not implementing full checkpoint functionality
logging.warning("Checkpoint functionality not implemented in simplified version")
return []
def load_from_checkpoint_with_perm_sync(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ConnectorCheckpoint,
) -> CheckpointOutput:
"""Load documents from checkpoint (with permission sync)"""
# Simplified version, not implementing full checkpoint functionality
logging.warning("Checkpoint functionality not implemented in simplified version")
return []
def build_dummy_checkpoint(self) -> ConnectorCheckpoint:
"""Build dummy checkpoint"""
return ConnectorCheckpoint()
def validate_checkpoint_json(self, checkpoint_json: str) -> ConnectorCheckpoint:
"""Validate checkpoint JSON"""
return ConnectorCheckpoint()
def validate_connector_settings(self) -> None:
"""Validate connector settings"""
if self.fast_client is None:
raise ConnectorMissingCredentialError("Slack credentials not loaded.")
try:
# 1) Validate workspace connection
auth_response = self.fast_client.auth_test()
if not auth_response.get("ok", False):
error_msg = auth_response.get(
"error", "Unknown error from Slack auth_test"
)
raise ConnectorValidationError(f"Failed Slack auth_test: {error_msg}")
# 2) Confirm listing channels functionality works
test_resp = self.fast_client.conversations_list(
limit=1, types=["public_channel"]
)
if not test_resp.get("ok", False):
error_msg = test_resp.get("error", "Unknown error from Slack")
if error_msg == "invalid_auth":
raise ConnectorValidationError(
f"Invalid Slack bot token ({error_msg})."
)
elif error_msg == "not_authed":
raise CredentialExpiredError(
f"Invalid or expired Slack bot token ({error_msg})."
)
raise UnexpectedValidationError(
f"Slack API returned a failure: {error_msg}"
)
except SlackApiError as e:
slack_error = e.response.get("error", "")
if slack_error == "ratelimited":
retry_after = int(e.response.headers.get("Retry-After", 1))
logging.warning(
f"Slack API rate limited during validation. Retry suggested after {retry_after} seconds. "
"Proceeding with validation, but be aware that connector operations might be throttled."
)
return
elif slack_error == "missing_scope":
raise InsufficientPermissionsError(
"Slack bot token lacks the necessary scope to list/access channels. "
"Please ensure your Slack app has 'channels:read' (and/or 'groups:read' for private channels)."
)
elif slack_error == "invalid_auth":
raise CredentialExpiredError(
f"Invalid Slack bot token ({slack_error})."
)
elif slack_error == "not_authed":
raise CredentialExpiredError(
f"Invalid or expired Slack bot token ({slack_error})."
)
raise UnexpectedValidationError(
f"Unexpected Slack error '{slack_error}' during settings validation."
)
except ConnectorValidationError as e:
raise e
except Exception as e:
raise UnexpectedValidationError(
f"Unexpected error during Slack settings validation: {e}"
)
if __name__ == "__main__":
# Example usage
import os
slack_channel = os.environ.get("SLACK_CHANNEL")
connector = SlackConnector(
channels=[slack_channel] if slack_channel else None,
)
# Simplified version, directly using credentials dictionary
credentials = {
"slack_bot_token": os.environ.get("SLACK_BOT_TOKEN", "test-token")
}
class SimpleCredentialsProvider:
def get_credentials(self):
return credentials
provider = SimpleCredentialsProvider()
connector.set_credentials_provider(provider)
try:
connector.validate_connector_settings()
print("Slack connector settings validated successfully")
except Exception as e:
print(f"Validation failed: {e}") | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/slack_connector.py",
"license": "Apache License 2.0",
"lines": 576,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/data_source/teams_connector.py | """Microsoft Teams connector"""
from typing import Any
import msal
from office365.graph_client import GraphClient
from office365.runtime.client_request_exception import ClientRequestException
from common.data_source.exceptions import (
ConnectorValidationError,
InsufficientPermissionsError,
UnexpectedValidationError, ConnectorMissingCredentialError
)
from common.data_source.interfaces import (
SecondsSinceUnixEpoch,
SlimConnectorWithPermSync, CheckpointedConnectorWithPermSync
)
from common.data_source.models import (
ConnectorCheckpoint
)
_SLIM_DOC_BATCH_SIZE = 5000
class TeamsCheckpoint(ConnectorCheckpoint):
"""Teams-specific checkpoint"""
todo_team_ids: list[str] | None = None
class TeamsConnector(CheckpointedConnectorWithPermSync, SlimConnectorWithPermSync):
"""Microsoft Teams connector for accessing Teams messages and channels"""
def __init__(self, batch_size: int = _SLIM_DOC_BATCH_SIZE) -> None:
self.batch_size = batch_size
self.teams_client = None
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Load Microsoft Teams credentials"""
try:
tenant_id = credentials.get("tenant_id")
client_id = credentials.get("client_id")
client_secret = credentials.get("client_secret")
if not all([tenant_id, client_id, client_secret]):
raise ConnectorMissingCredentialError("Microsoft Teams credentials are incomplete")
# Create MSAL confidential client
app = msal.ConfidentialClientApplication(
client_id=client_id,
client_credential=client_secret,
authority=f"https://login.microsoftonline.com/{tenant_id}"
)
# Get access token
result = app.acquire_token_for_client(scopes=["https://graph.microsoft.com/.default"])
if "access_token" not in result:
raise ConnectorMissingCredentialError("Failed to acquire Microsoft Teams access token")
# Create Graph client for Teams
self.teams_client = GraphClient(result["access_token"])
return None
except Exception as e:
raise ConnectorMissingCredentialError(f"Microsoft Teams: {e}")
def validate_connector_settings(self) -> None:
"""Validate Microsoft Teams connector settings"""
if not self.teams_client:
raise ConnectorMissingCredentialError("Microsoft Teams")
try:
# Test connection by getting teams
teams = self.teams_client.teams.get().execute_query()
if not teams:
raise ConnectorValidationError("Failed to access Microsoft Teams")
except ClientRequestException as e:
if "401" in str(e) or "403" in str(e):
raise InsufficientPermissionsError("Invalid credentials or insufficient permissions")
else:
raise UnexpectedValidationError(f"Microsoft Teams validation error: {e}")
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> Any:
"""Poll Microsoft Teams for recent messages"""
# Simplified implementation - in production this would handle actual polling
return []
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ConnectorCheckpoint,
) -> Any:
"""Load documents from checkpoint"""
# Simplified implementation
return []
def build_dummy_checkpoint(self) -> ConnectorCheckpoint:
"""Build dummy checkpoint"""
return TeamsCheckpoint()
def validate_checkpoint_json(self, checkpoint_json: str) -> ConnectorCheckpoint:
"""Validate checkpoint JSON"""
# Simplified implementation
return TeamsCheckpoint()
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: Any = None,
) -> Any:
"""Retrieve all simplified documents with permission sync"""
# Simplified implementation
return [] | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/data_source/teams_connector.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:rag/svr/sync_data_source.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# from beartype import BeartypeConf
# from beartype.claw import beartype_all # <-- you didn't sign up for this
# beartype_all(conf=BeartypeConf(violation_type=UserWarning)) # <-- emit warnings from all code
import time
start_ts = time.time()
import asyncio
import copy
import faulthandler
import logging
import os
import signal
import sys
import threading
import traceback
from datetime import datetime, timezone
from typing import Any
from flask import json
from api.utils.common import hash128
from api.db.services.connector_service import ConnectorService, SyncLogsService
from api.db.services.knowledgebase_service import KnowledgebaseService
from common import settings
from common.config_utils import show_configs
from common.data_source import (
BlobStorageConnector,
NotionConnector,
DiscordConnector,
GoogleDriveConnector,
MoodleConnector,
JiraConnector,
DropboxConnector,
AirtableConnector,
AsanaConnector,
ImapConnector,
ZendeskConnector,
SeaFileConnector,
RDBMSConnector,
)
from common.constants import FileSource, TaskStatus
from common.data_source.config import INDEX_BATCH_SIZE
from common.data_source.models import ConnectorFailure, SeafileSyncScope
from common.data_source.webdav_connector import WebDAVConnector
from common.data_source.confluence_connector import ConfluenceConnector
from common.data_source.gmail_connector import GmailConnector
from common.data_source.box_connector import BoxConnector
from common.data_source.github.connector import GithubConnector
from common.data_source.gitlab_connector import GitlabConnector
from common.data_source.bitbucket.connector import BitbucketConnector
from common.data_source.interfaces import CheckpointOutputWrapper
from common.log_utils import init_root_logger
from common.signal_utils import start_tracemalloc_and_snapshot, stop_tracemalloc
from common.versions import get_ragflow_version
from box_sdk_gen import BoxOAuth, OAuthConfig, AccessToken
MAX_CONCURRENT_TASKS = int(os.environ.get("MAX_CONCURRENT_TASKS", "5"))
task_limiter = asyncio.Semaphore(MAX_CONCURRENT_TASKS)
class SyncBase:
    SOURCE_NAME: str | None = None
def __init__(self, conf: dict) -> None:
self.conf = conf
async def __call__(self, task: dict):
SyncLogsService.start(task["id"], task["connector_id"])
async with task_limiter:
try:
await asyncio.wait_for(self._run_task_logic(task), timeout=task["timeout_secs"])
except asyncio.TimeoutError:
msg = f"Task timeout after {task['timeout_secs']} seconds"
SyncLogsService.update_by_id(task["id"], {"status": TaskStatus.FAIL, "error_msg": msg})
return
except Exception as ex:
msg = "\n".join([
"".join(traceback.format_exception_only(None, ex)).strip(),
"".join(traceback.format_exception(None, ex, ex.__traceback__)).strip(),
])
SyncLogsService.update_by_id(task["id"], {
"status": TaskStatus.FAIL,
"full_exception_trace": msg,
"error_msg": str(ex)
})
return
SyncLogsService.schedule(task["connector_id"], task["kb_id"], task["poll_range_start"])
async def _run_task_logic(self, task: dict):
document_batch_generator = await self._generate(task)
doc_num = 0
failed_docs = 0
next_update = datetime(1970, 1, 1, tzinfo=timezone.utc)
if task["poll_range_start"]:
next_update = task["poll_range_start"]
for document_batch in document_batch_generator:
if not document_batch:
continue
min_update = min(doc.doc_updated_at for doc in document_batch)
max_update = max(doc.doc_updated_at for doc in document_batch)
next_update = max(next_update, max_update)
docs = []
for doc in document_batch:
d = {
"id": hash128(doc.id),
"connector_id": task["connector_id"],
"source": self.SOURCE_NAME,
"semantic_identifier": doc.semantic_identifier,
"extension": doc.extension,
"size_bytes": doc.size_bytes,
"doc_updated_at": doc.doc_updated_at,
"blob": doc.blob,
}
if doc.metadata:
d["metadata"] = doc.metadata
docs.append(d)
try:
e, kb = KnowledgebaseService.get_by_id(task["kb_id"])
err, dids = SyncLogsService.duplicate_and_parse(
kb, docs, task["tenant_id"],
f"{self.SOURCE_NAME}/{task['connector_id']}",
task["auto_parse"]
)
SyncLogsService.increase_docs(
task["id"], min_update, max_update,
len(docs), "\n".join(err), len(err)
)
doc_num += len(docs)
except Exception as batch_ex:
msg = str(batch_ex)
code = getattr(batch_ex, "args", [None])[0]
if code == 1267 or "collation" in msg.lower():
logging.warning(f"Skipping {len(docs)} document(s) due to collation conflict")
else:
logging.error(f"Error processing batch: {msg}")
failed_docs += len(docs)
continue
prefix = self._get_source_prefix()
if failed_docs > 0:
logging.info(f"{prefix}{doc_num} docs synchronized till {next_update} ({failed_docs} skipped)")
else:
logging.info(f"{prefix}{doc_num} docs synchronized till {next_update}")
SyncLogsService.done(task["id"], task["connector_id"])
task["poll_range_start"] = next_update
async def _generate(self, task: dict):
raise NotImplementedError
def _get_source_prefix(self):
return ""
class _BlobLikeBase(SyncBase):
DEFAULT_BUCKET_TYPE: str = "s3"
async def _generate(self, task: dict):
bucket_type = self.conf.get("bucket_type", self.DEFAULT_BUCKET_TYPE)
self.connector = BlobStorageConnector(
bucket_type=bucket_type,
bucket_name=self.conf["bucket_name"],
prefix=self.conf.get("prefix", ""),
)
self.connector.load_credentials(self.conf["credentials"])
document_batch_generator = (
self.connector.load_from_state()
if task["reindex"] == "1" or not task["poll_range_start"]
else self.connector.poll_source(
task["poll_range_start"].timestamp(),
datetime.now(timezone.utc).timestamp(),
)
)
begin_info = (
"totally"
if task["reindex"] == "1" or not task["poll_range_start"]
else "from {}".format(task["poll_range_start"])
)
logging.info(
"Connect to {}: {}(prefix/{}) {}".format(
bucket_type,
self.conf["bucket_name"],
self.conf.get("prefix", ""),
begin_info,
)
)
return document_batch_generator
class S3(_BlobLikeBase):
SOURCE_NAME: str = FileSource.S3
DEFAULT_BUCKET_TYPE: str = "s3"
class R2(_BlobLikeBase):
SOURCE_NAME: str = FileSource.R2
DEFAULT_BUCKET_TYPE: str = "r2"
class OCI_STORAGE(_BlobLikeBase):
SOURCE_NAME: str = FileSource.OCI_STORAGE
DEFAULT_BUCKET_TYPE: str = "oci_storage"
class GOOGLE_CLOUD_STORAGE(_BlobLikeBase):
SOURCE_NAME: str = FileSource.GOOGLE_CLOUD_STORAGE
DEFAULT_BUCKET_TYPE: str = "google_cloud_storage"
class Confluence(SyncBase):
SOURCE_NAME: str = FileSource.CONFLUENCE
async def _generate(self, task: dict):
from common.data_source.config import DocumentSource
from common.data_source.interfaces import StaticCredentialsProvider
index_mode = (self.conf.get("index_mode") or "everything").lower()
if index_mode not in {"everything", "space", "page"}:
index_mode = "everything"
space = ""
page_id = ""
index_recursively = False
if index_mode == "space":
space = (self.conf.get("space") or "").strip()
if not space:
raise ValueError("Space Key is required when indexing a specific Confluence space.")
elif index_mode == "page":
page_id = (self.conf.get("page_id") or "").strip()
if not page_id:
raise ValueError("Page ID is required when indexing a specific Confluence page.")
index_recursively = bool(self.conf.get("index_recursively", False))
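        # Hedged examples of the config shapes the branches above expect
        # (illustrative assumptions, not an exhaustive schema):
        #   {"index_mode": "everything"}                    -> whole wiki
        #   {"index_mode": "space", "space": "ENG"}         -> one space
        #   {"index_mode": "page", "page_id": "12345",
        #    "index_recursively": True}                     -> page subtree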
self.connector = ConfluenceConnector(
wiki_base=self.conf["wiki_base"],
is_cloud=self.conf.get("is_cloud", True),
space=space,
page_id=page_id,
index_recursively=index_recursively,
)
credentials_provider = StaticCredentialsProvider(tenant_id=task["tenant_id"],
connector_name=DocumentSource.CONFLUENCE,
credential_json=self.conf["credentials"])
self.connector.set_credentials_provider(credentials_provider)
# Determine the time range for synchronization based on reindex or poll_range_start
if task["reindex"] == "1" or not task["poll_range_start"]:
start_time = 0.0
begin_info = "totally"
else:
start_time = task["poll_range_start"].timestamp()
begin_info = f"from {task['poll_range_start']}"
end_time = datetime.now(timezone.utc).timestamp()
raw_batch_size = self.conf.get("sync_batch_size") or self.conf.get("batch_size") or INDEX_BATCH_SIZE
try:
batch_size = int(raw_batch_size)
except (TypeError, ValueError):
batch_size = INDEX_BATCH_SIZE
if batch_size <= 0:
batch_size = INDEX_BATCH_SIZE
def document_batches():
checkpoint = self.connector.build_dummy_checkpoint()
pending_docs = []
iterations = 0
iteration_limit = 100_000
while checkpoint.has_more:
wrapper = CheckpointOutputWrapper()
doc_generator = wrapper(self.connector.load_from_checkpoint(start_time, end_time, checkpoint))
for document, failure, next_checkpoint in doc_generator:
if failure is not None:
logging.warning("Confluence connector failure: %s",
getattr(failure, "failure_message", failure))
continue
if document is not None:
pending_docs.append(document)
if len(pending_docs) >= batch_size:
yield pending_docs
pending_docs = []
if next_checkpoint is not None:
checkpoint = next_checkpoint
iterations += 1
if iterations > iteration_limit:
raise RuntimeError("Too many iterations while loading Confluence documents.")
if pending_docs:
yield pending_docs
def wrapper():
for batch in document_batches():
yield batch
logging.info("Connect to Confluence: {} {}".format(self.conf["wiki_base"], begin_info))
return wrapper()
class Notion(SyncBase):
SOURCE_NAME: str = FileSource.NOTION
async def _generate(self, task: dict):
self.connector = NotionConnector(root_page_id=self.conf["root_page_id"])
self.connector.load_credentials(self.conf["credentials"])
document_generator = (
self.connector.load_from_state()
if task["reindex"] == "1" or not task["poll_range_start"]
else self.connector.poll_source(task["poll_range_start"].timestamp(),
datetime.now(timezone.utc).timestamp())
)
begin_info = "totally" if task["reindex"] == "1" or not task["poll_range_start"] else "from {}".format(
task["poll_range_start"])
logging.info("Connect to Notion: root({}) {}".format(self.conf["root_page_id"], begin_info))
return document_generator
class Discord(SyncBase):
SOURCE_NAME: str = FileSource.DISCORD
async def _generate(self, task: dict):
server_ids: str | None = self.conf.get("server_ids", None)
# "channel1,channel2"
channel_names: str | None = self.conf.get("channel_names", None)
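        # Assumed config shape (illustrative placeholders only):
        #   {"server_ids": "123456789012345678,234567890123456789",
        #    "channel_names": "general,support",
        #    "credentials": {...},
        #    "batch_size": 1024}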
self.connector = DiscordConnector(
server_ids=server_ids.split(",") if server_ids else [],
channel_names=channel_names.split(",") if channel_names else [],
start_date=datetime(1970, 1, 1, tzinfo=timezone.utc).strftime("%Y-%m-%d"),
batch_size=self.conf.get("batch_size", 1024),
)
self.connector.load_credentials(self.conf["credentials"])
document_generator = (
self.connector.load_from_state()
if task["reindex"] == "1" or not task["poll_range_start"]
else self.connector.poll_source(task["poll_range_start"].timestamp(),
datetime.now(timezone.utc).timestamp())
)
begin_info = "totally" if task["reindex"] == "1" or not task["poll_range_start"] else "from {}".format(
task["poll_range_start"])
logging.info("Connect to Discord: servers({}), channel({}) {}".format(server_ids, channel_names, begin_info))
return document_generator
class Gmail(SyncBase):
SOURCE_NAME: str = FileSource.GMAIL
async def _generate(self, task: dict):
# Gmail sync reuses the generic LoadConnector/PollConnector interface
# implemented by common.data_source.gmail_connector.GmailConnector.
#
# Config expectations (self.conf):
# credentials: Gmail / Workspace OAuth JSON (with primary admin email)
# batch_size: optional, defaults to INDEX_BATCH_SIZE
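        #
        # Minimal illustrative config (placeholder values; the exact credential
        # fields depend on the Workspace OAuth JSON and are assumed here):
        #   {"credentials": {...Workspace OAuth JSON with primary admin email...},
        #    "batch_size": 64}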
batch_size = self.conf.get("batch_size", INDEX_BATCH_SIZE)
self.connector = GmailConnector(batch_size=batch_size)
credentials = self.conf.get("credentials")
if not credentials:
raise ValueError("Gmail connector is missing credentials.")
new_credentials = self.connector.load_credentials(credentials)
if new_credentials:
# Persist rotated / refreshed credentials back to connector config
try:
updated_conf = copy.deepcopy(self.conf)
updated_conf["credentials"] = new_credentials
ConnectorService.update_by_id(task["connector_id"], {"config": updated_conf})
self.conf = updated_conf
logging.info(
"Persisted refreshed Gmail credentials for connector %s",
task["connector_id"],
)
except Exception:
logging.exception(
"Failed to persist refreshed Gmail credentials for connector %s",
task["connector_id"],
)
# Decide between full reindex and incremental polling by time range.
if task["reindex"] == "1" or not task.get("poll_range_start"):
start_time = None
end_time = None
begin_info = "totally"
document_generator = self.connector.load_from_state()
else:
poll_start = task["poll_range_start"]
# Defensive: if poll_start is somehow None, fall back to full load
if poll_start is None:
start_time = None
end_time = None
begin_info = "totally"
document_generator = self.connector.load_from_state()
else:
start_time = poll_start.timestamp()
end_time = datetime.now(timezone.utc).timestamp()
begin_info = f"from {poll_start}"
document_generator = self.connector.poll_source(start_time, end_time)
try:
admin_email = self.connector.primary_admin_email
except RuntimeError:
admin_email = "unknown"
logging.info(f"Connect to Gmail as {admin_email} {begin_info}")
return document_generator
class Dropbox(SyncBase):
SOURCE_NAME: str = FileSource.DROPBOX
async def _generate(self, task: dict):
self.connector = DropboxConnector(batch_size=self.conf.get("batch_size", INDEX_BATCH_SIZE))
self.connector.load_credentials(self.conf["credentials"])
if task["reindex"] == "1" or not task["poll_range_start"]:
document_generator = self.connector.load_from_state()
begin_info = "totally"
else:
poll_start = task["poll_range_start"]
document_generator = self.connector.poll_source(
poll_start.timestamp(), datetime.now(timezone.utc).timestamp()
)
begin_info = f"from {poll_start}"
logging.info(f"[Dropbox] Connect to Dropbox {begin_info}")
return document_generator
class GoogleDrive(SyncBase):
SOURCE_NAME: str = FileSource.GOOGLE_DRIVE
async def _generate(self, task: dict):
connector_kwargs = {
"include_shared_drives": self.conf.get("include_shared_drives", False),
"include_my_drives": self.conf.get("include_my_drives", False),
"include_files_shared_with_me": self.conf.get("include_files_shared_with_me", False),
"shared_drive_urls": self.conf.get("shared_drive_urls"),
"my_drive_emails": self.conf.get("my_drive_emails"),
"shared_folder_urls": self.conf.get("shared_folder_urls"),
"specific_user_emails": self.conf.get("specific_user_emails"),
"batch_size": self.conf.get("batch_size", INDEX_BATCH_SIZE),
}
self.connector = GoogleDriveConnector(**connector_kwargs)
self.connector.set_allow_images(self.conf.get("allow_images", False))
credentials = self.conf.get("credentials")
if not credentials:
raise ValueError("Google Drive connector is missing credentials.")
new_credentials = self.connector.load_credentials(credentials)
if new_credentials:
self._persist_rotated_credentials(task["connector_id"], new_credentials)
if task["reindex"] == "1" or not task["poll_range_start"]:
start_time = 0.0
begin_info = "totally"
else:
start_time = task["poll_range_start"].timestamp()
begin_info = f"from {task['poll_range_start']}"
end_time = datetime.now(timezone.utc).timestamp()
raw_batch_size = self.conf.get("sync_batch_size") or self.conf.get("batch_size") or INDEX_BATCH_SIZE
try:
batch_size = int(raw_batch_size)
except (TypeError, ValueError):
batch_size = INDEX_BATCH_SIZE
if batch_size <= 0:
batch_size = INDEX_BATCH_SIZE
def document_batches():
checkpoint = self.connector.build_dummy_checkpoint()
pending_docs = []
iterations = 0
iteration_limit = 100_000
while checkpoint.has_more:
wrapper = CheckpointOutputWrapper()
doc_generator = wrapper(self.connector.load_from_checkpoint(start_time, end_time, checkpoint))
for document, failure, next_checkpoint in doc_generator:
if failure is not None:
logging.warning("Google Drive connector failure: %s",
getattr(failure, "failure_message", failure))
continue
if document is not None:
pending_docs.append(document)
if len(pending_docs) >= batch_size:
yield pending_docs
pending_docs = []
if next_checkpoint is not None:
checkpoint = next_checkpoint
iterations += 1
if iterations > iteration_limit:
raise RuntimeError("Too many iterations while loading Google Drive documents.")
if pending_docs:
yield pending_docs
try:
admin_email = self.connector.primary_admin_email
except RuntimeError:
admin_email = "unknown"
logging.info(f"Connect to Google Drive as {admin_email} {begin_info}")
return document_batches()
def _persist_rotated_credentials(self, connector_id: str, credentials: dict[str, Any]) -> None:
try:
updated_conf = copy.deepcopy(self.conf)
updated_conf["credentials"] = credentials
ConnectorService.update_by_id(connector_id, {"config": updated_conf})
self.conf = updated_conf
logging.info("Persisted refreshed Google Drive credentials for connector %s", connector_id)
except Exception:
logging.exception("Failed to persist refreshed Google Drive credentials for connector %s", connector_id)
class Jira(SyncBase):
SOURCE_NAME: str = FileSource.JIRA
def _get_source_prefix(self):
return "[Jira]"
async def _generate(self, task: dict):
connector_kwargs = {
"jira_base_url": self.conf["base_url"],
"project_key": self.conf.get("project_key"),
"jql_query": self.conf.get("jql_query"),
"batch_size": self.conf.get("batch_size", INDEX_BATCH_SIZE),
"include_comments": self.conf.get("include_comments", True),
"include_attachments": self.conf.get("include_attachments", False),
"labels_to_skip": self._normalize_list(self.conf.get("labels_to_skip")),
"comment_email_blacklist": self._normalize_list(self.conf.get("comment_email_blacklist")),
"scoped_token": self.conf.get("scoped_token", False),
"attachment_size_limit": self.conf.get("attachment_size_limit"),
"timezone_offset": self.conf.get("timezone_offset"),
}
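        # Hedged example of the assumed connector config feeding the kwargs above
        # (placeholder values, not a guaranteed schema):
        #   {"base_url": "https://example.atlassian.net",
        #    "project_key": "ENG", "include_comments": True,
        #    "credentials": {...}}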
self.connector = JiraConnector(**connector_kwargs)
credentials = self.conf.get("credentials")
if not credentials:
raise ValueError("Jira connector is missing credentials.")
self.connector.load_credentials(credentials)
self.connector.validate_connector_settings()
if task["reindex"] == "1" or not task["poll_range_start"]:
start_time = 0.0
begin_info = "totally"
else:
start_time = task["poll_range_start"].timestamp()
begin_info = f"from {task['poll_range_start']}"
end_time = datetime.now(timezone.utc).timestamp()
raw_batch_size = self.conf.get("sync_batch_size") or self.conf.get("batch_size") or INDEX_BATCH_SIZE
try:
batch_size = int(raw_batch_size)
except (TypeError, ValueError):
batch_size = INDEX_BATCH_SIZE
if batch_size <= 0:
batch_size = INDEX_BATCH_SIZE
def document_batches():
checkpoint = self.connector.build_dummy_checkpoint()
pending_docs = []
iterations = 0
iteration_limit = 100_000
while checkpoint.has_more:
wrapper = CheckpointOutputWrapper()
generator = wrapper(
self.connector.load_from_checkpoint(
start_time,
end_time,
checkpoint,
)
)
for document, failure, next_checkpoint in generator:
if failure is not None:
logging.warning(
f"[Jira] Jira connector failure: {getattr(failure, 'failure_message', failure)}"
)
continue
if document is not None:
pending_docs.append(document)
if len(pending_docs) >= batch_size:
yield pending_docs
pending_docs = []
if next_checkpoint is not None:
checkpoint = next_checkpoint
iterations += 1
if iterations > iteration_limit:
logging.error(f"[Jira] Task {task.get('id')} exceeded iteration limit ({iteration_limit}).")
raise RuntimeError("Too many iterations while loading Jira documents.")
if pending_docs:
yield pending_docs
logging.info(f"[Jira] Connect to Jira {connector_kwargs['jira_base_url']} {begin_info}")
return document_batches()
@staticmethod
def _normalize_list(values: Any) -> list[str] | None:
if values is None:
return None
if isinstance(values, str):
values = [item.strip() for item in values.split(",")]
return [str(value).strip() for value in values if value is not None and str(value).strip()]
class SharePoint(SyncBase):
SOURCE_NAME: str = FileSource.SHAREPOINT
async def _generate(self, task: dict):
        # Placeholder: SharePoint synchronization is not implemented yet; yield no documents.
        return []
class Slack(SyncBase):
SOURCE_NAME: str = FileSource.SLACK
async def _generate(self, task: dict):
        # Placeholder: Slack synchronization is not implemented yet; yield no documents.
        return []
class Teams(SyncBase):
SOURCE_NAME: str = FileSource.TEAMS
async def _generate(self, task: dict):
        # Placeholder: Teams synchronization is not implemented yet; yield no documents.
        return []
class WebDAV(SyncBase):
SOURCE_NAME: str = FileSource.WEBDAV
async def _generate(self, task: dict):
self.connector = WebDAVConnector(
base_url=self.conf["base_url"],
remote_path=self.conf.get("remote_path", "/")
)
self.connector.load_credentials(self.conf["credentials"])
logging.info(f"Task info: reindex={task['reindex']}, poll_range_start={task['poll_range_start']}")
if task["reindex"] == "1" or not task["poll_range_start"]:
logging.info("Using load_from_state (full sync)")
document_batch_generator = self.connector.load_from_state()
begin_info = "totally"
else:
start_ts = task["poll_range_start"].timestamp()
end_ts = datetime.now(timezone.utc).timestamp()
logging.info(f"Polling WebDAV from {task['poll_range_start']} (ts: {start_ts}) to now (ts: {end_ts})")
document_batch_generator = self.connector.poll_source(start_ts, end_ts)
begin_info = "from {}".format(task["poll_range_start"])
logging.info("Connect to WebDAV: {}(path: {}) {}".format(
self.conf["base_url"],
self.conf.get("remote_path", "/"),
begin_info
))
def wrapper():
for document_batch in document_batch_generator:
yield document_batch
return wrapper()
class Moodle(SyncBase):
SOURCE_NAME: str = FileSource.MOODLE
async def _generate(self, task: dict):
self.connector = MoodleConnector(
moodle_url=self.conf["moodle_url"],
batch_size=self.conf.get("batch_size", INDEX_BATCH_SIZE)
)
self.connector.load_credentials(self.conf["credentials"])
# Determine the time range for synchronization based on reindex or poll_range_start
poll_start = task.get("poll_range_start")
if task["reindex"] == "1" or poll_start is None:
document_generator = self.connector.load_from_state()
begin_info = "totally"
else:
document_generator = self.connector.poll_source(
poll_start.timestamp(),
datetime.now(timezone.utc).timestamp(),
)
begin_info = f"from {poll_start}"
logging.info("Connect to Moodle: {} {}".format(self.conf["moodle_url"], begin_info))
return document_generator
class BOX(SyncBase):
SOURCE_NAME: str = FileSource.BOX
async def _generate(self, task: dict):
self.connector = BoxConnector(
folder_id=self.conf.get("folder_id", "0"),
)
credential = json.loads(self.conf['credentials']['box_tokens'])
auth = BoxOAuth(
OAuthConfig(
client_id=credential['client_id'],
client_secret=credential['client_secret'],
)
)
token = AccessToken(
access_token=credential['access_token'],
refresh_token=credential['refresh_token'],
)
auth.token_storage.store(token)
self.connector.load_credentials(auth)
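        # The credentials handling above assumes conf["credentials"]["box_tokens"]
        # is a JSON string shaped roughly like this (placeholder values only):
        #   {"client_id": "...", "client_secret": "...",
        #    "access_token": "...", "refresh_token": "..."}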
poll_start = task["poll_range_start"]
if task["reindex"] == "1" or poll_start is None:
document_generator = self.connector.load_from_state()
begin_info = "totally"
else:
document_generator = self.connector.poll_source(
poll_start.timestamp(),
datetime.now(timezone.utc).timestamp(),
)
begin_info = f"from {poll_start}"
logging.info("Connect to Box: folder_id({}) {}".format(self.conf["folder_id"], begin_info))
return document_generator
class Airtable(SyncBase):
SOURCE_NAME: str = FileSource.AIRTABLE
async def _generate(self, task: dict):
"""
Sync files from Airtable attachments.
"""
self.connector = AirtableConnector(
base_id=self.conf.get("base_id"),
table_name_or_id=self.conf.get("table_name_or_id"),
)
credentials = self.conf.get("credentials", {})
if "airtable_access_token" not in credentials:
raise ValueError("Missing airtable_access_token in credentials")
self.connector.load_credentials(
{"airtable_access_token": credentials["airtable_access_token"]}
)
poll_start = task.get("poll_range_start")
if task.get("reindex") == "1" or poll_start is None:
document_generator = self.connector.load_from_state()
begin_info = "totally"
else:
document_generator = self.connector.poll_source(
poll_start.timestamp(),
datetime.now(timezone.utc).timestamp(),
)
begin_info = f"from {poll_start}"
logging.info(
"Connect to Airtable: base_id(%s), table(%s) %s",
self.conf.get("base_id"),
self.conf.get("table_name_or_id"),
begin_info,
)
return document_generator
class Asana(SyncBase):
SOURCE_NAME: str = FileSource.ASANA
async def _generate(self, task: dict):
self.connector = AsanaConnector(
self.conf.get("asana_workspace_id"),
self.conf.get("asana_project_ids"),
self.conf.get("asana_team_id"),
)
credentials = self.conf.get("credentials", {})
if "asana_api_token_secret" not in credentials:
raise ValueError("Missing asana_api_token_secret in credentials")
self.connector.load_credentials(
{"asana_api_token_secret": credentials["asana_api_token_secret"]}
)
if task.get("reindex") == "1" or not task.get("poll_range_start"):
document_generator = self.connector.load_from_state()
begin_info = "totally"
else:
poll_start = task.get("poll_range_start")
if poll_start is None:
document_generator = self.connector.load_from_state()
begin_info = "totally"
else:
document_generator = self.connector.poll_source(
poll_start.timestamp(),
datetime.now(timezone.utc).timestamp(),
)
begin_info = f"from {poll_start}"
logging.info(
"Connect to Asana: workspace_id(%s), project_ids(%s), team_id(%s) %s",
self.conf.get("asana_workspace_id"),
self.conf.get("asana_project_ids"),
self.conf.get("asana_team_id"),
begin_info,
)
return document_generator
class Github(SyncBase):
SOURCE_NAME: str = FileSource.GITHUB
async def _generate(self, task: dict):
"""
Sync files from Github repositories.
"""
from common.data_source.connector_runner import ConnectorRunner
self.connector = GithubConnector(
repo_owner=self.conf.get("repository_owner"),
repositories=self.conf.get("repository_name"),
include_prs=self.conf.get("include_pull_requests", False),
include_issues=self.conf.get("include_issues", False),
)
credentials = self.conf.get("credentials", {})
if "github_access_token" not in credentials:
raise ValueError("Missing github_access_token in credentials")
self.connector.load_credentials(
{"github_access_token": credentials["github_access_token"]}
)
if task.get("reindex") == "1" or not task.get("poll_range_start"):
start_time = datetime.fromtimestamp(0, tz=timezone.utc)
begin_info = "totally"
else:
start_time = task.get("poll_range_start")
begin_info = f"from {start_time}"
end_time = datetime.now(timezone.utc)
runner = ConnectorRunner(
connector=self.connector,
batch_size=self.conf.get("batch_size", INDEX_BATCH_SIZE),
include_permissions=False,
time_range=(start_time, end_time)
)
def document_batches():
checkpoint = self.connector.build_dummy_checkpoint()
while checkpoint.has_more:
for doc_batch, failure, next_checkpoint in runner.run(checkpoint):
if failure is not None:
logging.warning(
"Github connector failure: %s",
getattr(failure, "failure_message", failure),
)
continue
if doc_batch is not None:
yield doc_batch
if next_checkpoint is not None:
checkpoint = next_checkpoint
def wrapper():
for batch in document_batches():
yield batch
logging.info(
"Connect to Github: org_name(%s), repo_names(%s) for %s",
self.conf.get("repository_owner"),
self.conf.get("repository_name"),
begin_info,
)
return wrapper()
class IMAP(SyncBase):
SOURCE_NAME: str = FileSource.IMAP
async def _generate(self, task):
from common.data_source.config import DocumentSource
from common.data_source.interfaces import StaticCredentialsProvider
self.connector = ImapConnector(
host=self.conf.get("imap_host"),
port=self.conf.get("imap_port"),
mailboxes=self.conf.get("imap_mailbox"),
)
credentials_provider = StaticCredentialsProvider(tenant_id=task["tenant_id"], connector_name=DocumentSource.IMAP, credential_json=self.conf["credentials"])
self.connector.set_credentials_provider(credentials_provider)
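        # Assumed config shape, inferred from the keys read in this method
        # (illustrative placeholders only):
        #   {"imap_host": "imap.example.com", "imap_port": 993,
        #    "imap_mailbox": "INBOX",
        #    "credentials": {"imap_username": "user@example.com", ...}}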
end_time = datetime.now(timezone.utc).timestamp()
if task["reindex"] == "1" or not task["poll_range_start"]:
start_time = end_time - self.conf.get("poll_range",30) * 24 * 60 * 60
begin_info = "totally"
else:
start_time = task["poll_range_start"].timestamp()
begin_info = f"from {task['poll_range_start']}"
raw_batch_size = self.conf.get("sync_batch_size") or self.conf.get("batch_size") or INDEX_BATCH_SIZE
try:
batch_size = int(raw_batch_size)
except (TypeError, ValueError):
batch_size = INDEX_BATCH_SIZE
if batch_size <= 0:
batch_size = INDEX_BATCH_SIZE
def document_batches():
checkpoint = self.connector.build_dummy_checkpoint()
pending_docs = []
iterations = 0
iteration_limit = 100_000
while checkpoint.has_more:
wrapper = CheckpointOutputWrapper()
doc_generator = wrapper(self.connector.load_from_checkpoint(start_time, end_time, checkpoint))
for document, failure, next_checkpoint in doc_generator:
if failure is not None:
logging.warning("IMAP connector failure: %s", getattr(failure, "failure_message", failure))
continue
if document is not None:
pending_docs.append(document)
if len(pending_docs) >= batch_size:
yield pending_docs
pending_docs = []
if next_checkpoint is not None:
checkpoint = next_checkpoint
iterations += 1
if iterations > iteration_limit:
raise RuntimeError("Too many iterations while loading IMAP documents.")
if pending_docs:
yield pending_docs
def wrapper():
for batch in document_batches():
yield batch
logging.info(
"Connect to IMAP: host(%s) port(%s) user(%s) folder(%s) %s",
self.conf["imap_host"],
self.conf["imap_port"],
self.conf["credentials"]["imap_username"],
self.conf["imap_mailbox"],
begin_info
)
return wrapper()
class Zendesk(SyncBase):
SOURCE_NAME: str = FileSource.ZENDESK
async def _generate(self, task: dict):
self.connector = ZendeskConnector(content_type=self.conf.get("zendesk_content_type"))
self.connector.load_credentials(self.conf["credentials"])
end_time = datetime.now(timezone.utc).timestamp()
if task["reindex"] == "1" or not task.get("poll_range_start"):
start_time = 0
begin_info = "totally"
else:
start_time = task["poll_range_start"].timestamp()
begin_info = f"from {task['poll_range_start']}"
raw_batch_size = (
self.conf.get("sync_batch_size")
or self.conf.get("batch_size")
or INDEX_BATCH_SIZE
)
try:
batch_size = int(raw_batch_size)
except (TypeError, ValueError):
batch_size = INDEX_BATCH_SIZE
if batch_size <= 0:
batch_size = INDEX_BATCH_SIZE
def document_batches():
checkpoint = self.connector.build_dummy_checkpoint()
pending_docs = []
iterations = 0
iteration_limit = 100_000
while checkpoint.has_more:
wrapper = CheckpointOutputWrapper()
doc_generator = wrapper(
self.connector.load_from_checkpoint(
start_time, end_time, checkpoint
)
)
for document, failure, next_checkpoint in doc_generator:
if failure is not None:
logging.warning(
"Zendesk connector failure: %s",
getattr(failure, "failure_message", failure),
)
continue
if document is not None:
pending_docs.append(document)
if len(pending_docs) >= batch_size:
yield pending_docs
pending_docs = []
if next_checkpoint is not None:
checkpoint = next_checkpoint
iterations += 1
if iterations > iteration_limit:
raise RuntimeError(
"Too many iterations while loading Zendesk documents."
)
if pending_docs:
yield pending_docs
def wrapper():
for batch in document_batches():
yield batch
logging.info(
"Connect to Zendesk: subdomain(%s) %s",
self.conf['credentials'].get("zendesk_subdomain"),
begin_info,
)
return wrapper()
class Gitlab(SyncBase):
SOURCE_NAME: str = FileSource.GITLAB
async def _generate(self, task: dict):
"""
        Sync merge requests, issues and code files from GitLab projects.
"""
        self.connector = GitlabConnector(
            project_owner=self.conf.get("project_owner"),
            project_name=self.conf.get("project_name"),
            include_mrs=self.conf.get("include_mrs", False),
            include_issues=self.conf.get("include_issues", False),
            include_code_files=self.conf.get("include_code_files", False),
        )
self.connector.load_credentials(
{
"gitlab_access_token": self.conf.get("credentials", {}).get("gitlab_access_token"),
"gitlab_url": self.conf.get("gitlab_url"),
}
)
if task["reindex"] == "1" or not task["poll_range_start"]:
document_generator = self.connector.load_from_state()
begin_info = "totally"
else:
poll_start = task["poll_range_start"]
if poll_start is None:
document_generator = self.connector.load_from_state()
begin_info = "totally"
else:
document_generator = self.connector.poll_source(
poll_start.timestamp(),
datetime.now(timezone.utc).timestamp()
)
begin_info = "from {}".format(poll_start)
logging.info("Connect to Gitlab: ({}) {}".format(self.conf["project_name"], begin_info))
return document_generator
class Bitbucket(SyncBase):
SOURCE_NAME: str = FileSource.BITBUCKET
async def _generate(self, task: dict):
self.connector = BitbucketConnector(
workspace=self.conf.get("workspace"),
repositories=self.conf.get("repository_slugs"),
projects=self.conf.get("projects"),
)
self.connector.load_credentials(
{
"bitbucket_email": self.conf["credentials"].get("bitbucket_account_email"),
"bitbucket_api_token": self.conf["credentials"].get("bitbucket_api_token"),
}
)
if task["reindex"] == "1" or not task["poll_range_start"]:
start_time = datetime.fromtimestamp(0, tz=timezone.utc)
begin_info = "totally"
else:
start_time = task.get("poll_range_start")
begin_info = f"from {start_time}"
end_time = datetime.now(timezone.utc)
def document_batches():
checkpoint = self.connector.build_dummy_checkpoint()
while checkpoint.has_more:
gen = self.connector.load_from_checkpoint(
start=start_time.timestamp(),
end=end_time.timestamp(),
checkpoint=checkpoint)
while True:
try:
item = next(gen)
if isinstance(item, ConnectorFailure):
                            logging.warning(
                                "Bitbucket connector failure: %s",
                                item.failure_message)
break
yield [item]
except StopIteration as e:
checkpoint = e.value
break
def wrapper():
for batch in document_batches():
yield batch
logging.info(
"Connect to Bitbucket: workspace(%s), %s",
self.conf.get("workspace"),
begin_info,
)
return wrapper()
class SeaFile(SyncBase):
SOURCE_NAME: str = FileSource.SEAFILE
async def _generate(self, task: dict):
conf = self.conf
self.connector = SeaFileConnector(
seafile_url=conf["seafile_url"],
batch_size=conf.get("batch_size", INDEX_BATCH_SIZE),
include_shared=conf.get("include_shared", True),
sync_scope=conf.get("sync_scope", SeafileSyncScope.ACCOUNT),
repo_id=conf.get("repo_id") or None,
sync_path=conf.get("sync_path") or None,
)
self.connector.load_credentials(conf["credentials"])
poll_start = task.get("poll_range_start")
if task["reindex"] == "1" or poll_start is None:
document_generator = self.connector.load_from_state()
begin_info = "totally"
else:
document_generator = self.connector.poll_source(
poll_start.timestamp(),
datetime.now(timezone.utc).timestamp(),
)
begin_info = f"from {poll_start}"
scope = conf.get("sync_scope", "account")
extra = ""
if scope in ("library", "directory"):
extra = f" repo_id={conf.get('repo_id')}"
if scope == "directory":
extra += f" path={conf.get('sync_path')}"
logging.info(
"Connect to SeaFile: %s (scope=%s%s) %s",
conf["seafile_url"], scope, extra, begin_info,
)
return document_generator
class MySQL(SyncBase):
SOURCE_NAME: str = FileSource.MYSQL
async def _generate(self, task: dict):
self.connector = RDBMSConnector(
db_type="mysql",
host=self.conf.get("host", "localhost"),
port=int(self.conf.get("port", 3306)),
database=self.conf.get("database", ""),
query=self.conf.get("query", ""),
content_columns=self.conf.get("content_columns", ""),
batch_size=self.conf.get("batch_size", INDEX_BATCH_SIZE),
)
credentials = self.conf.get("credentials")
if not credentials:
raise ValueError("MySQL connector is missing credentials.")
self.connector.load_credentials(credentials)
self.connector.validate_connector_settings()
if task["reindex"] == "1" or not task["poll_range_start"]:
document_generator = self.connector.load_from_state()
begin_info = "totally"
else:
poll_start = task["poll_range_start"]
document_generator = self.connector.poll_source(
poll_start.timestamp(),
datetime.now(timezone.utc).timestamp()
)
begin_info = f"from {poll_start}"
logging.info(f"[MySQL] Connect to {self.conf.get('host')}:{self.conf.get('database')} {begin_info}")
return document_generator
class PostgreSQL(SyncBase):
SOURCE_NAME: str = FileSource.POSTGRESQL
async def _generate(self, task: dict):
self.connector = RDBMSConnector(
db_type="postgresql",
host=self.conf.get("host", "localhost"),
port=int(self.conf.get("port", 5432)),
database=self.conf.get("database", ""),
query=self.conf.get("query", ""),
content_columns=self.conf.get("content_columns", ""),
batch_size=self.conf.get("batch_size", INDEX_BATCH_SIZE),
)
credentials = self.conf.get("credentials")
if not credentials:
raise ValueError("PostgreSQL connector is missing credentials.")
self.connector.load_credentials(credentials)
self.connector.validate_connector_settings()
if task["reindex"] == "1" or not task["poll_range_start"]:
document_generator = self.connector.load_from_state()
begin_info = "totally"
else:
poll_start = task["poll_range_start"]
document_generator = self.connector.poll_source(
poll_start.timestamp(),
datetime.now(timezone.utc).timestamp()
)
begin_info = f"from {poll_start}"
logging.info(f"[PostgreSQL] Connect to {self.conf.get('host')}:{self.conf.get('database')} {begin_info}")
return document_generator
func_factory = {
FileSource.S3: S3,
FileSource.R2: R2,
FileSource.OCI_STORAGE: OCI_STORAGE,
FileSource.GOOGLE_CLOUD_STORAGE: GOOGLE_CLOUD_STORAGE,
FileSource.NOTION: Notion,
FileSource.DISCORD: Discord,
FileSource.CONFLUENCE: Confluence,
FileSource.GMAIL: Gmail,
FileSource.GOOGLE_DRIVE: GoogleDrive,
FileSource.JIRA: Jira,
FileSource.SHAREPOINT: SharePoint,
FileSource.SLACK: Slack,
FileSource.TEAMS: Teams,
FileSource.MOODLE: Moodle,
FileSource.DROPBOX: Dropbox,
FileSource.WEBDAV: WebDAV,
FileSource.BOX: BOX,
FileSource.AIRTABLE: Airtable,
FileSource.ASANA: Asana,
FileSource.IMAP: IMAP,
FileSource.ZENDESK: Zendesk,
FileSource.GITHUB: Github,
FileSource.GITLAB: Gitlab,
FileSource.BITBUCKET: Bitbucket,
FileSource.SEAFILE: SeaFile,
FileSource.MYSQL: MySQL,
FileSource.POSTGRESQL: PostgreSQL,
}
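# Maps each FileSource value to the SyncBase subclass that knows how to pull documents
# from that source. dispatch_tasks() below looks up the class via task["source"] and
# runs one instance per pending sync task.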
async def dispatch_tasks():
while True:
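        # Probe the database until it is reachable: listing sync tasks raises while the
        # DB is still starting up, so retry every 3 seconds before entering the main loop.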
try:
list(SyncLogsService.list_sync_tasks()[0])
break
except Exception as e:
logging.warning(f"DB is not ready yet: {e}")
await asyncio.sleep(3)
tasks = []
for task in SyncLogsService.list_sync_tasks()[0]:
if task["poll_range_start"]:
task["poll_range_start"] = task["poll_range_start"].astimezone(timezone.utc)
if task["poll_range_end"]:
task["poll_range_end"] = task["poll_range_end"].astimezone(timezone.utc)
func = func_factory[task["source"]](task["config"])
tasks.append(asyncio.create_task(func(task)))
try:
await asyncio.gather(*tasks, return_exceptions=False)
except Exception as e:
logging.error(f"Error in dispatch_tasks: {e}")
for t in tasks:
t.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
raise
await asyncio.sleep(1)
stop_event = threading.Event()
def signal_handler(sig, frame):
logging.info("Received interrupt signal, shutting down...")
stop_event.set()
time.sleep(1)
sys.exit(0)
CONSUMER_NO = "0" if len(sys.argv) < 2 else sys.argv[1]
CONSUMER_NAME = "data_sync_" + CONSUMER_NO
async def main():
logging.info(r"""
_____ _ _____
| __ \ | | / ____|
| | | | __ _| |_ __ _ | (___ _ _ _ __ ___
| | | |/ _` | __/ _` | \___ \| | | | '_ \ / __|
| |__| | (_| | || (_| | ____) | |_| | | | | (__
|_____/ \__,_|\__\__,_| |_____/ \__, |_| |_|\___|
__/ |
|___/
""")
logging.info(f"RAGFlow version: {get_ragflow_version()}")
show_configs()
settings.init_settings()
if sys.platform != "win32":
signal.signal(signal.SIGUSR1, start_tracemalloc_and_snapshot)
signal.signal(signal.SIGUSR2, stop_tracemalloc)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
logging.info(f"RAGFlow data sync is ready after {time.time() - start_ts}s initialization.")
while not stop_event.is_set():
await dispatch_tasks()
logging.error("BUG!!! You should not reach here!!!")
if __name__ == "__main__":
faulthandler.enable()
init_root_logger(CONSUMER_NAME)
asyncio.run(main())
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/svr/sync_data_source.py",
"license": "Apache License 2.0",
"lines": 1148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:rag/utils/file_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import hashlib
import zipfile
import requests
from requests.exceptions import Timeout, RequestException
from io import BytesIO
from typing import List, Union, Tuple, Optional, Dict
import PyPDF2
from docx import Document
import olefile
def _is_zip(h: bytes) -> bool:
return h.startswith(b"PK\x03\x04") or h.startswith(b"PK\x05\x06") or h.startswith(b"PK\x07\x08")
def _is_pdf(h: bytes) -> bool:
return h.startswith(b"%PDF-")
def _is_ole(h: bytes) -> bool:
return h.startswith(b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1")
def _sha10(b: bytes) -> str:
return hashlib.sha256(b).hexdigest()[:10]
def _guess_ext(b: bytes) -> str:
h = b[:8]
if _is_zip(h):
try:
with zipfile.ZipFile(io.BytesIO(b), "r") as z:
names = [n.lower() for n in z.namelist()]
if any(n.startswith("word/") for n in names):
return ".docx"
if any(n.startswith("ppt/") for n in names):
return ".pptx"
if any(n.startswith("xl/") for n in names):
return ".xlsx"
except Exception:
pass
return ".zip"
if _is_pdf(h):
return ".pdf"
if _is_ole(h):
return ".doc"
return ".bin"
# Try to extract the real embedded payload from OLE's Ole10Native
def _extract_ole10native_payload(data: bytes) -> bytes:
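    # Assumed Ole10Native layout, mirrored by the parsing below: a 4-byte little-endian
    # total size, three NUL-terminated ANSI strings (filename/src/tmp), 4 unknown bytes,
    # then a 4-byte little-endian payload size followed by the payload itself. If the
    # stream does not match this shape, the raw data is returned unchanged.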
try:
pos = 0
if len(data) < 4:
return data
_ = int.from_bytes(data[pos:pos + 4], "little")
pos += 4
# filename/src/tmp (NUL-terminated ANSI)
for _ in range(3):
z = data.index(b"\x00", pos)
pos = z + 1
# skip unknown 4 bytes
pos += 4
if pos + 4 > len(data):
return data
size = int.from_bytes(data[pos:pos + 4], "little")
pos += 4
if pos + size <= len(data):
return data[pos:pos + size]
except Exception:
pass
return data
def extract_embed_file(target: Union[bytes, bytearray]) -> List[Tuple[str, bytes]]:
"""
    Extract only the first layer of embedded objects, returning raw (filename, bytes) pairs.
"""
top = bytes(target)
head = top[:8]
out: List[Tuple[str, bytes]] = []
seen = set()
def push(b: bytes, name_hint: str = ""):
h10 = _sha10(b)
if h10 in seen:
return
seen.add(h10)
ext = _guess_ext(b)
        # If name_hint has an extension, use its basename; otherwise fall back to the guessed extension.
if "." in name_hint:
fname = name_hint.split("/")[-1]
else:
fname = f"{h10}{ext}"
out.append((fname, b))
# OOXML/ZIP container (docx/xlsx/pptx)
if _is_zip(head):
try:
with zipfile.ZipFile(io.BytesIO(top), "r") as z:
embed_dirs = (
"word/embeddings/", "word/objects/", "word/activex/",
"xl/embeddings/", "ppt/embeddings/"
)
for name in z.namelist():
low = name.lower()
if any(low.startswith(d) for d in embed_dirs):
try:
b = z.read(name)
push(b, name)
except Exception:
pass
except Exception:
pass
return out
# OLE container (doc/ppt/xls)
if _is_ole(head):
try:
with olefile.OleFileIO(io.BytesIO(top)) as ole:
for entry in ole.listdir():
p = "/".join(entry)
try:
data = ole.openstream(entry).read()
except Exception:
continue
if not data:
continue
if "Ole10Native" in p or "ole10native" in p.lower():
data = _extract_ole10native_payload(data)
push(data, p)
except Exception:
pass
return out
return out
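# Illustrative usage of extract_embed_file (a sketch, not part of the original module;
# the file name below is hypothetical):
#
#     with open("report.docx", "rb") as f:
#         for name, blob in extract_embed_file(f.read()):
#             print(name, len(blob))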
def extract_links_from_docx(docx_bytes: bytes):
"""
Extract all hyperlinks from a Word (.docx) document binary stream.
Args:
docx_bytes (bytes): Raw bytes of a .docx file.
Returns:
set[str]: A set of unique hyperlink URLs.
"""
links = set()
with BytesIO(docx_bytes) as bio:
document = Document(bio)
# Each relationship may represent a hyperlink, image, footer, etc.
for rel in document.part.rels.values():
if rel.reltype == (
"http://schemas.openxmlformats.org/officeDocument/2006/relationships/hyperlink"
):
links.add(rel.target_ref)
return links
def extract_links_from_pdf(pdf_bytes: bytes):
"""
Extract all clickable hyperlinks from a PDF binary stream.
Args:
pdf_bytes (bytes): Raw bytes of a PDF file.
Returns:
set[str]: A set of unique hyperlink URLs (unordered).
"""
links = set()
with BytesIO(pdf_bytes) as bio:
pdf = PyPDF2.PdfReader(bio)
for page in pdf.pages:
annots = page.get("/Annots")
if not annots or isinstance(annots, PyPDF2.generic.IndirectObject):
continue
for annot in annots:
obj = annot.get_object()
a = obj.get("/A")
if a and a.get("/URI"):
links.add(a["/URI"])
return links
_GLOBAL_SESSION: Optional[requests.Session] = None
def _get_session(headers: Optional[Dict[str, str]] = None) -> requests.Session:
"""Get or create a global reusable session."""
global _GLOBAL_SESSION
if _GLOBAL_SESSION is None:
_GLOBAL_SESSION = requests.Session()
_GLOBAL_SESSION.headers.update({
"User-Agent": (
"Mozilla/5.0 (X11; Linux x86_64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/121.0 Safari/537.36"
)
})
if headers:
_GLOBAL_SESSION.headers.update(headers)
return _GLOBAL_SESSION
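# Note: extra headers passed to _get_session() are merged into the shared global session,
# so they persist for every subsequent request made through it.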
def extract_html(
url: str,
timeout: float = 60.0,
headers: Optional[Dict[str, str]] = None,
max_retries: int = 2,
) -> Tuple[Optional[bytes], Dict[str, str]]:
"""
Extract the full HTML page as raw bytes from a given URL.
Automatically reuses a persistent HTTP session and applies robust timeout & retry logic.
Args:
url (str): Target webpage URL.
timeout (float): Request timeout in seconds (applies to connect + read).
headers (dict, optional): Extra HTTP headers.
max_retries (int): Number of retries on timeout or transient errors.
Returns:
tuple(bytes|None, dict):
- html_bytes: Raw HTML content (or None if failed)
- metadata: HTTP info (status_code, content_type, final_url, error if any)
"""
session = _get_session(headers=headers)
metadata = {"final_url": url, "status_code": "", "content_type": "", "error": ""}
for attempt in range(1, max_retries + 1):
try:
resp = session.get(url, timeout=timeout)
resp.raise_for_status()
html_bytes = resp.content
metadata.update({
"final_url": resp.url,
"status_code": str(resp.status_code),
"content_type": resp.headers.get("Content-Type", ""),
})
return html_bytes, metadata
        except Timeout:
            metadata["error"] = f"Timeout after {timeout}s (attempt {attempt}/{max_retries})"
            # Retry until attempts are exhausted; the loop then exits and (None, metadata) is returned.
            continue
        except RequestException as e:
            metadata["error"] = f"Request failed: {e}"
            # Transient request errors are retried as well.
            continue
return None, metadata
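# Illustrative usage of extract_html (a sketch; the URL is a placeholder):
#
#     html, meta = extract_html("https://example.com", timeout=10.0)
#     if html is None:
#         print("fetch failed:", meta["error"])
#     else:
#         print(meta["status_code"], len(html))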
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/utils/file_utils.py",
"license": "Apache License 2.0",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:common/config_utils.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import copy
import logging
import importlib
from filelock import FileLock
from common.file_utils import get_project_base_directory
from common.constants import SERVICE_CONF
from ruamel.yaml import YAML
def load_yaml_conf(conf_path):
if not os.path.isabs(conf_path):
conf_path = os.path.join(get_project_base_directory(), conf_path)
try:
with open(conf_path) as f:
yaml = YAML(typ="safe", pure=True)
return yaml.load(f)
except Exception as e:
raise EnvironmentError("loading yaml file config from {} failed:".format(conf_path), e)
def rewrite_yaml_conf(conf_path, config):
if not os.path.isabs(conf_path):
conf_path = os.path.join(get_project_base_directory(), conf_path)
try:
with open(conf_path, "w") as f:
yaml = YAML(typ="safe")
yaml.dump(config, f)
except Exception as e:
raise EnvironmentError("rewrite yaml file config {} failed:".format(conf_path), e)
def conf_realpath(conf_name):
conf_path = f"conf/{conf_name}"
return os.path.join(get_project_base_directory(), conf_path)
def read_config(conf_name=SERVICE_CONF):
local_config = {}
local_path = conf_realpath(f'local.{conf_name}')
# load local config file
if os.path.exists(local_path):
local_config = load_yaml_conf(local_path)
if not isinstance(local_config, dict):
raise ValueError(f'Invalid config file: "{local_path}".')
global_config_path = conf_realpath(conf_name)
global_config = load_yaml_conf(global_config_path)
if not isinstance(global_config, dict):
raise ValueError(f'Invalid config file: "{global_config_path}".')
global_config.update(local_config)
return global_config
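# Loaded once at import time: the global conf/<SERVICE_CONF> file merged with an optional
# conf/local.<SERVICE_CONF> override (a shallow dict.update, so local keys win).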
CONFIGS = read_config()
def show_configs():
msg = f"Current configs, from {conf_realpath(SERVICE_CONF)}:"
for k, v in CONFIGS.items():
if isinstance(v, dict):
if "password" in v:
v = copy.deepcopy(v)
v["password"] = "*" * 8
if "access_key" in v:
v = copy.deepcopy(v)
v["access_key"] = "*" * 8
if "secret_key" in v:
v = copy.deepcopy(v)
v["secret_key"] = "*" * 8
if "secret" in v:
v = copy.deepcopy(v)
v["secret"] = "*" * 8
if "sas_token" in v:
v = copy.deepcopy(v)
v["sas_token"] = "*" * 8
if "oauth" in k:
v = copy.deepcopy(v)
for key, val in v.items():
if "client_secret" in val:
val["client_secret"] = "*" * 8
if "authentication" in k:
v = copy.deepcopy(v)
for key, val in v.items():
if isinstance(val, dict) and "http_secret_key" in val:
val["http_secret_key"] = "*" * 8
msg += f"\n\t{k}: {v}"
logging.info(msg)
def get_base_config(key, default=None):
if key is None:
return None
if default is None:
default = os.environ.get(key.upper())
return CONFIGS.get(key, default)
def decrypt_database_password(password):
encrypt_password = get_base_config("encrypt_password", False)
encrypt_module = get_base_config("encrypt_module", False)
private_key = get_base_config("private_key", None)
if not password or not encrypt_password:
return password
if not private_key:
raise ValueError("No private key")
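    # "encrypt_module" is expected in the form "<module.path>#<function_name>"
    # (e.g. "my_pkg.crypto#decrypt", an illustrative value); the referenced function
    # is imported and invoked as fn(private_key, password).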
module_fun = encrypt_module.split("#")
pwdecrypt_fun = getattr(
importlib.import_module(
module_fun[0]),
module_fun[1])
return pwdecrypt_fun(private_key, password)
def decrypt_database_config(database=None, passwd_key="password", name="database"):
if not database:
database = get_base_config(name, {})
database[passwd_key] = decrypt_database_password(database[passwd_key])
return database
def update_config(key, value, conf_name=SERVICE_CONF):
conf_path = conf_realpath(conf_name=conf_name)
if not os.path.isabs(conf_path):
conf_path = os.path.join(get_project_base_directory(), conf_path)
with FileLock(os.path.join(os.path.dirname(conf_path), ".lock")):
config = load_yaml_conf(conf_path=conf_path) or {}
config[key] = value
rewrite_yaml_conf(conf_path=conf_path, config=config) | {
"repo_id": "infiniflow/ragflow",
"file_path": "common/config_utils.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |