repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated
|---|---|---|---|---|---|---|---|---|
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/sharepoint_connector.py | common/data_source/sharepoint_connector.py | """SharePoint connector"""
from typing import Any
import msal
from office365.graph_client import GraphClient
from office365.runtime.client_request import ClientRequestException
from office365.sharepoint.client_context import ClientContext
from common.data_source.config import INDEX_BATCH_SIZE
from common.data_source.exceptions import ConnectorValidationError, ConnectorMissingCredentialError
from common.data_source.interfaces import (
CheckpointedConnectorWithPermSync,
SecondsSinceUnixEpoch,
SlimConnectorWithPermSync
)
from common.data_source.models import (
ConnectorCheckpoint
)
class SharePointConnector(CheckpointedConnectorWithPermSync, SlimConnectorWithPermSync):
"""SharePoint connector for accessing SharePoint sites and documents"""
def __init__(self, batch_size: int = INDEX_BATCH_SIZE) -> None:
self.batch_size = batch_size
self.sharepoint_client = None
self.graph_client = None
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Load SharePoint credentials"""
try:
tenant_id = credentials.get("tenant_id")
client_id = credentials.get("client_id")
client_secret = credentials.get("client_secret")
site_url = credentials.get("site_url")
if not all([tenant_id, client_id, client_secret, site_url]):
raise ConnectorMissingCredentialError("SharePoint credentials are incomplete")
# Create MSAL confidential client
app = msal.ConfidentialClientApplication(
client_id=client_id,
client_credential=client_secret,
authority=f"https://login.microsoftonline.com/{tenant_id}"
)
# Get access token
result = app.acquire_token_for_client(scopes=["https://graph.microsoft.com/.default"])
if "access_token" not in result:
raise ConnectorMissingCredentialError("Failed to acquire SharePoint access token")
# Create Graph client
self.graph_client = GraphClient(result["access_token"])
# Create SharePoint client context
self.sharepoint_client = ClientContext(site_url).with_access_token(result["access_token"])
return None
        except ConnectorMissingCredentialError:
            raise
        except Exception as e:
            raise ConnectorMissingCredentialError(f"SharePoint: {e}") from e
def validate_connector_settings(self) -> None:
"""Validate SharePoint connector settings"""
if not self.sharepoint_client or not self.graph_client:
raise ConnectorMissingCredentialError("SharePoint")
try:
# Test connection by getting site info
site = self.sharepoint_client.site.get().execute_query()
if not site:
raise ConnectorValidationError("Failed to access SharePoint site")
        except ClientRequestException as e:
            if "401" in str(e) or "403" in str(e):
                raise ConnectorValidationError("Invalid credentials or insufficient permissions") from e
            raise ConnectorValidationError(f"SharePoint validation error: {e}") from e
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> Any:
"""Poll SharePoint for recent documents"""
# Simplified implementation - in production this would handle actual polling
return []
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ConnectorCheckpoint,
) -> Any:
"""Load documents from checkpoint"""
# Simplified implementation
return []
def load_from_checkpoint_with_perm_sync(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ConnectorCheckpoint,
) -> Any:
"""Load documents from checkpoint with permission sync"""
# Simplified implementation
return []
def build_dummy_checkpoint(self) -> ConnectorCheckpoint:
"""Build dummy checkpoint"""
return ConnectorCheckpoint()
def validate_checkpoint_json(self, checkpoint_json: str) -> ConnectorCheckpoint:
"""Validate checkpoint JSON"""
# Simplified implementation
return ConnectorCheckpoint()
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: Any = None,
) -> Any:
"""Retrieve all simplified documents with permission sync"""
# Simplified implementation
return [] | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
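The SharePoint connector, unlike the other files in this dump, ships no `__main__` demo. A minimal usage sketch, assuming hypothetical `SP_*` environment variable names for the four required credentials:

import os

connector = SharePointConnector()
connector.load_credentials({
    "tenant_id": os.environ.get("SP_TENANT_ID"),         # variable names are
    "client_id": os.environ.get("SP_CLIENT_ID"),         # chosen for illustration only
    "client_secret": os.environ.get("SP_CLIENT_SECRET"),
    "site_url": os.environ.get("SP_SITE_URL"),
})
connector.validate_connector_settings()
print("SharePoint connection validated")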
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/blob_connector.py | common/data_source/blob_connector.py | """Blob storage connector"""
import logging
import os
from datetime import datetime, timezone
from typing import Any, Optional
from common.data_source.utils import (
create_s3_client,
detect_bucket_region,
download_object,
extract_size_bytes,
get_file_ext,
)
from common.data_source.config import BlobType, DocumentSource, BLOB_STORAGE_SIZE_THRESHOLD, INDEX_BATCH_SIZE
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
CredentialExpiredError,
InsufficientPermissionsError
)
from common.data_source.interfaces import LoadConnector, PollConnector
from common.data_source.models import Document, SecondsSinceUnixEpoch, GenerateDocumentsOutput
class BlobStorageConnector(LoadConnector, PollConnector):
"""Blob storage connector"""
def __init__(
self,
bucket_type: str,
bucket_name: str,
prefix: str = "",
batch_size: int = INDEX_BATCH_SIZE,
european_residency: bool = False,
) -> None:
self.bucket_type: BlobType = BlobType(bucket_type)
self.bucket_name = bucket_name.strip()
self.prefix = prefix if not prefix or prefix.endswith("/") else prefix + "/"
self.batch_size = batch_size
self.s3_client: Optional[Any] = None
self._allow_images: bool | None = None
self.size_threshold: int | None = BLOB_STORAGE_SIZE_THRESHOLD
self.bucket_region: Optional[str] = None
self.european_residency: bool = european_residency
def set_allow_images(self, allow_images: bool) -> None:
"""Set whether to process images"""
logging.info(f"Setting allow_images to {allow_images}.")
self._allow_images = allow_images
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Load credentials"""
logging.debug(
f"Loading credentials for {self.bucket_name} of type {self.bucket_type}"
)
# Validate credentials
if self.bucket_type == BlobType.R2:
if not all(
credentials.get(key)
for key in ["r2_access_key_id", "r2_secret_access_key", "account_id"]
):
raise ConnectorMissingCredentialError("Cloudflare R2")
elif self.bucket_type == BlobType.S3:
authentication_method = credentials.get("authentication_method", "access_key")
if authentication_method == "access_key":
if not all(
credentials.get(key)
for key in ["aws_access_key_id", "aws_secret_access_key"]
):
raise ConnectorMissingCredentialError("Amazon S3")
elif authentication_method == "iam_role":
if not credentials.get("aws_role_arn"):
raise ConnectorMissingCredentialError("Amazon S3 IAM role ARN is required")
elif authentication_method == "assume_role":
pass
else:
raise ConnectorMissingCredentialError("Unsupported S3 authentication method")
elif self.bucket_type == BlobType.GOOGLE_CLOUD_STORAGE:
if not all(
credentials.get(key) for key in ["access_key_id", "secret_access_key"]
):
raise ConnectorMissingCredentialError("Google Cloud Storage")
elif self.bucket_type == BlobType.OCI_STORAGE:
if not all(
credentials.get(key)
for key in ["namespace", "region", "access_key_id", "secret_access_key"]
):
raise ConnectorMissingCredentialError("Oracle Cloud Infrastructure")
elif self.bucket_type == BlobType.S3_COMPATIBLE:
if not all(
credentials.get(key)
for key in ["endpoint_url", "aws_access_key_id", "aws_secret_access_key", "addressing_style"]
):
raise ConnectorMissingCredentialError("S3 Compatible Storage")
else:
raise ValueError(f"Unsupported bucket type: {self.bucket_type}")
# Create S3 client
self.s3_client = create_s3_client(
self.bucket_type, credentials, self.european_residency
)
# Detect bucket region (only important for S3)
if self.bucket_type == BlobType.S3:
self.bucket_region = detect_bucket_region(self.s3_client, self.bucket_name)
return None
def _yield_blob_objects(
self,
start: datetime,
end: datetime,
) -> GenerateDocumentsOutput:
"""Generate bucket objects"""
if self.s3_client is None:
raise ConnectorMissingCredentialError("Blob storage")
paginator = self.s3_client.get_paginator("list_objects_v2")
pages = paginator.paginate(Bucket=self.bucket_name, Prefix=self.prefix)
# Collect all objects first to count filename occurrences
all_objects = []
for page in pages:
if "Contents" not in page:
continue
for obj in page["Contents"]:
if obj["Key"].endswith("/"):
continue
last_modified = obj["LastModified"].replace(tzinfo=timezone.utc)
if start < last_modified <= end:
all_objects.append(obj)
# Count filename occurrences to determine which need full paths
filename_counts: dict[str, int] = {}
for obj in all_objects:
file_name = os.path.basename(obj["Key"])
filename_counts[file_name] = filename_counts.get(file_name, 0) + 1
batch: list[Document] = []
for obj in all_objects:
last_modified = obj["LastModified"].replace(tzinfo=timezone.utc)
file_name = os.path.basename(obj["Key"])
key = obj["Key"]
size_bytes = extract_size_bytes(obj)
if (
self.size_threshold is not None
and isinstance(size_bytes, int)
and size_bytes > self.size_threshold
):
logging.warning(
f"{file_name} exceeds size threshold of {self.size_threshold}. Skipping."
)
continue
try:
blob = download_object(self.s3_client, self.bucket_name, key, self.size_threshold)
if blob is None:
continue
# Use full path only if filename appears multiple times
if filename_counts.get(file_name, 0) > 1:
relative_path = key
if self.prefix and key.startswith(self.prefix):
relative_path = key[len(self.prefix):]
semantic_id = relative_path.replace('/', ' / ') if relative_path else file_name
else:
semantic_id = file_name
batch.append(
Document(
id=f"{self.bucket_type}:{self.bucket_name}:{key}",
blob=blob,
source=DocumentSource(self.bucket_type.value),
semantic_identifier=semantic_id,
extension=get_file_ext(file_name),
doc_updated_at=last_modified,
size_bytes=size_bytes if size_bytes else 0
)
)
if len(batch) == self.batch_size:
yield batch
batch = []
            except Exception:
                logging.exception(f"Error processing object {key}")
if batch:
yield batch
def load_from_state(self) -> GenerateDocumentsOutput:
"""Load documents from state"""
logging.debug("Loading blob objects")
return self._yield_blob_objects(
start=datetime(1970, 1, 1, tzinfo=timezone.utc),
end=datetime.now(timezone.utc),
)
def poll_source(
self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch
) -> GenerateDocumentsOutput:
"""Poll source to get documents"""
if self.s3_client is None:
raise ConnectorMissingCredentialError("Blob storage")
start_datetime = datetime.fromtimestamp(start, tz=timezone.utc)
end_datetime = datetime.fromtimestamp(end, tz=timezone.utc)
for batch in self._yield_blob_objects(start_datetime, end_datetime):
yield batch
def validate_connector_settings(self) -> None:
"""Validate connector settings"""
if self.s3_client is None:
raise ConnectorMissingCredentialError(
"Blob storage credentials not loaded."
)
if not self.bucket_name:
raise ConnectorValidationError(
"No bucket name was provided in connector settings."
)
try:
# Lightweight validation step
self.s3_client.list_objects_v2(
Bucket=self.bucket_name, Prefix=self.prefix, MaxKeys=1
)
except Exception as e:
error_code = getattr(e, 'response', {}).get('Error', {}).get('Code', '')
status_code = getattr(e, 'response', {}).get('ResponseMetadata', {}).get('HTTPStatusCode')
# Common S3 error scenarios
if error_code in [
"AccessDenied",
"InvalidAccessKeyId",
"SignatureDoesNotMatch",
]:
if status_code == 403 or error_code == "AccessDenied":
raise InsufficientPermissionsError(
f"Insufficient permissions to list objects in bucket '{self.bucket_name}'. "
"Please check your bucket policy and/or IAM policy."
)
if status_code == 401 or error_code == "SignatureDoesNotMatch":
raise CredentialExpiredError(
"Provided blob storage credentials appear invalid or expired."
)
raise CredentialExpiredError(
f"Credential issue encountered ({error_code})."
)
if error_code == "NoSuchBucket" or status_code == 404:
raise ConnectorValidationError(
f"Bucket '{self.bucket_name}' does not exist or cannot be found."
)
raise ConnectorValidationError(
f"Unexpected S3 client error (code={error_code}, status={status_code}): {e}"
)
if __name__ == "__main__":
# Example usage
credentials_dict = {
"aws_access_key_id": os.environ.get("AWS_ACCESS_KEY_ID"),
"aws_secret_access_key": os.environ.get("AWS_SECRET_ACCESS_KEY"),
}
# Initialize connector
connector = BlobStorageConnector(
bucket_type=os.environ.get("BUCKET_TYPE") or "s3",
bucket_name=os.environ.get("BUCKET_NAME") or "yyboombucket",
prefix="",
)
try:
connector.load_credentials(credentials_dict)
document_batch_generator = connector.load_from_state()
for document_batch in document_batch_generator:
print("First batch of documents:")
for doc in document_batch:
print(f"Document ID: {doc.id}")
print(f"Semantic Identifier: {doc.semantic_identifier}")
print(f"Source: {doc.source}")
print(f"Updated At: {doc.doc_updated_at}")
print("---")
break
except ConnectorMissingCredentialError as e:
print(f"Error: {e}")
except Exception as e:
print(f"An unexpected error occurred: {e}")
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
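The semantic-identifier rule in `_yield_blob_objects` is worth isolating: a key gets the path-based identifier only when its basename collides with another object's basename. A standalone sketch of that rule (the helper name is hypothetical, not part of the connector):

import os

def semantic_id_for(key: str, prefix: str, filename_counts: dict[str, int]) -> str:
    # Mirrors the connector: plain basename when unique, "a / b / c.txt"-style
    # relative path (prefix stripped) when the basename collides.
    file_name = os.path.basename(key)
    if filename_counts.get(file_name, 0) <= 1:
        return file_name
    relative_path = key[len(prefix):] if prefix and key.startswith(prefix) else key
    return relative_path.replace("/", " / ") if relative_path else file_name

counts = {"report.pdf": 2, "readme.md": 1}
assert semantic_id_for("2024/q1/report.pdf", "", counts) == "2024 / q1 / report.pdf"
assert semantic_id_for("docs/readme.md", "", counts) == "readme.md"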
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/file_types.py | common/data_source/file_types.py | PRESENTATION_MIME_TYPE = (
"application/vnd.openxmlformats-officedocument.presentationml.presentation"
)
SPREADSHEET_MIME_TYPE = (
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
)
WORD_PROCESSING_MIME_TYPE = (
"application/vnd.openxmlformats-officedocument.wordprocessingml.document"
)
PDF_MIME_TYPE = "application/pdf"
class UploadMimeTypes:
IMAGE_MIME_TYPES = {"image/jpeg", "image/png", "image/webp"}
CSV_MIME_TYPES = {"text/csv"}
TEXT_MIME_TYPES = {
"text/plain",
"text/markdown",
"text/x-markdown",
"text/mdx",
"text/x-config",
"text/tab-separated-values",
"application/json",
"application/xml",
"text/xml",
"application/x-yaml",
}
DOCUMENT_MIME_TYPES = {
PDF_MIME_TYPE,
WORD_PROCESSING_MIME_TYPE,
PRESENTATION_MIME_TYPE,
SPREADSHEET_MIME_TYPE,
"message/rfc822",
"application/epub+zip",
}
ALLOWED_MIME_TYPES = IMAGE_MIME_TYPES.union(
TEXT_MIME_TYPES, DOCUMENT_MIME_TYPES, CSV_MIME_TYPES
)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
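A sketch of how `ALLOWED_MIME_TYPES` might gate uploads; the `mimetypes`-based guess is an assumption for illustration, not something this module prescribes:

import mimetypes

def is_allowed_upload(filename: str) -> bool:
    # Guess a MIME type from the file extension and check the whitelist.
    mime_type, _ = mimetypes.guess_type(filename)
    return mime_type in UploadMimeTypes.ALLOWED_MIME_TYPES

print(is_allowed_upload("report.pdf"))   # True: application/pdf is whitelisted
print(is_allowed_upload("archive.zip"))  # False: application/zip is not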
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/gmail_connector.py | common/data_source/gmail_connector.py | import logging
from typing import Any
from google.oauth2.credentials import Credentials as OAuthCredentials
from google.oauth2.service_account import Credentials as ServiceAccountCredentials
from googleapiclient.errors import HttpError
from common.data_source.config import INDEX_BATCH_SIZE, SLIM_BATCH_SIZE, DocumentSource
from common.data_source.google_util.auth import get_google_creds
from common.data_source.google_util.constant import DB_CREDENTIALS_PRIMARY_ADMIN_KEY, MISSING_SCOPES_ERROR_STR, SCOPE_INSTRUCTIONS, USER_FIELDS
from common.data_source.google_util.resource import get_admin_service, get_gmail_service
from common.data_source.google_util.util import _execute_single_retrieval, execute_paginated_retrieval, clean_string
from common.data_source.interfaces import LoadConnector, PollConnector, SecondsSinceUnixEpoch, SlimConnectorWithPermSync
from common.data_source.models import BasicExpertInfo, Document, ExternalAccess, GenerateDocumentsOutput, GenerateSlimDocumentOutput, SlimDocument, TextSection
from common.data_source.utils import build_time_range_query, clean_email_and_extract_name, get_message_body, is_mail_service_disabled_error, gmail_time_str_to_utc, sanitize_filename
# Constants for Gmail API fields
THREAD_LIST_FIELDS = "nextPageToken, threads(id)"
PARTS_FIELDS = "parts(body(data), mimeType)"
PAYLOAD_FIELDS = f"payload(headers, {PARTS_FIELDS})"
MESSAGES_FIELDS = f"messages(id, {PAYLOAD_FIELDS})"
THREADS_FIELDS = f"threads(id, {MESSAGES_FIELDS})"
THREAD_FIELDS = f"id, {MESSAGES_FIELDS}"
EMAIL_FIELDS = ["cc", "bcc", "from", "to"]
def _get_owners_from_emails(emails: dict[str, str | None]) -> list[BasicExpertInfo]:
"""Convert email dictionary to list of BasicExpertInfo objects."""
owners = []
for email, names in emails.items():
if names:
name_parts = names.split(" ")
first_name = " ".join(name_parts[:-1])
last_name = name_parts[-1]
else:
first_name = None
last_name = None
owners.append(BasicExpertInfo(email=email, first_name=first_name, last_name=last_name))
return owners
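# Illustration with synthetic values:
#   _get_owners_from_emails({"ada@example.com": "Ada Lovelace", "bob@example.com": None})
# returns [BasicExpertInfo(email="ada@example.com", first_name="Ada", last_name="Lovelace"),
#          BasicExpertInfo(email="bob@example.com", first_name=None, last_name=None)].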
def message_to_section(message: dict[str, Any]) -> tuple[TextSection, dict[str, str]]:
"""Convert Gmail message to text section and metadata."""
link = f"https://mail.google.com/mail/u/0/#inbox/{message['id']}"
payload = message.get("payload", {})
headers = payload.get("headers", [])
metadata: dict[str, Any] = {}
for header in headers:
name = header.get("name", "").lower()
value = header.get("value", "")
if name in EMAIL_FIELDS:
metadata[name] = value
if name == "subject":
metadata["subject"] = value
if name == "date":
metadata["updated_at"] = value
if labels := message.get("labelIds"):
metadata["labels"] = labels
message_data = ""
for name, value in metadata.items():
if name != "updated_at":
message_data += f"{name}: {value}\n"
message_body_text: str = get_message_body(payload)
return TextSection(link=link, text=message_body_text + message_data), metadata
def thread_to_document(full_thread: dict[str, Any], email_used_to_fetch_thread: str) -> Document | None:
"""Convert Gmail thread to Document object."""
all_messages = full_thread.get("messages", [])
if not all_messages:
return None
sections = []
semantic_identifier = ""
updated_at = None
from_emails: dict[str, str | None] = {}
other_emails: dict[str, str | None] = {}
for message in all_messages:
section, message_metadata = message_to_section(message)
sections.append(section)
for name, value in message_metadata.items():
if name in EMAIL_FIELDS:
email, display_name = clean_email_and_extract_name(value)
if name == "from":
from_emails[email] = display_name if not from_emails.get(email) else None
else:
other_emails[email] = display_name if not other_emails.get(email) else None
if not semantic_identifier:
semantic_identifier = message_metadata.get("subject", "")
semantic_identifier = clean_string(semantic_identifier)
semantic_identifier = sanitize_filename(semantic_identifier)
if message_metadata.get("updated_at"):
updated_at = message_metadata.get("updated_at")
updated_at_datetime = None
if updated_at:
updated_at_datetime = gmail_time_str_to_utc(updated_at)
thread_id = full_thread.get("id")
if not thread_id:
raise ValueError("Thread ID is required")
primary_owners = _get_owners_from_emails(from_emails)
secondary_owners = _get_owners_from_emails(other_emails)
if not semantic_identifier:
semantic_identifier = "(no subject)"
combined_sections = "\n\n".join(
sec.text for sec in sections if hasattr(sec, "text")
)
blob = combined_sections
size_bytes = len(blob)
extension = '.txt'
return Document(
id=thread_id,
semantic_identifier=semantic_identifier,
blob=blob,
size_bytes=size_bytes,
extension=extension,
source=DocumentSource.GMAIL,
primary_owners=primary_owners,
secondary_owners=secondary_owners,
doc_updated_at=updated_at_datetime,
metadata=message_metadata,
external_access=ExternalAccess(
external_user_emails={email_used_to_fetch_thread},
external_user_group_ids=set(),
is_public=False,
),
)
class GmailConnector(LoadConnector, PollConnector, SlimConnectorWithPermSync):
"""Gmail connector for synchronizing emails from Gmail accounts."""
def __init__(self, batch_size: int = INDEX_BATCH_SIZE) -> None:
self.batch_size = batch_size
self._creds: OAuthCredentials | ServiceAccountCredentials | None = None
self._primary_admin_email: str | None = None
@property
def primary_admin_email(self) -> str:
"""Get primary admin email."""
if self._primary_admin_email is None:
raise RuntimeError("Primary admin email missing, should not call this property before calling load_credentials")
return self._primary_admin_email
@property
def google_domain(self) -> str:
"""Get Google domain from email."""
if self._primary_admin_email is None:
raise RuntimeError("Primary admin email missing, should not call this property before calling load_credentials")
return self._primary_admin_email.split("@")[-1]
@property
def creds(self) -> OAuthCredentials | ServiceAccountCredentials:
"""Get Google credentials."""
if self._creds is None:
raise RuntimeError("Creds missing, should not call this property before calling load_credentials")
return self._creds
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, str] | None:
"""Load Gmail credentials."""
primary_admin_email = credentials[DB_CREDENTIALS_PRIMARY_ADMIN_KEY]
self._primary_admin_email = primary_admin_email
self._creds, new_creds_dict = get_google_creds(
credentials=credentials,
source=DocumentSource.GMAIL,
)
return new_creds_dict
def _get_all_user_emails(self) -> list[str]:
"""Get all user emails for Google Workspace domain."""
try:
admin_service = get_admin_service(self.creds, self.primary_admin_email)
emails = []
for user in execute_paginated_retrieval(
retrieval_function=admin_service.users().list,
list_key="users",
fields=USER_FIELDS,
domain=self.google_domain,
):
if email := user.get("primaryEmail"):
emails.append(email)
return emails
except HttpError as e:
if e.resp.status == 404:
logging.warning("Received 404 from Admin SDK; this may indicate a personal Gmail account with no Workspace domain. Falling back to single user.")
return [self.primary_admin_email]
raise
def _fetch_threads(
self,
time_range_start: SecondsSinceUnixEpoch | None = None,
time_range_end: SecondsSinceUnixEpoch | None = None,
) -> GenerateDocumentsOutput:
"""Fetch Gmail threads within time range."""
query = build_time_range_query(time_range_start, time_range_end)
doc_batch = []
for user_email in self._get_all_user_emails():
gmail_service = get_gmail_service(self.creds, user_email)
try:
for thread in execute_paginated_retrieval(
retrieval_function=gmail_service.users().threads().list,
list_key="threads",
userId=user_email,
fields=THREAD_LIST_FIELDS,
q=query,
continue_on_404_or_403=True,
):
full_thread = _execute_single_retrieval(
retrieval_function=gmail_service.users().threads().get,
userId=user_email,
fields=THREAD_FIELDS,
id=thread["id"],
continue_on_404_or_403=True,
)
doc = thread_to_document(full_thread, user_email)
if doc is None:
continue
doc_batch.append(doc)
                    if len(doc_batch) >= self.batch_size:
yield doc_batch
doc_batch = []
except HttpError as e:
if is_mail_service_disabled_error(e):
logging.warning(
"Skipping Gmail sync for %s because the mailbox is disabled.",
user_email,
)
continue
raise
if doc_batch:
yield doc_batch
def load_from_state(self) -> GenerateDocumentsOutput:
"""Load all documents from Gmail."""
try:
yield from self._fetch_threads()
except Exception as e:
if MISSING_SCOPES_ERROR_STR in str(e):
raise PermissionError(SCOPE_INSTRUCTIONS) from e
raise e
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> GenerateDocumentsOutput:
"""Poll Gmail for documents within time range."""
try:
yield from self._fetch_threads(start, end)
except Exception as e:
if MISSING_SCOPES_ERROR_STR in str(e):
raise PermissionError(SCOPE_INSTRUCTIONS) from e
raise e
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback=None,
) -> GenerateSlimDocumentOutput:
"""Retrieve slim documents for permission synchronization."""
query = build_time_range_query(start, end)
doc_batch = []
for user_email in self._get_all_user_emails():
logging.info(f"Fetching slim threads for user: {user_email}")
gmail_service = get_gmail_service(self.creds, user_email)
try:
for thread in execute_paginated_retrieval(
retrieval_function=gmail_service.users().threads().list,
list_key="threads",
userId=user_email,
fields=THREAD_LIST_FIELDS,
q=query,
continue_on_404_or_403=True,
):
doc_batch.append(
SlimDocument(
id=thread["id"],
external_access=ExternalAccess(
external_user_emails={user_email},
external_user_group_ids=set(),
is_public=False,
),
)
)
                    if len(doc_batch) >= SLIM_BATCH_SIZE:
yield doc_batch
doc_batch = []
except HttpError as e:
if is_mail_service_disabled_error(e):
logging.warning(
"Skipping slim Gmail sync for %s because the mailbox is disabled.",
user_email,
)
continue
raise
if doc_batch:
yield doc_batch
if __name__ == "__main__":
import time
import os
from common.data_source.google_util.util import get_credentials_from_env
logging.basicConfig(level=logging.INFO)
try:
email = os.environ.get("GMAIL_TEST_EMAIL", "newyorkupperbay@gmail.com")
creds = get_credentials_from_env(email, oauth=True, source="gmail")
print("Credentials loaded successfully")
print(f"{creds=}")
connector = GmailConnector(batch_size=2)
print("GmailConnector initialized")
connector.load_credentials(creds)
print("Credentials loaded into connector")
print("Gmail is ready to use")
for file in connector._fetch_threads(
int(time.time()) - 1 * 24 * 60 * 60,
int(time.time()),
):
print("new batch","-"*80)
for f in file:
print(f)
print("\n\n")
except Exception as e:
logging.exception(f"Error loading credentials: {e}") | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/models.py | common/data_source/models.py | """Data model definitions for all connectors"""
from dataclasses import dataclass
from enum import Enum
from datetime import datetime
from typing import Any, Optional, List, Sequence, NamedTuple
from typing_extensions import TypedDict, NotRequired
from pydantic import BaseModel
@dataclass(frozen=True)
class ExternalAccess:
# arbitrary limit to prevent excessively large permissions sets
# not internally enforced ... the caller can check this before using the instance
MAX_NUM_ENTRIES = 5000
# Emails of external users with access to the doc externally
external_user_emails: set[str]
# Names or external IDs of groups with access to the doc
external_user_group_ids: set[str]
# Whether the document is public in the external system or Onyx
is_public: bool
def __str__(self) -> str:
"""Prevent extremely long logs"""
def truncate_set(s: set[str], max_len: int = 100) -> str:
s_str = str(s)
if len(s_str) > max_len:
return f"{s_str[:max_len]}... ({len(s)} items)"
return s_str
return (
f"ExternalAccess("
f"external_user_emails={truncate_set(self.external_user_emails)}, "
f"external_user_group_ids={truncate_set(self.external_user_group_ids)}, "
f"is_public={self.is_public})"
)
@property
def num_entries(self) -> int:
return len(self.external_user_emails) + len(self.external_user_group_ids)
@classmethod
def public(cls) -> "ExternalAccess":
return cls(
external_user_emails=set(),
external_user_group_ids=set(),
is_public=True,
)
@classmethod
def empty(cls) -> "ExternalAccess":
"""
A helper function that returns an *empty* set of external user-emails and group-ids, and sets `is_public` to `False`.
This effectively makes the document in question "private" or inaccessible to anyone else.
This is especially helpful to use when you are performing permission-syncing, and some document's permissions can't
be determined (for whatever reason). Setting its `ExternalAccess` to "private" is a feasible fallback.
"""
return cls(
external_user_emails=set(),
external_user_group_ids=set(),
is_public=False,
)
class ExtractionResult(NamedTuple):
"""Structured result from text and image extraction from various file types."""
text_content: str
embedded_images: Sequence[tuple[bytes, str]]
metadata: dict[str, Any]
class TextSection(BaseModel):
"""Text section model"""
link: str
text: str
class ImageSection(BaseModel):
"""Image section model"""
link: str
image_file_id: str
class Document(BaseModel):
    """Document model"""
    id: str
    source: str
    semantic_identifier: str
    extension: str
    blob: bytes
    doc_updated_at: Optional[datetime] = None
    size_bytes: int
    external_access: Optional[ExternalAccess] = None
    primary_owners: Optional[list] = None
    secondary_owners: Optional[list] = None
    metadata: Optional[dict[str, Any]] = None
    doc_metadata: Optional[dict[str, Any]] = None
class BasicExpertInfo(BaseModel):
"""Expert information model"""
display_name: Optional[str] = None
first_name: Optional[str] = None
last_name: Optional[str] = None
email: Optional[str] = None
def get_semantic_name(self) -> str:
"""Get semantic name for display"""
if self.display_name:
return self.display_name
elif self.first_name and self.last_name:
return f"{self.first_name} {self.last_name}"
elif self.first_name:
return self.first_name
elif self.last_name:
return self.last_name
else:
return "Unknown"
class SlimDocument(BaseModel):
"""Simplified document model (contains only ID and permission info)"""
id: str
external_access: Optional[Any] = None
class ConnectorCheckpoint(BaseModel):
"""Connector checkpoint model"""
has_more: bool = True
class DocumentFailure(BaseModel):
"""Document processing failure information"""
document_id: str
document_link: str
class EntityFailure(BaseModel):
"""Entity processing failure information"""
entity_id: str
missed_time_range: tuple[datetime, datetime]
class ConnectorFailure(BaseModel):
"""Connector failure information"""
failed_document: Optional[DocumentFailure] = None
failed_entity: Optional[EntityFailure] = None
failure_message: str
exception: Optional[Exception] = None
model_config = {"arbitrary_types_allowed": True}
# Gmail Models
class GmailCredentials(BaseModel):
"""Gmail authentication credentials model"""
primary_admin_email: str
credentials: dict[str, Any]
class GmailThread(BaseModel):
"""Gmail thread data model"""
id: str
messages: list[dict[str, Any]]
class GmailMessage(BaseModel):
"""Gmail message data model"""
id: str
payload: dict[str, Any]
label_ids: Optional[list[str]] = None
# Notion Models
class NotionPage(BaseModel):
"""Represents a Notion Page object"""
id: str
created_time: str
last_edited_time: str
archived: bool
properties: dict[str, Any]
url: str
parent: Optional[dict[str, Any]] = None # Parent reference for path reconstruction
database_name: Optional[str] = None # Only applicable to database type pages
class NotionBlock(BaseModel):
"""Represents a Notion Block object"""
id: str # Used for the URL
text: str
prefix: str # How this block should be joined with existing text
class NotionSearchResponse(BaseModel):
"""Represents the response from the Notion Search API"""
results: list[dict[str, Any]]
next_cursor: Optional[str]
has_more: bool = False
class NotionCredentials(BaseModel):
"""Notion authentication credentials model"""
integration_token: str
# Slack Models
class ChannelTopicPurposeType(TypedDict):
"""Slack channel topic or purpose"""
value: str
creator: str
last_set: int
class ChannelType(TypedDict):
"""Slack channel"""
id: str
name: str
is_channel: bool
is_group: bool
is_im: bool
created: int
creator: str
is_archived: bool
is_general: bool
unlinked: int
name_normalized: str
is_shared: bool
is_ext_shared: bool
is_org_shared: bool
pending_shared: List[str]
is_pending_ext_shared: bool
is_member: bool
is_private: bool
is_mpim: bool
updated: int
topic: ChannelTopicPurposeType
purpose: ChannelTopicPurposeType
previous_names: List[str]
num_members: int
class AttachmentType(TypedDict):
"""Slack message attachment"""
service_name: NotRequired[str]
text: NotRequired[str]
fallback: NotRequired[str]
thumb_url: NotRequired[str]
thumb_width: NotRequired[int]
thumb_height: NotRequired[int]
id: NotRequired[int]
class BotProfileType(TypedDict):
"""Slack bot profile"""
id: NotRequired[str]
deleted: NotRequired[bool]
name: NotRequired[str]
updated: NotRequired[int]
app_id: NotRequired[str]
team_id: NotRequired[str]
class MessageType(TypedDict):
"""Slack message"""
type: str
user: str
text: str
ts: str
attachments: NotRequired[List[AttachmentType]]
bot_id: NotRequired[str]
app_id: NotRequired[str]
bot_profile: NotRequired[BotProfileType]
thread_ts: NotRequired[str]
subtype: NotRequired[str]
# Thread message list
ThreadType = List[MessageType]
class SlackCheckpoint(TypedDict):
"""Slack checkpoint"""
channel_ids: List[str] | None
channel_completion_map: dict[str, str]
current_channel: ChannelType | None
current_channel_access: Any | None
seen_thread_ts: List[str]
has_more: bool
class SlackMessageFilterReason(str, Enum):
    """Slack message filter reason"""
    BOT = "bot"
    DISALLOWED = "disallowed"
class ProcessedSlackMessage:
"""Processed Slack message"""
def __init__(self, doc=None, thread_or_message_ts=None, filter_reason=None, failure=None):
self.doc = doc
self.thread_or_message_ts = thread_or_message_ts
self.filter_reason = filter_reason
self.failure = failure
# Type aliases for type hints
SecondsSinceUnixEpoch = float
GenerateDocumentsOutput = Any
GenerateSlimDocumentOutput = Any
CheckpointOutput = Any
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
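A construction sketch tying the core models together (all values illustrative):

from datetime import datetime, timezone

access = ExternalAccess(
    external_user_emails={"ada@example.com"},
    external_user_group_ids=set(),
    is_public=False,
)
doc = Document(
    id="example:1",
    source="example_source",
    semantic_identifier="Example document",
    extension=".txt",
    blob=b"hello",
    doc_updated_at=datetime.now(timezone.utc),
    size_bytes=5,
    external_access=access,
)
print(doc.semantic_identifier, access.num_entries)  # Example document 1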
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/dropbox_connector.py | common/data_source/dropbox_connector.py | """Dropbox connector"""
import logging
from datetime import timezone
from typing import Any
from dropbox import Dropbox
from dropbox.exceptions import ApiError, AuthError
from dropbox.files import FileMetadata, FolderMetadata
from common.data_source.config import INDEX_BATCH_SIZE, DocumentSource
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
InsufficientPermissionsError,
)
from common.data_source.interfaces import LoadConnector, PollConnector, SecondsSinceUnixEpoch
from common.data_source.models import Document, GenerateDocumentsOutput
from common.data_source.utils import get_file_ext
logger = logging.getLogger(__name__)
class DropboxConnector(LoadConnector, PollConnector):
"""Dropbox connector for accessing Dropbox files and folders"""
def __init__(self, batch_size: int = INDEX_BATCH_SIZE) -> None:
self.batch_size = batch_size
self.dropbox_client: Dropbox | None = None
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Load Dropbox credentials"""
access_token = credentials.get("dropbox_access_token")
if not access_token:
raise ConnectorMissingCredentialError("Dropbox access token is required")
self.dropbox_client = Dropbox(access_token)
return None
def validate_connector_settings(self) -> None:
"""Validate Dropbox connector settings"""
if self.dropbox_client is None:
raise ConnectorMissingCredentialError("Dropbox")
try:
self.dropbox_client.files_list_folder(path="", limit=1)
        except AuthError as e:
            logger.exception("[Dropbox]: Failed to validate Dropbox credentials")
            raise ConnectorValidationError(f"Dropbox credential is invalid: {e}") from e
        except ApiError as e:
            if e.error is not None and "insufficient_permissions" in str(e.error).lower():
                raise InsufficientPermissionsError("Your Dropbox token does not have sufficient permissions.") from e
            raise ConnectorValidationError(f"Unexpected Dropbox error during validation: {e.user_message_text or e}") from e
        except Exception as e:
            raise ConnectorValidationError(f"Unexpected error during Dropbox settings validation: {e}") from e
def _download_file(self, path: str) -> bytes:
"""Download a single file from Dropbox."""
if self.dropbox_client is None:
raise ConnectorMissingCredentialError("Dropbox")
_, resp = self.dropbox_client.files_download(path)
return resp.content
def _get_shared_link(self, path: str) -> str:
"""Create a shared link for a file in Dropbox."""
if self.dropbox_client is None:
raise ConnectorMissingCredentialError("Dropbox")
try:
shared_links = self.dropbox_client.sharing_list_shared_links(path=path)
if shared_links.links:
return shared_links.links[0].url
link_metadata = self.dropbox_client.sharing_create_shared_link_with_settings(path)
return link_metadata.url
except ApiError as err:
logger.exception(f"[Dropbox]: Failed to create a shared link for {path}: {err}")
return ""
def _yield_files_recursive(
self,
path: str,
start: SecondsSinceUnixEpoch | None,
end: SecondsSinceUnixEpoch | None,
) -> GenerateDocumentsOutput:
"""Yield files in batches from a specified Dropbox folder, including subfolders."""
if self.dropbox_client is None:
raise ConnectorMissingCredentialError("Dropbox")
# Collect all files first to count filename occurrences
all_files = []
self._collect_files_recursive(path, start, end, all_files)
# Count filename occurrences
filename_counts: dict[str, int] = {}
for entry, _ in all_files:
filename_counts[entry.name] = filename_counts.get(entry.name, 0) + 1
# Process files in batches
batch: list[Document] = []
for entry, downloaded_file in all_files:
modified_time = entry.client_modified
if modified_time.tzinfo is None:
modified_time = modified_time.replace(tzinfo=timezone.utc)
else:
modified_time = modified_time.astimezone(timezone.utc)
# Use full path only if filename appears multiple times
if filename_counts.get(entry.name, 0) > 1:
# Remove leading slash and replace slashes with ' / '
relative_path = entry.path_display.lstrip('/')
semantic_id = relative_path.replace('/', ' / ') if relative_path else entry.name
else:
semantic_id = entry.name
batch.append(
Document(
id=f"dropbox:{entry.id}",
blob=downloaded_file,
source=DocumentSource.DROPBOX,
semantic_identifier=semantic_id,
extension=get_file_ext(entry.name),
doc_updated_at=modified_time,
size_bytes=entry.size if getattr(entry, "size", None) is not None else len(downloaded_file),
)
)
if len(batch) == self.batch_size:
yield batch
batch = []
if batch:
yield batch
def _collect_files_recursive(
self,
path: str,
start: SecondsSinceUnixEpoch | None,
end: SecondsSinceUnixEpoch | None,
all_files: list,
) -> None:
"""Recursively collect all files matching time criteria."""
if self.dropbox_client is None:
raise ConnectorMissingCredentialError("Dropbox")
result = self.dropbox_client.files_list_folder(
path,
recursive=False,
include_non_downloadable_files=False,
)
while True:
for entry in result.entries:
if isinstance(entry, FileMetadata):
modified_time = entry.client_modified
if modified_time.tzinfo is None:
modified_time = modified_time.replace(tzinfo=timezone.utc)
else:
modified_time = modified_time.astimezone(timezone.utc)
time_as_seconds = modified_time.timestamp()
if start is not None and time_as_seconds <= start:
continue
if end is not None and time_as_seconds > end:
continue
try:
downloaded_file = self._download_file(entry.path_display)
all_files.append((entry, downloaded_file))
except Exception:
logger.exception(f"[Dropbox]: Error downloading file {entry.path_display}")
continue
elif isinstance(entry, FolderMetadata):
self._collect_files_recursive(entry.path_lower, start, end, all_files)
if not result.has_more:
break
result = self.dropbox_client.files_list_folder_continue(result.cursor)
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> GenerateDocumentsOutput:
"""Poll Dropbox for recent file changes"""
if self.dropbox_client is None:
raise ConnectorMissingCredentialError("Dropbox")
for batch in self._yield_files_recursive("", start, end):
yield batch
def load_from_state(self) -> GenerateDocumentsOutput:
"""Load files from Dropbox state"""
return self._yield_files_recursive("", None, None)
if __name__ == "__main__":
import os
logging.basicConfig(level=logging.DEBUG)
connector = DropboxConnector()
connector.load_credentials({"dropbox_access_token": os.environ.get("DROPBOX_ACCESS_TOKEN")})
connector.validate_connector_settings()
document_batches = connector.load_from_state()
try:
first_batch = next(document_batches)
print(f"Loaded {len(first_batch)} documents in first batch.")
for doc in first_batch:
print(f"- {doc.semantic_identifier} ({doc.size_bytes} bytes)")
except StopIteration:
print("No documents available in Dropbox.")
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
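The time filter in `_collect_files_recursive` is half-open (`start < mtime <= end`), so consecutive polls chain without duplicating files; a sketch:

import os
import time

connector = DropboxConnector(batch_size=50)
connector.load_credentials({"dropbox_access_token": os.environ.get("DROPBOX_ACCESS_TOKEN")})
last_poll = time.time() - 3600  # resume point, e.g. one hour ago
now = time.time()
for batch in connector.poll_source(last_poll, now):
    for doc in batch:
        print(doc.semantic_identifier)
last_poll = now  # the next window starts exactly where this one ended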
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/zendesk_connector.py | common/data_source/zendesk_connector.py | import copy
import logging
import time
from collections.abc import Callable
from collections.abc import Iterator
from typing import Any
import requests
from pydantic import BaseModel
from requests.exceptions import HTTPError
from typing_extensions import override
from common.data_source.config import ZENDESK_CONNECTOR_SKIP_ARTICLE_LABELS, DocumentSource
from common.data_source.exceptions import ConnectorValidationError, CredentialExpiredError, InsufficientPermissionsError
from common.data_source.html_utils import parse_html_page_basic
from common.data_source.interfaces import CheckpointOutput, CheckpointOutputWrapper, CheckpointedConnector, IndexingHeartbeatInterface, SlimConnectorWithPermSync
from common.data_source.models import BasicExpertInfo, ConnectorCheckpoint, ConnectorFailure, Document, DocumentFailure, GenerateSlimDocumentOutput, SecondsSinceUnixEpoch, SlimDocument
from common.data_source.utils import retry_builder, time_str_to_utc, rate_limit_builder
MAX_PAGE_SIZE = 30 # Zendesk API maximum
MAX_AUTHOR_MAP_SIZE = 50_000 # Reset author map cache if it gets too large
_SLIM_BATCH_SIZE = 1000
class ZendeskCredentialsNotSetUpError(PermissionError):
def __init__(self) -> None:
super().__init__(
"Zendesk Credentials are not set up, was load_credentials called?"
)
class ZendeskClient:
def __init__(
self,
subdomain: str,
email: str,
token: str,
calls_per_minute: int | None = None,
):
self.base_url = f"https://{subdomain}.zendesk.com/api/v2"
self.auth = (f"{email}/token", token)
self.make_request = request_with_rate_limit(self, calls_per_minute)
def request_with_rate_limit(
client: ZendeskClient, max_calls_per_minute: int | None = None
) -> Callable[[str, dict[str, Any]], dict[str, Any]]:
@retry_builder()
@(
rate_limit_builder(max_calls=max_calls_per_minute, period=60)
if max_calls_per_minute
else lambda x: x
)
def make_request(endpoint: str, params: dict[str, Any]) -> dict[str, Any]:
response = requests.get(
f"{client.base_url}/{endpoint}", auth=client.auth, params=params
)
if response.status_code == 429:
retry_after = response.headers.get("Retry-After")
if retry_after is not None:
# Sleep for the duration indicated by the Retry-After header
time.sleep(int(retry_after))
elif (
response.status_code == 403
and response.json().get("error") == "SupportProductInactive"
):
return response.json()
response.raise_for_status()
return response.json()
return make_request
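# Unrolled, the decorator stack above reads (when max_calls_per_minute is set):
#     make_request = retry_builder()(rate_limit_builder(max_calls=..., period=60)(make_request))
# i.e. the rate limiter wraps the raw call and the retry wrapper sits outermost;
# with no limit configured, the identity lambda leaves only the retry wrapper.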
class ZendeskPageResponse(BaseModel):
data: list[dict[str, Any]]
meta: dict[str, Any]
has_more: bool
def _get_content_tag_mapping(client: ZendeskClient) -> dict[str, str]:
content_tags: dict[str, str] = {}
params = {"page[size]": MAX_PAGE_SIZE}
try:
while True:
data = client.make_request("guide/content_tags", params)
for tag in data.get("records", []):
content_tags[tag["id"]] = tag["name"]
# Check if there are more pages
if data.get("meta", {}).get("has_more", False):
params["page[after]"] = data["meta"]["after_cursor"]
else:
break
return content_tags
    except Exception as e:
        raise RuntimeError(f"Error fetching content tags: {e}") from e
def _get_articles(
client: ZendeskClient, start_time: int | None = None, page_size: int = MAX_PAGE_SIZE
) -> Iterator[dict[str, Any]]:
params = {"page[size]": page_size, "sort_by": "updated_at", "sort_order": "asc"}
if start_time is not None:
params["start_time"] = start_time
while True:
data = client.make_request("help_center/articles", params)
for article in data["articles"]:
yield article
if not data.get("meta", {}).get("has_more"):
break
params["page[after]"] = data["meta"]["after_cursor"]
def _get_article_page(
client: ZendeskClient,
start_time: int | None = None,
after_cursor: str | None = None,
page_size: int = MAX_PAGE_SIZE,
) -> ZendeskPageResponse:
params = {"page[size]": page_size, "sort_by": "updated_at", "sort_order": "asc"}
if start_time is not None:
params["start_time"] = start_time
if after_cursor is not None:
params["page[after]"] = after_cursor
data = client.make_request("help_center/articles", params)
return ZendeskPageResponse(
data=data["articles"],
meta=data["meta"],
has_more=bool(data["meta"].get("has_more", False)),
)
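# Cursor pagination sketch (mirrors _retrieve_articles below): feed meta["after_cursor"]
# back in until has_more goes False:
#     resp = _get_article_page(client, start_time=0)
#     while resp.has_more:
#         resp = _get_article_page(client, after_cursor=resp.meta.get("after_cursor"))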
def _get_tickets(
client: ZendeskClient, start_time: int | None = None
) -> Iterator[dict[str, Any]]:
params = {"start_time": start_time or 0}
while True:
data = client.make_request("incremental/tickets.json", params)
for ticket in data["tickets"]:
yield ticket
if not data.get("end_of_stream", False):
params["start_time"] = data["end_time"]
else:
break
# TODO: maybe these don't need to be their own functions?
def _get_tickets_page(
client: ZendeskClient, start_time: int | None = None
) -> ZendeskPageResponse:
params = {"start_time": start_time or 0}
# NOTE: for some reason zendesk doesn't seem to be respecting the start_time param
# in my local testing with very few tickets. We'll look into it if this becomes an
# issue in larger deployments
data = client.make_request("incremental/tickets.json", params)
if data.get("error") == "SupportProductInactive":
raise ValueError(
"Zendesk Support Product is not active for this account, No tickets to index"
)
return ZendeskPageResponse(
data=data["tickets"],
meta={"end_time": data["end_time"]},
has_more=not bool(data.get("end_of_stream", False)),
)
def _fetch_author(
client: ZendeskClient, author_id: str | int
) -> BasicExpertInfo | None:
# Skip fetching if author_id is invalid
# cast to str to avoid issues with zendesk changing their types
if not author_id or str(author_id) == "-1":
return None
try:
author_data = client.make_request(f"users/{author_id}", {})
user = author_data.get("user")
return (
BasicExpertInfo(display_name=user.get("name"), email=user.get("email"))
if user and user.get("name") and user.get("email")
else None
)
except requests.exceptions.HTTPError:
# Handle any API errors gracefully
return None
def _article_to_document(
article: dict[str, Any],
content_tags: dict[str, str],
author_map: dict[str, BasicExpertInfo],
client: ZendeskClient,
) -> tuple[dict[str, BasicExpertInfo] | None, Document]:
author_id = article.get("author_id")
if not author_id:
author = None
else:
author = (
author_map.get(author_id)
if author_id in author_map
else _fetch_author(client, author_id)
)
new_author_mapping = {author_id: author} if author_id and author else None
updated_at = article.get("updated_at")
update_time = time_str_to_utc(updated_at) if updated_at else None
text = parse_html_page_basic(article.get("body") or "")
blob = text.encode("utf-8", errors="replace")
# Build metadata
metadata: dict[str, str | list[str]] = {
"labels": [str(label) for label in article.get("label_names", []) if label],
"content_tags": [
content_tags[tag_id]
for tag_id in article.get("content_tag_ids", [])
if tag_id in content_tags
],
}
# Remove empty values
metadata = {k: v for k, v in metadata.items() if v}
return new_author_mapping, Document(
id=f"article:{article['id']}",
source=DocumentSource.ZENDESK,
semantic_identifier=article["title"],
extension=".txt",
blob=blob,
size_bytes=len(blob),
doc_updated_at=update_time,
primary_owners=[author] if author else None,
metadata=metadata,
)
def _get_comment_text(
comment: dict[str, Any],
author_map: dict[str, BasicExpertInfo],
client: ZendeskClient,
) -> tuple[dict[str, BasicExpertInfo] | None, str]:
author_id = comment.get("author_id")
if not author_id:
author = None
else:
author = (
author_map.get(author_id)
if author_id in author_map
else _fetch_author(client, author_id)
)
new_author_mapping = {author_id: author} if author_id and author else None
comment_text = f"Comment{' by ' + author.display_name if author and author.display_name else ''}"
comment_text += f"{' at ' + comment['created_at'] if comment.get('created_at') else ''}:\n{comment['body']}"
return new_author_mapping, comment_text
def _ticket_to_document(
ticket: dict[str, Any],
author_map: dict[str, BasicExpertInfo],
client: ZendeskClient,
) -> tuple[dict[str, BasicExpertInfo] | None, Document]:
submitter_id = ticket.get("submitter")
if not submitter_id:
submitter = None
else:
submitter = (
author_map.get(submitter_id)
if submitter_id in author_map
else _fetch_author(client, submitter_id)
)
new_author_mapping = (
{submitter_id: submitter} if submitter_id and submitter else None
)
updated_at = ticket.get("updated_at")
update_time = time_str_to_utc(updated_at) if updated_at else None
metadata: dict[str, str | list[str]] = {}
if status := ticket.get("status"):
metadata["status"] = status
if priority := ticket.get("priority"):
metadata["priority"] = priority
if tags := ticket.get("tags"):
metadata["tags"] = tags
if ticket_type := ticket.get("type"):
metadata["ticket_type"] = ticket_type
# Fetch comments for the ticket
comments_data = client.make_request(f"tickets/{ticket.get('id')}/comments", {})
comments = comments_data.get("comments", [])
comment_texts = []
for comment in comments:
new_author_mapping, comment_text = _get_comment_text(
comment, author_map, client
)
if new_author_mapping:
author_map.update(new_author_mapping)
comment_texts.append(comment_text)
comments_text = "\n\n".join(comment_texts)
subject = ticket.get("subject")
full_text = f"Ticket Subject:\n{subject}\n\nComments:\n{comments_text}"
blob = full_text.encode("utf-8", errors="replace")
return new_author_mapping, Document(
id=f"zendesk_ticket_{ticket['id']}",
blob=blob,
extension=".txt",
size_bytes=len(blob),
source=DocumentSource.ZENDESK,
semantic_identifier=f"Ticket #{ticket['id']}: {subject or 'No Subject'}",
doc_updated_at=update_time,
primary_owners=[submitter] if submitter else None,
metadata=metadata,
)
class ZendeskConnectorCheckpoint(ConnectorCheckpoint):
# We use cursor-based paginated retrieval for articles
after_cursor_articles: str | None
# We use timestamp-based paginated retrieval for tickets
next_start_time_tickets: int | None
cached_author_map: dict[str, BasicExpertInfo] | None
cached_content_tags: dict[str, str] | None
class ZendeskConnector(
SlimConnectorWithPermSync, CheckpointedConnector[ZendeskConnectorCheckpoint]
):
    def __init__(
        self,
        content_type: str = "articles",
        calls_per_minute: int | None = None,
    ) -> None:
        self.content_type = content_type
        self.subdomain = ""
        self.client: ZendeskClient | None = None
        # Content tags are fetched lazily on the first checkpoint load
        self.content_tags: dict[str, str] = {}
        self.calls_per_minute = calls_per_minute
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
        # The credential may hold the full URL; reduce it to the bare subdomain
subdomain = (
credentials["zendesk_subdomain"]
.replace("https://", "")
.split(".zendesk.com")[0]
)
self.subdomain = subdomain
self.client = ZendeskClient(
subdomain,
credentials["zendesk_email"],
credentials["zendesk_token"],
calls_per_minute=self.calls_per_minute,
)
return None
@override
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ZendeskConnectorCheckpoint,
) -> CheckpointOutput[ZendeskConnectorCheckpoint]:
if self.client is None:
raise ZendeskCredentialsNotSetUpError()
if checkpoint.cached_content_tags is None:
checkpoint.cached_content_tags = _get_content_tag_mapping(self.client)
return checkpoint # save the content tags to the checkpoint
self.content_tags = checkpoint.cached_content_tags
if self.content_type == "articles":
checkpoint = yield from self._retrieve_articles(start, end, checkpoint)
return checkpoint
elif self.content_type == "tickets":
checkpoint = yield from self._retrieve_tickets(start, end, checkpoint)
return checkpoint
else:
raise ValueError(f"Unsupported content_type: {self.content_type}")
def _retrieve_articles(
self,
start: SecondsSinceUnixEpoch | None,
end: SecondsSinceUnixEpoch | None,
checkpoint: ZendeskConnectorCheckpoint,
) -> CheckpointOutput[ZendeskConnectorCheckpoint]:
checkpoint = copy.deepcopy(checkpoint)
        # This one is built on the fly as there may be many more authors than tags
author_map: dict[str, BasicExpertInfo] = checkpoint.cached_author_map or {}
after_cursor = checkpoint.after_cursor_articles
doc_batch: list[Document] = []
response = _get_article_page(
self.client,
start_time=int(start) if start else None,
after_cursor=after_cursor,
)
articles = response.data
has_more = response.has_more
after_cursor = response.meta.get("after_cursor")
for article in articles:
if (
article.get("body") is None
or article.get("draft")
or any(
label in ZENDESK_CONNECTOR_SKIP_ARTICLE_LABELS
for label in article.get("label_names", [])
)
):
continue
try:
new_author_map, document = _article_to_document(
article, self.content_tags, author_map, self.client
)
except Exception as e:
logging.error(f"Error processing article {article['id']}: {e}")
yield ConnectorFailure(
failed_document=DocumentFailure(
document_id=f"{article.get('id')}",
document_link=article.get("html_url", ""),
),
failure_message=str(e),
exception=e,
)
continue
if new_author_map:
author_map.update(new_author_map)
updated_at = document.doc_updated_at
updated_ts = updated_at.timestamp() if updated_at else None
if updated_ts is not None:
if start is not None and updated_ts <= start:
continue
if end is not None and updated_ts > end:
continue
doc_batch.append(document)
if not has_more:
yield from doc_batch
checkpoint.has_more = False
return checkpoint
# Sometimes no documents are retrieved, but the cursor
# is still updated so the connector makes progress.
yield from doc_batch
checkpoint.after_cursor_articles = after_cursor
last_doc_updated_at = doc_batch[-1].doc_updated_at if doc_batch else None
checkpoint.has_more = bool(
end is None
or last_doc_updated_at is None
or last_doc_updated_at.timestamp() <= end
)
checkpoint.cached_author_map = (
author_map if len(author_map) <= MAX_AUTHOR_MAP_SIZE else None
)
return checkpoint
def _retrieve_tickets(
self,
start: SecondsSinceUnixEpoch | None,
end: SecondsSinceUnixEpoch | None,
checkpoint: ZendeskConnectorCheckpoint,
) -> CheckpointOutput[ZendeskConnectorCheckpoint]:
checkpoint = copy.deepcopy(checkpoint)
if self.client is None:
raise ZendeskCredentialsNotSetUpError()
author_map: dict[str, BasicExpertInfo] = checkpoint.cached_author_map or {}
doc_batch: list[Document] = []
next_start_time = int(checkpoint.next_start_time_tickets or start or 0)
ticket_response = _get_tickets_page(self.client, start_time=next_start_time)
tickets = ticket_response.data
has_more = ticket_response.has_more
next_start_time = ticket_response.meta["end_time"]
for ticket in tickets:
if ticket.get("status") == "deleted":
continue
try:
new_author_map, document = _ticket_to_document(
ticket=ticket,
author_map=author_map,
client=self.client,
)
except Exception as e:
logging.error(f"Error processing ticket {ticket['id']}: {e}")
yield ConnectorFailure(
failed_document=DocumentFailure(
document_id=f"{ticket.get('id')}",
document_link=ticket.get("url", ""),
),
failure_message=str(e),
exception=e,
)
continue
if new_author_map:
author_map.update(new_author_map)
updated_at = document.doc_updated_at
updated_ts = updated_at.timestamp() if updated_at else None
if updated_ts is not None:
if start is not None and updated_ts <= start:
continue
if end is not None and updated_ts > end:
continue
doc_batch.append(document)
if not has_more:
yield from doc_batch
checkpoint.has_more = False
return checkpoint
yield from doc_batch
checkpoint.next_start_time_tickets = next_start_time
last_doc_updated_at = doc_batch[-1].doc_updated_at if doc_batch else None
checkpoint.has_more = bool(
end is None
or last_doc_updated_at is None
or last_doc_updated_at.timestamp() <= end
)
checkpoint.cached_author_map = (
author_map if len(author_map) <= MAX_AUTHOR_MAP_SIZE else None
)
return checkpoint
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: IndexingHeartbeatInterface | None = None,
) -> GenerateSlimDocumentOutput:
slim_doc_batch: list[SlimDocument] = []
if self.content_type == "articles":
articles = _get_articles(
self.client, start_time=int(start) if start else None
)
for article in articles:
slim_doc_batch.append(
SlimDocument(
id=f"article:{article['id']}",
)
)
if len(slim_doc_batch) >= _SLIM_BATCH_SIZE:
yield slim_doc_batch
slim_doc_batch = []
elif self.content_type == "tickets":
tickets = _get_tickets(
self.client, start_time=int(start) if start else None
)
for ticket in tickets:
slim_doc_batch.append(
SlimDocument(
id=f"zendesk_ticket_{ticket['id']}",
)
)
if len(slim_doc_batch) >= _SLIM_BATCH_SIZE:
yield slim_doc_batch
slim_doc_batch = []
else:
raise ValueError(f"Unsupported content_type: {self.content_type}")
if slim_doc_batch:
yield slim_doc_batch
@override
def validate_connector_settings(self) -> None:
if self.client is None:
raise ZendeskCredentialsNotSetUpError()
try:
_get_article_page(self.client, start_time=0)
except HTTPError as e:
# Check for HTTP status codes
if e.response.status_code == 401:
raise CredentialExpiredError(
"Your Zendesk credentials appear to be invalid or expired (HTTP 401)."
) from e
elif e.response.status_code == 403:
raise InsufficientPermissionsError(
"Your Zendesk token does not have sufficient permissions (HTTP 403)."
) from e
elif e.response.status_code == 404:
raise ConnectorValidationError(
"Zendesk resource not found (HTTP 404)."
) from e
else:
raise ConnectorValidationError(
f"Unexpected Zendesk error (status={e.response.status_code}): {e}"
) from e
@override
def validate_checkpoint_json(
self, checkpoint_json: str
) -> ZendeskConnectorCheckpoint:
return ZendeskConnectorCheckpoint.model_validate_json(checkpoint_json)
@override
def build_dummy_checkpoint(self) -> ZendeskConnectorCheckpoint:
return ZendeskConnectorCheckpoint(
after_cursor_articles=None,
next_start_time_tickets=None,
cached_author_map=None,
cached_content_tags=None,
has_more=True,
)
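# Illustrative sketch (not part of the original source): since
# validate_checkpoint_json parses with pydantic's model_validate_json, a
# checkpoint should round-trip through its JSON form via model_dump_json.
# The helper name below is hypothetical.
def _example_checkpoint_roundtrip(connector: ZendeskConnector) -> ZendeskConnectorCheckpoint:
    checkpoint = connector.build_dummy_checkpoint()
    serialized = checkpoint.model_dump_json()  # assumes a pydantic v2 model
    return connector.validate_checkpoint_json(serialized)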
if __name__ == "__main__":
import os
connector = ZendeskConnector(content_type="articles")
connector.load_credentials(
{
"zendesk_subdomain": os.environ["ZENDESK_SUBDOMAIN"],
"zendesk_email": os.environ["ZENDESK_EMAIL"],
"zendesk_token": os.environ["ZENDESK_TOKEN"],
}
)
current = time.time()
one_day_ago = current - 24 * 60 * 60 # 1 day
checkpoint = connector.build_dummy_checkpoint()
while checkpoint.has_more:
gen = connector.load_from_checkpoint(
one_day_ago, current, checkpoint
)
wrapper = CheckpointOutputWrapper()
any_doc = False
for document, failure, next_checkpoint in wrapper(gen):
if document:
print("got document:", document.id)
any_doc = True
checkpoint = next_checkpoint
if any_doc:
break | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/slack_connector.py | common/data_source/slack_connector.py | """Slack connector"""
import itertools
import logging
import re
from collections.abc import Callable, Generator
from datetime import datetime, timezone
from http.client import IncompleteRead, RemoteDisconnected
from typing import Any, cast
from urllib.error import URLError
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
from slack_sdk.http_retry import ConnectionErrorRetryHandler
from slack_sdk.http_retry.builtin_interval_calculators import FixedValueRetryIntervalCalculator
from common.data_source.config import (
INDEX_BATCH_SIZE, SLACK_NUM_THREADS, ENABLE_EXPENSIVE_EXPERT_CALLS,
_SLACK_LIMIT, FAST_TIMEOUT, MAX_RETRIES, MAX_CHANNELS_TO_LOG
)
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
CredentialExpiredError,
InsufficientPermissionsError,
UnexpectedValidationError
)
from common.data_source.interfaces import (
CheckpointedConnectorWithPermSync,
CredentialsConnector,
SlimConnectorWithPermSync
)
from common.data_source.models import (
BasicExpertInfo,
ConnectorCheckpoint,
ConnectorFailure,
Document,
DocumentFailure,
SlimDocument,
TextSection,
SecondsSinceUnixEpoch,
GenerateSlimDocumentOutput, MessageType, SlackMessageFilterReason, ChannelType, ThreadType, ProcessedSlackMessage,
CheckpointOutput
)
from common.data_source.utils import make_paginated_slack_api_call, SlackTextCleaner, expert_info_from_slack_id, \
get_message_link
# Disallowed message subtypes list
_DISALLOWED_MSG_SUBTYPES = {
"channel_join", "channel_leave", "channel_archive", "channel_unarchive",
"pinned_item", "unpinned_item", "ekm_access_denied", "channel_posting_permissions",
"group_join", "group_leave", "group_archive", "group_unarchive",
"channel_leave", "channel_name", "channel_join",
}
def default_msg_filter(message: MessageType) -> SlackMessageFilterReason | None:
"""Default message filter"""
# Filter bot messages
if message.get("bot_id") or message.get("app_id"):
bot_profile_name = message.get("bot_profile", {}).get("name")
if bot_profile_name == "DanswerBot Testing":
return None
return SlackMessageFilterReason.BOT
# Filter non-informative content
if message.get("subtype", "") in _DISALLOWED_MSG_SUBTYPES:
return SlackMessageFilterReason.DISALLOWED
return None
def _collect_paginated_channels(
client: WebClient,
exclude_archived: bool,
channel_types: list[str],
) -> list[ChannelType]:
"""收集分页的频道列表"""
channels: list[ChannelType] = []
for result in make_paginated_slack_api_call(
client.conversations_list,
exclude_archived=exclude_archived,
types=channel_types,
):
channels.extend(result["channels"])
return channels
def get_channels(
client: WebClient,
exclude_archived: bool = True,
get_public: bool = True,
get_private: bool = True,
) -> list[ChannelType]:
channel_types = []
if get_public:
channel_types.append("public_channel")
if get_private:
channel_types.append("private_channel")
# First try to get public and private channels
try:
channels = _collect_paginated_channels(
client=client,
exclude_archived=exclude_archived,
channel_types=channel_types,
)
except SlackApiError as e:
msg = f"Unable to fetch private channels due to: {e}."
if not get_public:
logging.warning(msg + " Public channels are not enabled.")
return []
logging.warning(msg + " Trying again with public channels only.")
channel_types = ["public_channel"]
channels = _collect_paginated_channels(
client=client,
exclude_archived=exclude_archived,
channel_types=channel_types,
)
return channels
def get_channel_messages(
client: WebClient,
channel: ChannelType,
oldest: str | None = None,
latest: str | None = None,
callback: Any = None,
) -> Generator[list[MessageType], None, None]:
"""Get all messages in a channel"""
# Join channel so bot can access messages
if not channel["is_member"]:
client.conversations_join(
channel=channel["id"],
is_private=channel["is_private"],
)
logging.info(f"Successfully joined '{channel['name']}'")
for result in make_paginated_slack_api_call(
client.conversations_history,
channel=channel["id"],
oldest=oldest,
latest=latest,
):
if callback:
if callback.should_stop():
raise RuntimeError("get_channel_messages: Stop signal detected")
callback.progress("get_channel_messages", 0)
yield cast(list[MessageType], result["messages"])
def get_thread(client: WebClient, channel_id: str, thread_id: str) -> ThreadType:
threads: list[MessageType] = []
for result in make_paginated_slack_api_call(
client.conversations_replies, channel=channel_id, ts=thread_id
):
threads.extend(result["messages"])
return threads
def get_latest_message_time(thread: ThreadType) -> datetime:
max_ts = max([float(msg.get("ts", 0)) for msg in thread])
return datetime.fromtimestamp(max_ts, tz=timezone.utc)
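# Illustrative sketch (not part of the original source): the newest Slack
# "ts" value in a thread determines the document's update time. The helper
# name below is hypothetical.
def _example_latest_message_time() -> None:
    thread = [{"ts": "1700000000.000100"}, {"ts": "1700000500.000200"}]
    expected = datetime.fromtimestamp(1700000500.0002, tz=timezone.utc)
    assert get_latest_message_time(thread) == expected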
def _build_doc_id(channel_id: str, thread_ts: str) -> str:
return f"{channel_id}__{thread_ts}"
def thread_to_doc(
channel: ChannelType,
thread: ThreadType,
slack_cleaner: SlackTextCleaner,
client: WebClient,
user_cache: dict[str, BasicExpertInfo | None],
channel_access: Any | None,
) -> Document:
channel_id = channel["id"]
initial_sender_expert_info = expert_info_from_slack_id(
user_id=thread[0].get("user"), client=client, user_cache=user_cache
)
initial_sender_name = (
initial_sender_expert_info.get_semantic_name()
if initial_sender_expert_info
else "Unknown"
)
valid_experts = None
if ENABLE_EXPENSIVE_EXPERT_CALLS:
all_sender_ids = [m.get("user") for m in thread]
experts = [
expert_info_from_slack_id(
user_id=sender_id, client=client, user_cache=user_cache
)
for sender_id in all_sender_ids
if sender_id
]
valid_experts = [expert for expert in experts if expert]
first_message = slack_cleaner.index_clean(cast(str, thread[0]["text"]))
snippet = (
first_message[:50].rstrip() + "..."
if len(first_message) > 50
else first_message
)
doc_sem_id = f"{initial_sender_name} in #{channel['name']}: {snippet}".replace(
"\n", " "
)
return Document(
id=_build_doc_id(channel_id=channel_id, thread_ts=thread[0]["ts"]),
sections=[
TextSection(
link=get_message_link(event=m, client=client, channel_id=channel_id),
text=slack_cleaner.index_clean(cast(str, m["text"])),
)
for m in thread
],
source="slack",
semantic_identifier=doc_sem_id,
doc_updated_at=get_latest_message_time(thread),
primary_owners=valid_experts,
metadata={"Channel": channel["name"]},
external_access=channel_access,
)
def filter_channels(
all_channels: list[ChannelType],
channels_to_connect: list[str] | None,
regex_enabled: bool,
) -> list[ChannelType]:
if not channels_to_connect:
return all_channels
if regex_enabled:
return [
channel
for channel in all_channels
if any(
re.fullmatch(channel_to_connect, channel["name"])
for channel_to_connect in channels_to_connect
)
]
# Validate all specified channels are valid
all_channel_names = {channel["name"] for channel in all_channels}
for channel in channels_to_connect:
if channel not in all_channel_names:
            raise ValueError(
                f"Channel '{channel}' not found in workspace. "
                f"Available channels (showing {min(len(all_channel_names), MAX_CHANNELS_TO_LOG)} "
                f"of {len(all_channel_names)}): "
                f"{list(itertools.islice(all_channel_names, MAX_CHANNELS_TO_LOG))}"
            )
return [
channel for channel in all_channels if channel["name"] in channels_to_connect
]
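# Illustrative sketch (not part of the original source): exact names are
# validated against the workspace, while regex mode matches channel names
# with re.fullmatch. The helper name below is hypothetical.
def _example_filter_channels() -> None:
    channels = [{"name": "general"}, {"name": "eng-alerts"}]
    assert filter_channels(channels, ["general"], regex_enabled=False) == [channels[0]]
    assert filter_channels(channels, [r"eng-.*"], regex_enabled=True) == [channels[1]]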
def _get_channel_by_id(client: WebClient, channel_id: str) -> ChannelType:
response = client.conversations_info(
channel=channel_id,
)
return cast(ChannelType, response["channel"])
def _get_messages(
channel: ChannelType,
client: WebClient,
oldest: str | None = None,
latest: str | None = None,
limit: int = _SLACK_LIMIT,
) -> tuple[list[MessageType], bool]:
"""Get messages (Slack returns from newest to oldest)"""
# Must join channel to read messages
if not channel["is_member"]:
try:
client.conversations_join(
channel=channel["id"],
is_private=channel["is_private"],
)
except SlackApiError as e:
if e.response["error"] == "is_archived":
logging.warning(f"Channel {channel['name']} is archived. Skipping.")
return [], False
logging.exception(f"Error joining channel {channel['name']}")
raise
logging.info(f"Successfully joined '{channel['name']}'")
response = client.conversations_history(
channel=channel["id"],
oldest=oldest,
latest=latest,
limit=limit,
)
response.validate()
messages = cast(list[MessageType], response.get("messages", []))
cursor = cast(dict[str, Any], response.get("response_metadata", {})).get(
"next_cursor", ""
)
has_more = bool(cursor)
return messages, has_more
def _message_to_doc(
message: MessageType,
client: WebClient,
channel: ChannelType,
slack_cleaner: SlackTextCleaner,
user_cache: dict[str, BasicExpertInfo | None],
seen_thread_ts: set[str],
channel_access: Any | None,
msg_filter_func: Callable[
[MessageType], SlackMessageFilterReason | None
] = default_msg_filter,
) -> tuple[Document | None, SlackMessageFilterReason | None]:
"""Convert message to document"""
filtered_thread: ThreadType | None = None
filter_reason: SlackMessageFilterReason | None = None
thread_ts = message.get("thread_ts")
if thread_ts:
# If thread_ts exists, need to process thread
if thread_ts in seen_thread_ts:
return None, None
thread = get_thread(
client=client, channel_id=channel["id"], thread_id=thread_ts
)
        filtered_thread = []
        # Use a distinct loop variable to avoid shadowing the `message` parameter.
        for thread_message in thread:
            filter_reason = msg_filter_func(thread_message)
            if filter_reason:
                continue
            filtered_thread.append(thread_message)
else:
filter_reason = msg_filter_func(message)
if filter_reason:
return None, filter_reason
filtered_thread = [message]
if not filtered_thread:
return None, filter_reason
doc = thread_to_doc(
channel=channel,
thread=filtered_thread,
slack_cleaner=slack_cleaner,
client=client,
user_cache=user_cache,
channel_access=channel_access,
)
return doc, None
def _process_message(
message: MessageType,
client: WebClient,
channel: ChannelType,
slack_cleaner: SlackTextCleaner,
user_cache: dict[str, BasicExpertInfo | None],
seen_thread_ts: set[str],
channel_access: Any | None,
msg_filter_func: Callable[
[MessageType], SlackMessageFilterReason | None
] = default_msg_filter,
) -> ProcessedSlackMessage:
thread_ts = message.get("thread_ts")
thread_or_message_ts = thread_ts or message["ts"]
try:
doc, filter_reason = _message_to_doc(
message=message,
client=client,
channel=channel,
slack_cleaner=slack_cleaner,
user_cache=user_cache,
seen_thread_ts=seen_thread_ts,
channel_access=channel_access,
msg_filter_func=msg_filter_func,
)
return ProcessedSlackMessage(
doc=doc,
thread_or_message_ts=thread_or_message_ts,
filter_reason=filter_reason,
failure=None,
)
except Exception as e:
        logging.exception(f"Error processing message {message['ts']}")
return ProcessedSlackMessage(
doc=None,
thread_or_message_ts=thread_or_message_ts,
filter_reason=None,
failure=ConnectorFailure(
failed_document=DocumentFailure(
document_id=_build_doc_id(
channel_id=channel["id"], thread_ts=thread_or_message_ts
),
document_link=get_message_link(message, client, channel["id"]),
),
failure_message=str(e),
exception=e,
),
)
def _get_all_doc_ids(
client: WebClient,
channels: list[str] | None = None,
channel_name_regex_enabled: bool = False,
msg_filter_func: Callable[
[MessageType], SlackMessageFilterReason | None
] = default_msg_filter,
callback: Any = None,
) -> GenerateSlimDocumentOutput:
all_channels = get_channels(client)
filtered_channels = filter_channels(
all_channels, channels, channel_name_regex_enabled
)
for channel in filtered_channels:
channel_id = channel["id"]
external_access = None # Simplified version, not handling permissions
channel_message_batches = get_channel_messages(
client=client,
channel=channel,
callback=callback,
)
for message_batch in channel_message_batches:
slim_doc_batch: list[SlimDocument] = []
for message in message_batch:
filter_reason = msg_filter_func(message)
if filter_reason:
continue
slim_doc_batch.append(
SlimDocument(
id=_build_doc_id(
channel_id=channel_id, thread_ts=message["ts"]
),
external_access=external_access,
)
)
yield slim_doc_batch
class SlackConnector(
SlimConnectorWithPermSync,
CredentialsConnector,
CheckpointedConnectorWithPermSync,
):
"""Slack connector"""
def __init__(
self,
channels: list[str] | None = None,
channel_regex_enabled: bool = False,
batch_size: int = INDEX_BATCH_SIZE,
num_threads: int = SLACK_NUM_THREADS,
use_redis: bool = False, # Simplified version, not using Redis
) -> None:
self.channels = channels
self.channel_regex_enabled = channel_regex_enabled
self.batch_size = batch_size
self.num_threads = num_threads
self.client: WebClient | None = None
self.fast_client: WebClient | None = None
self.text_cleaner: SlackTextCleaner | None = None
self.user_cache: dict[str, BasicExpertInfo | None] = {}
self.credentials_provider: Any = None
self.use_redis = use_redis
@property
def channels(self) -> list[str] | None:
return self._channels
@channels.setter
def channels(self, channels: list[str] | None) -> None:
self._channels = (
[channel.removeprefix("#") for channel in channels] if channels else None
)
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Load credentials"""
raise NotImplementedError("Use set_credentials_provider with this connector.")
def set_credentials_provider(self, credentials_provider: Any) -> None:
"""Set credentials provider"""
credentials = credentials_provider.get_credentials()
bot_token = credentials["slack_bot_token"]
# Simplified version, not using Redis
connection_error_retry_handler = ConnectionErrorRetryHandler(
max_retry_count=MAX_RETRIES,
interval_calculator=FixedValueRetryIntervalCalculator(),
error_types=[
URLError,
ConnectionResetError,
RemoteDisconnected,
IncompleteRead,
],
)
self.client = WebClient(
token=bot_token, retry_handlers=[connection_error_retry_handler]
)
# For fast response requests
self.fast_client = WebClient(
token=bot_token, timeout=FAST_TIMEOUT
)
self.text_cleaner = SlackTextCleaner(client=self.client)
self.credentials_provider = credentials_provider
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: Any = None,
) -> GenerateSlimDocumentOutput:
if self.client is None:
raise ConnectorMissingCredentialError("Slack")
return _get_all_doc_ids(
client=self.client,
channels=self.channels,
channel_name_regex_enabled=self.channel_regex_enabled,
callback=callback,
)
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ConnectorCheckpoint,
) -> CheckpointOutput:
"""Load documents from checkpoint"""
# Simplified version, not implementing full checkpoint functionality
logging.warning("Checkpoint functionality not implemented in simplified version")
return []
def load_from_checkpoint_with_perm_sync(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ConnectorCheckpoint,
) -> CheckpointOutput:
"""Load documents from checkpoint (with permission sync)"""
# Simplified version, not implementing full checkpoint functionality
logging.warning("Checkpoint functionality not implemented in simplified version")
return []
def build_dummy_checkpoint(self) -> ConnectorCheckpoint:
"""Build dummy checkpoint"""
return ConnectorCheckpoint()
def validate_checkpoint_json(self, checkpoint_json: str) -> ConnectorCheckpoint:
"""Validate checkpoint JSON"""
return ConnectorCheckpoint()
def validate_connector_settings(self) -> None:
"""Validate connector settings"""
if self.fast_client is None:
raise ConnectorMissingCredentialError("Slack credentials not loaded.")
try:
# 1) Validate workspace connection
auth_response = self.fast_client.auth_test()
if not auth_response.get("ok", False):
error_msg = auth_response.get(
"error", "Unknown error from Slack auth_test"
)
raise ConnectorValidationError(f"Failed Slack auth_test: {error_msg}")
# 2) Confirm listing channels functionality works
test_resp = self.fast_client.conversations_list(
limit=1, types=["public_channel"]
)
if not test_resp.get("ok", False):
error_msg = test_resp.get("error", "Unknown error from Slack")
if error_msg == "invalid_auth":
raise ConnectorValidationError(
f"Invalid Slack bot token ({error_msg})."
)
elif error_msg == "not_authed":
raise CredentialExpiredError(
f"Invalid or expired Slack bot token ({error_msg})."
)
raise UnexpectedValidationError(
f"Slack API returned a failure: {error_msg}"
)
except SlackApiError as e:
slack_error = e.response.get("error", "")
if slack_error == "ratelimited":
retry_after = int(e.response.headers.get("Retry-After", 1))
logging.warning(
f"Slack API rate limited during validation. Retry suggested after {retry_after} seconds. "
"Proceeding with validation, but be aware that connector operations might be throttled."
)
return
elif slack_error == "missing_scope":
raise InsufficientPermissionsError(
"Slack bot token lacks the necessary scope to list/access channels. "
"Please ensure your Slack app has 'channels:read' (and/or 'groups:read' for private channels)."
)
elif slack_error == "invalid_auth":
raise CredentialExpiredError(
f"Invalid Slack bot token ({slack_error})."
)
elif slack_error == "not_authed":
raise CredentialExpiredError(
f"Invalid or expired Slack bot token ({slack_error})."
)
raise UnexpectedValidationError(
f"Unexpected Slack error '{slack_error}' during settings validation."
)
except ConnectorValidationError as e:
raise e
except Exception as e:
raise UnexpectedValidationError(
f"Unexpected error during Slack settings validation: {e}"
)
if __name__ == "__main__":
# Example usage
import os
slack_channel = os.environ.get("SLACK_CHANNEL")
connector = SlackConnector(
channels=[slack_channel] if slack_channel else None,
)
# Simplified version, directly using credentials dictionary
credentials = {
"slack_bot_token": os.environ.get("SLACK_BOT_TOKEN", "test-token")
}
class SimpleCredentialsProvider:
def get_credentials(self):
return credentials
provider = SimpleCredentialsProvider()
connector.set_credentials_provider(provider)
try:
connector.validate_connector_settings()
print("Slack connector settings validated successfully")
except Exception as e:
print(f"Validation failed: {e}") | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/exceptions.py | common/data_source/exceptions.py | """Exception class definitions"""
class ConnectorMissingCredentialError(Exception):
"""Missing credentials exception"""
def __init__(self, connector_name: str):
super().__init__(f"Missing credentials for {connector_name}")
class ConnectorValidationError(Exception):
"""Connector validation exception"""
pass
class CredentialExpiredError(Exception):
"""Credential expired exception"""
pass
class InsufficientPermissionsError(Exception):
"""Insufficient permissions exception"""
pass
class UnexpectedValidationError(Exception):
"""Unexpected validation exception"""
pass
class RateLimitTriedTooManyTimesError(Exception):
pass | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/box_connector.py | common/data_source/box_connector.py | """Box connector"""
import logging
from datetime import datetime, timezone
from typing import Any
from box_sdk_gen import BoxClient
from common.data_source.config import DocumentSource, INDEX_BATCH_SIZE
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
)
from common.data_source.interfaces import LoadConnector, PollConnector, SecondsSinceUnixEpoch
from common.data_source.models import Document, GenerateDocumentsOutput
from common.data_source.utils import get_file_ext
class BoxConnector(LoadConnector, PollConnector):
def __init__(self, folder_id: str, batch_size: int = INDEX_BATCH_SIZE, use_marker: bool = True) -> None:
self.batch_size = batch_size
self.folder_id = "0" if not folder_id else folder_id
self.use_marker = use_marker
def load_credentials(self, auth: Any):
self.box_client = BoxClient(auth=auth)
return None
def validate_connector_settings(self):
if self.box_client is None:
raise ConnectorMissingCredentialError("Box")
try:
self.box_client.users.get_user_me()
except Exception as e:
logging.exception("[Box]: Failed to validate Box credentials")
raise ConnectorValidationError(f"Unexpected error during Box settings validation: {e}")
def _yield_files_recursive(
self,
folder_id,
start: SecondsSinceUnixEpoch | None,
end: SecondsSinceUnixEpoch | None
) -> GenerateDocumentsOutput:
if self.box_client is None:
raise ConnectorMissingCredentialError("Box")
result = self.box_client.folders.get_folder_items(
folder_id=folder_id,
limit=self.batch_size,
usemarker=self.use_marker
)
while True:
batch: list[Document] = []
for entry in result.entries:
                if entry.type == 'file':
                    file = self.box_client.files.get_file_by_id(
                        entry.id
                    )
                    raw_time = (
                        getattr(file, "created_at", None)
                        or getattr(file, "content_created_at", None)
                    )
                    # Initialize so a file without timestamps cannot raise a
                    # NameError or reuse a stale value from a prior iteration.
                    modified_time = None
                    if raw_time:
                        modified_time = self._box_datetime_to_epoch_seconds(raw_time)
                        if start is not None and modified_time <= start:
                            continue
                        if end is not None and modified_time > end:
                            continue
content_bytes = self.box_client.downloads.download_file(file.id)
batch.append(
Document(
id=f"box:{file.id}",
blob=content_bytes.read(),
source=DocumentSource.BOX,
semantic_identifier=file.name,
extension=get_file_ext(file.name),
doc_updated_at=modified_time,
size_bytes=file.size,
metadata=file.metadata
)
)
elif entry.type == 'folder':
yield from self._yield_files_recursive(folder_id=entry.id, start=start, end=end)
if batch:
yield batch
if not result.next_marker:
break
result = self.box_client.folders.get_folder_items(
folder_id=folder_id,
limit=self.batch_size,
marker=result.next_marker,
usemarker=True
)
def _box_datetime_to_epoch_seconds(self, dt: datetime) -> SecondsSinceUnixEpoch:
"""Convert a Box SDK datetime to Unix epoch seconds (UTC).
Only supports datetime; any non-datetime should be filtered out by caller.
"""
if not isinstance(dt, datetime):
raise TypeError(f"box_datetime_to_epoch_seconds expects datetime, got {type(dt)}")
if dt.tzinfo is None:
dt = dt.replace(tzinfo=timezone.utc)
else:
dt = dt.astimezone(timezone.utc)
return SecondsSinceUnixEpoch(int(dt.timestamp()))
def poll_source(self, start, end):
return self._yield_files_recursive(folder_id=self.folder_id, start=start, end=end)
def load_from_state(self):
return self._yield_files_recursive(folder_id=self.folder_id, start=None, end=None)
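# Illustrative sketch (not part of the original source): naive datetimes are
# interpreted as UTC before conversion to epoch seconds. The helper name
# below is hypothetical.
def _example_box_epoch_seconds() -> None:
    connector = BoxConnector(folder_id="0")
    naive = datetime(2024, 1, 1)  # no tzinfo, so treated as UTC
    assert connector._box_datetime_to_epoch_seconds(naive) == 1704067200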
# from flask import Flask, request, redirect
# from box_sdk_gen import BoxClient, BoxOAuth, OAuthConfig, GetAuthorizeUrlOptions
# app = Flask(__name__)
# AUTH = BoxOAuth(
#     OAuthConfig(client_id="<BOX_CLIENT_ID>", client_secret="<BOX_CLIENT_SECRET>")
# )
# @app.route("/")
# def get_auth():
# auth_url = AUTH.get_authorize_url(
# options=GetAuthorizeUrlOptions(redirect_uri="http://localhost:4999/oauth2callback")
# )
# return redirect(auth_url, code=302)
# @app.route("/oauth2callback")
# def callback():
# AUTH.get_tokens_authorization_code_grant(request.args.get("code"))
# box = BoxConnector()
#     box.load_credentials(AUTH)
# lst = []
# for file in box.load_from_state():
# for f in file:
# lst.append(f.semantic_identifier)
# return lst
if __name__ == "__main__":
pass
# app.run(port=4999) | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/asana_connector.py | common/data_source/asana_connector.py | from collections.abc import Iterator
import time
from datetime import datetime
import logging
from typing import Any, Dict
import asana
import requests
from common.data_source.config import CONTINUE_ON_CONNECTOR_FAILURE, INDEX_BATCH_SIZE, DocumentSource
from common.data_source.interfaces import LoadConnector, PollConnector
from common.data_source.models import Document, GenerateDocumentsOutput, SecondsSinceUnixEpoch
from common.data_source.utils import extract_size_bytes, get_file_ext
# https://github.com/Asana/python-asana/tree/master?tab=readme-ov-file#documentation-for-api-endpoints
class AsanaTask:
def __init__(
self,
id: str,
title: str,
text: str,
link: str,
last_modified: datetime,
project_gid: str,
project_name: str,
) -> None:
self.id = id
self.title = title
self.text = text
self.link = link
self.last_modified = last_modified
self.project_gid = project_gid
self.project_name = project_name
def __str__(self) -> str:
return f"ID: {self.id}\nTitle: {self.title}\nLast modified: {self.last_modified}\nText: {self.text}"
class AsanaAPI:
def __init__(
self, api_token: str, workspace_gid: str, team_gid: str | None
) -> None:
        self._user_cache: Dict[str, Dict] = {}
self.workspace_gid = workspace_gid
self.team_gid = team_gid
self.configuration = asana.Configuration()
self.api_client = asana.ApiClient(self.configuration)
self.tasks_api = asana.TasksApi(self.api_client)
self.attachments_api = asana.AttachmentsApi(self.api_client)
self.stories_api = asana.StoriesApi(self.api_client)
self.users_api = asana.UsersApi(self.api_client)
self.project_api = asana.ProjectsApi(self.api_client)
self.project_memberships_api = asana.ProjectMembershipsApi(self.api_client)
self.workspaces_api = asana.WorkspacesApi(self.api_client)
self.api_error_count = 0
self.configuration.access_token = api_token
self.task_count = 0
def get_tasks(
self, project_gids: list[str] | None, start_date: str
) -> Iterator[AsanaTask]:
"""Get all tasks from the projects with the given gids that were modified since the given date.
If project_gids is None, get all tasks from all projects in the workspace."""
logging.info("Starting to fetch Asana projects")
projects = self.project_api.get_projects(
opts={
"workspace": self.workspace_gid,
"opt_fields": "gid,name,archived,modified_at",
}
)
start_seconds = int(time.mktime(datetime.now().timetuple()))
projects_list = []
project_count = 0
for project_info in projects:
project_gid = project_info["gid"]
if project_gids is None or project_gid in project_gids:
projects_list.append(project_gid)
else:
logging.debug(
f"Skipping project: {project_gid} - not in accepted project_gids"
)
project_count += 1
if project_count % 100 == 0:
logging.info(f"Processed {project_count} projects")
logging.info(f"Found {len(projects_list)} projects to process")
for project_gid in projects_list:
for task in self._get_tasks_for_project(
project_gid, start_date, start_seconds
):
yield task
logging.info(f"Completed fetching {self.task_count} tasks from Asana")
if self.api_error_count > 0:
logging.warning(
f"Encountered {self.api_error_count} API errors during task fetching"
)
def _get_tasks_for_project(
self, project_gid: str, start_date: str, start_seconds: int
) -> Iterator[AsanaTask]:
project = self.project_api.get_project(project_gid, opts={})
project_name = project.get("name", project_gid)
team = project.get("team") or {}
team_gid = team.get("gid")
if project.get("archived"):
logging.info(f"Skipping archived project: {project_name} ({project_gid})")
return
if not team_gid:
logging.info(
f"Skipping project without a team: {project_name} ({project_gid})"
)
return
if project.get("privacy_setting") == "private":
if self.team_gid and team_gid != self.team_gid:
logging.info(
f"Skipping private project not in configured team: {project_name} ({project_gid})"
)
return
logging.info(
f"Processing private project in configured team: {project_name} ({project_gid})"
)
simple_start_date = start_date.split(".")[0].split("+")[0]
logging.info(
f"Fetching tasks modified since {simple_start_date} for project: {project_name} ({project_gid})"
)
opts = {
"opt_fields": "name,memberships,memberships.project,completed_at,completed_by,created_at,"
"created_by,custom_fields,dependencies,due_at,due_on,external,html_notes,liked,likes,"
"modified_at,notes,num_hearts,parent,projects,resource_subtype,resource_type,start_on,"
"workspace,permalink_url",
"modified_since": start_date,
}
tasks_from_api = self.tasks_api.get_tasks_for_project(project_gid, opts)
for data in tasks_from_api:
self.task_count += 1
if self.task_count % 10 == 0:
end_seconds = time.mktime(datetime.now().timetuple())
runtime_seconds = end_seconds - start_seconds
if runtime_seconds > 0:
logging.info(
f"Processed {self.task_count} tasks in {runtime_seconds:.0f} seconds "
f"({self.task_count / runtime_seconds:.2f} tasks/second)"
)
logging.debug(f"Processing Asana task: {data['name']}")
text = self._construct_task_text(data)
try:
text += self._fetch_and_add_comments(data["gid"])
last_modified_date = self.format_date(data["modified_at"])
text += f"Last modified: {last_modified_date}\n"
task = AsanaTask(
id=data["gid"],
title=data["name"],
text=text,
link=data["permalink_url"],
last_modified=datetime.fromisoformat(data["modified_at"]),
project_gid=project_gid,
project_name=project_name,
)
yield task
except Exception:
logging.error(
f"Error processing task {data['gid']} in project {project_gid}",
exc_info=True,
)
self.api_error_count += 1
def _construct_task_text(self, data: Dict) -> str:
text = f"{data['name']}\n\n"
if data["notes"]:
text += f"{data['notes']}\n\n"
if data["created_by"] and data["created_by"]["gid"]:
creator = self.get_user(data["created_by"]["gid"])["name"]
created_date = self.format_date(data["created_at"])
text += f"Created by: {creator} on {created_date}\n"
if data["due_on"]:
due_date = self.format_date(data["due_on"])
text += f"Due date: {due_date}\n"
if data["completed_at"]:
completed_date = self.format_date(data["completed_at"])
text += f"Completed on: {completed_date}\n"
text += "\n"
return text
def _fetch_and_add_comments(self, task_gid: str) -> str:
text = ""
stories_opts: Dict[str, str] = {}
story_start = time.time()
stories = self.stories_api.get_stories_for_task(task_gid, stories_opts)
story_count = 0
comment_count = 0
for story in stories:
story_count += 1
if story["resource_subtype"] == "comment_added":
comment = self.stories_api.get_story(
story["gid"], opts={"opt_fields": "text,created_by,created_at"}
)
commenter = self.get_user(comment["created_by"]["gid"])["name"]
text += f"Comment by {commenter}: {comment['text']}\n\n"
comment_count += 1
story_duration = time.time() - story_start
logging.debug(
f"Processed {story_count} stories (including {comment_count} comments) in {story_duration:.2f} seconds"
)
return text
def get_attachments(self, task_gid: str) -> list[dict]:
"""
Fetch full attachment info (including download_url) for a task.
"""
attachments: list[dict] = []
try:
# Step 1: list attachment compact records
for att in self.attachments_api.get_attachments_for_object(
parent=task_gid,
opts={}
):
gid = att.get("gid")
if not gid:
continue
try:
# Step 2: expand to full attachment
full = self.attachments_api.get_attachment(
attachment_gid=gid,
opts={
"opt_fields": "name,download_url,size,created_at"
}
)
if full.get("download_url"):
attachments.append(full)
except Exception:
logging.exception(
f"Failed to fetch attachment detail {gid} for task {task_gid}"
)
self.api_error_count += 1
except Exception:
logging.exception(f"Failed to list attachments for task {task_gid}")
self.api_error_count += 1
return attachments
def get_accessible_emails(
self,
workspace_id: str,
project_ids: list[str] | None,
team_id: str | None,
):
ws_users = self.users_api.get_users(
opts={
"workspace": workspace_id,
"opt_fields": "gid,name,email"
}
)
workspace_users = {
u["gid"]: u.get("email")
for u in ws_users
if u.get("email")
}
if not project_ids:
return set(workspace_users.values())
project_emails = set()
for pid in project_ids:
project = self.project_api.get_project(
pid,
opts={"opt_fields": "team,privacy_setting"}
)
if project["privacy_setting"] == "private":
if team_id and project.get("team", {}).get("gid") != team_id:
continue
            memberships = self.project_memberships_api.get_project_memberships_for_project(
                pid,
                opts={"opt_fields": "user.gid,user.email"}
            )
for m in memberships:
email = m["user"].get("email")
if email:
project_emails.add(email)
return project_emails
    def get_user(self, user_gid: str) -> Dict:
        # Cache per user gid; a single shared slot would attribute every
        # comment to whichever user happened to be fetched first.
        cached = self._user_cache.get(user_gid)
        if cached is not None:
            return cached
        user = self.users_api.get_user(user_gid, {"opt_fields": "name,email"})
        if not user:
            logging.warning(f"Unable to fetch user information for user_gid: {user_gid}")
            return {"name": "Unknown"}
        self._user_cache[user_gid] = user
        return user
def format_date(self, date_str: str) -> str:
date = datetime.fromisoformat(date_str)
return time.strftime("%Y-%m-%d", date.timetuple())
def get_time(self) -> str:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
class AsanaConnector(LoadConnector, PollConnector):
def __init__(
self,
asana_workspace_id: str,
asana_project_ids: str | None = None,
asana_team_id: str | None = None,
batch_size: int = INDEX_BATCH_SIZE,
continue_on_failure: bool = CONTINUE_ON_CONNECTOR_FAILURE,
) -> None:
self.workspace_id = asana_workspace_id
self.project_ids_to_index: list[str] | None = (
asana_project_ids.split(",") if asana_project_ids else None
)
self.asana_team_id = asana_team_id if asana_team_id else None
self.batch_size = batch_size
self.continue_on_failure = continue_on_failure
self.size_threshold = None
logging.info(
f"AsanaConnector initialized with workspace_id: {asana_workspace_id}"
)
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
self.api_token = credentials["asana_api_token_secret"]
self.asana_client = AsanaAPI(
api_token=self.api_token,
workspace_gid=self.workspace_id,
team_gid=self.asana_team_id,
)
self.workspace_users_email = self.asana_client.get_accessible_emails(self.workspace_id, self.project_ids_to_index, self.asana_team_id)
logging.info("Asana credentials loaded and API client initialized")
return None
def poll_source(
self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch | None
) -> GenerateDocumentsOutput:
start_time = datetime.fromtimestamp(start).isoformat()
logging.info(f"Starting Asana poll from {start_time}")
docs_batch: list[Document] = []
tasks = self.asana_client.get_tasks(self.project_ids_to_index, start_time)
for task in tasks:
docs = self._task_to_documents(task)
docs_batch.extend(docs)
if len(docs_batch) >= self.batch_size:
logging.info(f"Yielding batch of {len(docs_batch)} documents")
yield docs_batch
docs_batch = []
if docs_batch:
logging.info(f"Yielding final batch of {len(docs_batch)} documents")
yield docs_batch
logging.info("Asana poll completed")
def load_from_state(self) -> GenerateDocumentsOutput:
logging.info("Starting full index of all Asana tasks")
return self.poll_source(start=0, end=None)
def _task_to_documents(self, task: AsanaTask) -> list[Document]:
docs: list[Document] = []
attachments = self.asana_client.get_attachments(task.id)
for att in attachments:
try:
resp = requests.get(att["download_url"], timeout=30)
resp.raise_for_status()
file_blob = resp.content
filename = att.get("name", "attachment")
size_bytes = extract_size_bytes(att)
if (
self.size_threshold is not None
and isinstance(size_bytes, int)
and size_bytes > self.size_threshold
):
logging.warning(
f"{filename} exceeds size threshold of {self.size_threshold}. Skipping."
)
continue
docs.append(
Document(
id=f"asana:{task.id}:{att['gid']}",
blob=file_blob,
extension=get_file_ext(filename) or "",
size_bytes=size_bytes,
doc_updated_at=task.last_modified,
source=DocumentSource.ASANA,
semantic_identifier=filename,
primary_owners=list(self.workspace_users_email),
)
)
except Exception:
logging.exception(
f"Failed to download attachment {att.get('gid')} for task {task.id}"
)
return docs
if __name__ == "__main__":
import time
import os
logging.info("Starting Asana connector test")
connector = AsanaConnector(
os.environ["WORKSPACE_ID"],
os.environ["PROJECT_IDS"],
os.environ["TEAM_ID"],
)
connector.load_credentials(
{
"asana_api_token_secret": os.environ["API_TOKEN"],
}
)
logging.info("Loading all documents from Asana")
all_docs = connector.load_from_state()
current = time.time()
one_day_ago = current - 24 * 60 * 60 # 1 day
logging.info("Polling for documents updated in the last 24 hours")
latest_docs = connector.poll_source(one_day_ago, current)
for docs in all_docs:
for doc in docs:
print(doc.id)
logging.info("Asana connector test completed") | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/gitlab_connector.py | common/data_source/gitlab_connector.py | import fnmatch
import itertools
from collections import deque
from collections.abc import Iterable
from collections.abc import Iterator
from datetime import datetime
from datetime import timezone
from typing import Any
from typing import TypeVar
import gitlab
from gitlab.v4.objects import Project
from common.data_source.config import DocumentSource, INDEX_BATCH_SIZE
from common.data_source.exceptions import ConnectorMissingCredentialError
from common.data_source.exceptions import ConnectorValidationError
from common.data_source.exceptions import CredentialExpiredError
from common.data_source.exceptions import InsufficientPermissionsError
from common.data_source.exceptions import UnexpectedValidationError
from common.data_source.interfaces import GenerateDocumentsOutput
from common.data_source.interfaces import LoadConnector
from common.data_source.interfaces import PollConnector
from common.data_source.interfaces import SecondsSinceUnixEpoch
from common.data_source.models import BasicExpertInfo
from common.data_source.models import Document
from common.data_source.utils import get_file_ext
T = TypeVar("T")
# List of directories/files to exclude
exclude_patterns = [
"logs",
".github/",
".gitlab/",
".pre-commit-config.yaml",
]
def _batch_gitlab_objects(git_objs: Iterable[T], batch_size: int) -> Iterator[list[T]]:
it = iter(git_objs)
while True:
batch = list(itertools.islice(it, batch_size))
if not batch:
break
yield batch
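# Illustrative sketch (not part of the original source): any iterable is
# consumed lazily into fixed-size batches, with a shorter final batch. The
# helper name below is hypothetical.
def _example_batch_gitlab_objects() -> None:
    assert list(_batch_gitlab_objects(range(5), batch_size=2)) == [[0, 1], [2, 3], [4]]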
def get_author(author: Any) -> BasicExpertInfo:
return BasicExpertInfo(
display_name=author.get("name"),
)
def _convert_merge_request_to_document(mr: Any) -> Document:
mr_text = mr.description or ""
doc = Document(
id=mr.web_url,
blob=mr_text,
source=DocumentSource.GITLAB,
semantic_identifier=mr.title,
extension=".md",
# updated_at is UTC time but is timezone unaware, explicitly add UTC
# as there is logic in indexing to prevent wrong timestamped docs
# due to local time discrepancies with UTC
doc_updated_at=mr.updated_at.replace(tzinfo=timezone.utc),
size_bytes=len(mr_text.encode("utf-8")),
primary_owners=[get_author(mr.author)],
metadata={"state": mr.state, "type": "MergeRequest", "web_url": mr.web_url},
)
return doc
def _convert_issue_to_document(issue: Any) -> Document:
issue_text = issue.description or ""
doc = Document(
id=issue.web_url,
blob=issue_text,
source=DocumentSource.GITLAB,
semantic_identifier=issue.title,
extension=".md",
# updated_at is UTC time but is timezone unaware, explicitly add UTC
# as there is logic in indexing to prevent wrong timestamped docs
# due to local time discrepancies with UTC
doc_updated_at=issue.updated_at.replace(tzinfo=timezone.utc),
size_bytes=len(issue_text.encode("utf-8")),
primary_owners=[get_author(issue.author)],
metadata={
"state": issue.state,
"type": issue.type if issue.type else "Issue",
"web_url": issue.web_url,
},
)
return doc
def _convert_code_to_document(
    project: Project, file: Any, url: str, project_name: str, project_owner: str
) -> Document:
# Dynamically get the default branch from the project object
default_branch = project.default_branch
# Fetch the file content using the correct branch
file_content_obj = project.files.get(
file_path=file["path"], ref=default_branch # Use the default branch
)
# BoxConnector uses raw bytes for blob. Keep the same here.
file_content_bytes = file_content_obj.decode()
file_url = f"{url}/{projectOwner}/{projectName}/-/blob/{default_branch}/{file['path']}"
# Try to use the last commit timestamp for incremental sync.
# Falls back to "now" if the commit lookup fails.
last_commit_at = None
try:
# Query commit history for this file on the default branch.
commits = project.commits.list(
ref_name=default_branch,
path=file["path"],
per_page=1,
)
if commits:
# committed_date is ISO string like "2024-01-01T00:00:00.000+00:00"
committed_date = commits[0].committed_date
if isinstance(committed_date, str):
last_commit_at = datetime.strptime(
committed_date, "%Y-%m-%dT%H:%M:%S.%f%z"
).astimezone(timezone.utc)
elif isinstance(committed_date, datetime):
last_commit_at = committed_date.astimezone(timezone.utc)
except Exception:
last_commit_at = None
# Create and return a Document object
doc = Document(
# Use a stable ID so reruns don't create duplicates.
id=file_url,
blob=file_content_bytes,
source=DocumentSource.GITLAB,
semantic_identifier=file.get("name"),
extension=get_file_ext(file.get("name")),
doc_updated_at=last_commit_at or datetime.now(tz=timezone.utc),
size_bytes=len(file_content_bytes) if file_content_bytes is not None else 0,
primary_owners=[], # Add owners if needed
metadata={
"type": "CodeFile",
"path": file.get("path"),
"ref": default_branch,
"project": f"{projectOwner}/{projectName}",
"web_url": file_url,
},
)
return doc
def _should_exclude(path: str) -> bool:
"""Check if a path matches any of the exclude patterns."""
return any(fnmatch.fnmatch(path, pattern) for pattern in exclude_patterns)
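# Illustrative sketch (not part of the original source): patterns are matched
# with fnmatch, so "logs" only excludes a path named exactly "logs" unless
# wildcards are added. The helper name below is hypothetical.
def _example_should_exclude() -> None:
    assert _should_exclude(".pre-commit-config.yaml")
    assert not _should_exclude("src/app/logs.py")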
class GitlabConnector(LoadConnector, PollConnector):
def __init__(
self,
project_owner: str,
project_name: str,
batch_size: int = INDEX_BATCH_SIZE,
state_filter: str = "all",
include_mrs: bool = True,
include_issues: bool = True,
include_code_files: bool = False,
) -> None:
self.project_owner = project_owner
self.project_name = project_name
self.batch_size = batch_size
self.state_filter = state_filter
self.include_mrs = include_mrs
self.include_issues = include_issues
self.include_code_files = include_code_files
self.gitlab_client: gitlab.Gitlab | None = None
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
self.gitlab_client = gitlab.Gitlab(
credentials["gitlab_url"], private_token=credentials["gitlab_access_token"]
)
return None
def validate_connector_settings(self) -> None:
if self.gitlab_client is None:
raise ConnectorMissingCredentialError("GitLab")
try:
self.gitlab_client.auth()
self.gitlab_client.projects.get(
f"{self.project_owner}/{self.project_name}",
lazy=True,
)
except gitlab.exceptions.GitlabAuthenticationError as e:
raise CredentialExpiredError(
"Invalid or expired GitLab credentials."
) from e
except gitlab.exceptions.GitlabAuthorizationError as e:
raise InsufficientPermissionsError(
"Insufficient permissions to access GitLab resources."
) from e
except gitlab.exceptions.GitlabGetError as e:
raise ConnectorValidationError(
"GitLab project not found or not accessible."
) from e
except Exception as e:
raise UnexpectedValidationError(
f"Unexpected error while validating GitLab settings: {e}"
) from e
def _fetch_from_gitlab(
self, start: datetime | None = None, end: datetime | None = None
) -> GenerateDocumentsOutput:
if self.gitlab_client is None:
raise ConnectorMissingCredentialError("Gitlab")
project: Project = self.gitlab_client.projects.get(
f"{self.project_owner}/{self.project_name}"
)
start_utc = start.astimezone(timezone.utc) if start else None
end_utc = end.astimezone(timezone.utc) if end else None
# Fetch code files
if self.include_code_files:
# Fetching using BFS as project.report_tree with recursion causing slow load
queue = deque([""]) # Start with the root directory
while queue:
current_path = queue.popleft()
files = project.repository_tree(path=current_path, all=True)
for file_batch in _batch_gitlab_objects(files, self.batch_size):
code_doc_batch: list[Document] = []
for file in file_batch:
if _should_exclude(file["path"]):
continue
if file["type"] == "blob":
doc = _convert_code_to_document(
project,
file,
self.gitlab_client.url,
self.project_name,
self.project_owner,
)
# Apply incremental window filtering for code files too.
if start_utc is not None and doc.doc_updated_at <= start_utc:
continue
if end_utc is not None and doc.doc_updated_at > end_utc:
continue
code_doc_batch.append(doc)
elif file["type"] == "tree":
queue.append(file["path"])
if code_doc_batch:
yield code_doc_batch
if self.include_mrs:
merge_requests = project.mergerequests.list(
state=self.state_filter,
order_by="updated_at",
sort="desc",
iterator=True,
)
for mr_batch in _batch_gitlab_objects(merge_requests, self.batch_size):
mr_doc_batch: list[Document] = []
for mr in mr_batch:
mr.updated_at = datetime.strptime(
mr.updated_at, "%Y-%m-%dT%H:%M:%S.%f%z"
)
if start_utc is not None and mr.updated_at <= start_utc:
yield mr_doc_batch
return
if end_utc is not None and mr.updated_at > end_utc:
continue
mr_doc_batch.append(_convert_merge_request_to_document(mr))
yield mr_doc_batch
if self.include_issues:
            # Order by updated_at desc so the early-return window check below
            # (which assumes newest-first ordering) is valid for issues too.
            issues = project.issues.list(
                state=self.state_filter,
                order_by="updated_at",
                sort="desc",
                iterator=True,
            )
for issue_batch in _batch_gitlab_objects(issues, self.batch_size):
issue_doc_batch: list[Document] = []
for issue in issue_batch:
issue.updated_at = datetime.strptime(
issue.updated_at, "%Y-%m-%dT%H:%M:%S.%f%z"
)
# Avoid re-syncing the last-seen item.
if start_utc is not None and issue.updated_at <= start_utc:
yield issue_doc_batch
return
if end_utc is not None and issue.updated_at > end_utc:
continue
issue_doc_batch.append(_convert_issue_to_document(issue))
yield issue_doc_batch
def load_from_state(self) -> GenerateDocumentsOutput:
return self._fetch_from_gitlab()
def poll_source(
self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch
) -> GenerateDocumentsOutput:
start_datetime = datetime.fromtimestamp(start, tz=timezone.utc)
end_datetime = datetime.fromtimestamp(end, tz=timezone.utc)
return self._fetch_from_gitlab(start_datetime, end_datetime)
if __name__ == "__main__":
import os
connector = GitlabConnector(
# gitlab_url="https://gitlab.com/api/v4",
project_owner=os.environ["PROJECT_OWNER"],
project_name=os.environ["PROJECT_NAME"],
batch_size=INDEX_BATCH_SIZE,
state_filter="all",
include_mrs=True,
include_issues=True,
include_code_files=True,
)
connector.load_credentials(
{
"gitlab_access_token": os.environ["GITLAB_ACCESS_TOKEN"],
"gitlab_url": os.environ["GITLAB_URL"],
}
)
document_batches = connector.load_from_state()
for f in document_batches:
print("Batch:", f)
print("Finished loading from state.") | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/utils.py | common/data_source/utils.py | """Utility functions for all connectors"""
import base64
import contextvars
import json
import logging
import math
import os
import re
import threading
import time
from collections.abc import Callable, Generator, Iterator, Mapping, Sequence
from concurrent.futures import FIRST_COMPLETED, Future, ThreadPoolExecutor, as_completed, wait
from datetime import datetime, timedelta, timezone
from functools import lru_cache, wraps
from io import BytesIO
from itertools import islice
from numbers import Integral
from pathlib import Path
from typing import IO, Any, Generic, Iterable, Optional, Protocol, TypeVar, cast
from urllib.parse import parse_qs, quote, urljoin, urlparse
import boto3
import chardet
import requests
from botocore.client import Config
from botocore.credentials import RefreshableCredentials
from botocore.session import get_session
from googleapiclient.errors import HttpError
from mypy_boto3_s3 import S3Client
from retry import retry
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
from slack_sdk.web import SlackResponse
from common.data_source.config import (
_ITERATION_LIMIT,
_NOTION_CALL_TIMEOUT,
_SLACK_LIMIT,
CONFLUENCE_OAUTH_TOKEN_URL,
DOWNLOAD_CHUNK_SIZE,
EXCLUDED_IMAGE_TYPES,
RATE_LIMIT_MESSAGE_LOWERCASE,
SIZE_THRESHOLD_BUFFER,
BlobType,
)
from common.data_source.exceptions import RateLimitTriedTooManyTimesError
from common.data_source.interfaces import CT, CheckpointedConnector, CheckpointOutputWrapper, ConfluenceUser, LoadFunction, OnyxExtensionType, SecondsSinceUnixEpoch, TokenResponse
from common.data_source.models import BasicExpertInfo, Document
_TZ_SUFFIX_PATTERN = re.compile(r"([+-])([\d:]+)$")
def datetime_from_string(datetime_string: str) -> datetime:
datetime_string = datetime_string.strip()
match_jira_format = _TZ_SUFFIX_PATTERN.search(datetime_string)
if match_jira_format:
sign, tz_field = match_jira_format.groups()
digits = tz_field.replace(":", "")
if digits.isdigit() and 1 <= len(digits) <= 4:
if len(digits) >= 3:
hours = digits[:-2].rjust(2, "0")
minutes = digits[-2:]
else:
hours = digits.rjust(2, "0")
minutes = "00"
normalized = f"{sign}{hours}:{minutes}"
datetime_string = f"{datetime_string[: match_jira_format.start()]}{normalized}"
# Handle the case where the datetime string ends with 'Z' (Zulu time)
if datetime_string.endswith("Z"):
datetime_string = datetime_string[:-1] + "+00:00"
# Handle timezone format "+0000" -> "+00:00"
if datetime_string.endswith("+0000"):
datetime_string = datetime_string[:-5] + "+00:00"
datetime_object = datetime.fromisoformat(datetime_string)
if datetime_object.tzinfo is None:
# If no timezone info, assume it is UTC
datetime_object = datetime_object.replace(tzinfo=timezone.utc)
else:
# If not in UTC, translate it
datetime_object = datetime_object.astimezone(timezone.utc)
return datetime_object
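# Illustrative sketch (not part of the original source): Jira-style "+0000"
# and Zulu "Z" suffixes normalize to the same UTC datetime. The helper name
# below is hypothetical.
def _example_datetime_from_string() -> None:
    assert (
        datetime_from_string("2024-01-01T12:00:00.000+0000")
        == datetime_from_string("2024-01-01T12:00:00.000Z")
        == datetime(2024, 1, 1, 12, tzinfo=timezone.utc)
    )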
def is_valid_image_type(mime_type: str) -> bool:
"""
Check if mime_type is a valid image type.
Args:
mime_type: The MIME type to check
Returns:
True if the MIME type is a valid image type, False otherwise
"""
return bool(mime_type) and mime_type.startswith("image/") and mime_type not in EXCLUDED_IMAGE_TYPES
"""If you want to allow the external service to tell you when you've hit the rate limit,
use the following instead"""
R = TypeVar("R", bound=Callable[..., requests.Response])
def _handle_http_error(e: requests.HTTPError, attempt: int) -> int:
MIN_DELAY = 2
MAX_DELAY = 60
STARTING_DELAY = 5
BACKOFF = 2
# Check if the response or headers are None to avoid potential AttributeError
if e.response is None or e.response.headers is None:
logging.warning("HTTPError with `None` as response or as headers")
raise e
# Confluence Server returns 403 when rate limited
if e.response.status_code == 403:
FORBIDDEN_MAX_RETRY_ATTEMPTS = 7
FORBIDDEN_RETRY_DELAY = 10
        if attempt < FORBIDDEN_MAX_RETRY_ATTEMPTS:
            logging.warning(f"403 error. This sometimes happens when we hit Confluence rate limits. Retrying in {FORBIDDEN_RETRY_DELAY} seconds...")
            # Return a monotonic deadline so this branch matches the
            # delay_until semantics of the rate-limit branch below.
            return math.ceil(time.monotonic() + FORBIDDEN_RETRY_DELAY)
raise e
if e.response.status_code != 429 and RATE_LIMIT_MESSAGE_LOWERCASE not in e.response.text.lower():
raise e
retry_after = None
retry_after_header = e.response.headers.get("Retry-After")
if retry_after_header is not None:
try:
retry_after = int(retry_after_header)
if retry_after > MAX_DELAY:
logging.warning(f"Clamping retry_after from {retry_after} to {MAX_DELAY} seconds...")
retry_after = MAX_DELAY
if retry_after < MIN_DELAY:
retry_after = MIN_DELAY
except ValueError:
pass
if retry_after is not None:
logging.warning(f"Rate limiting with retry header. Retrying after {retry_after} seconds...")
delay = retry_after
else:
logging.warning("Rate limiting without retry header. Retrying with exponential backoff...")
delay = min(STARTING_DELAY * (BACKOFF**attempt), MAX_DELAY)
delay_until = math.ceil(time.monotonic() + delay)
return delay_until
def update_param_in_path(path: str, param: str, value: str) -> str:
"""Update a parameter in a path. Path should look something like:
/api/rest/users?start=0&limit=10
"""
parsed_url = urlparse(path)
query_params = parse_qs(parsed_url.query)
query_params[param] = [value]
return path.split("?")[0] + "?" + "&".join(f"{k}={quote(v[0])}" for k, v in query_params.items())
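# Illustrative sketch (not part of the original source): replacing a single
# query parameter while preserving the rest. The helper name below is
# hypothetical.
def _example_update_param_in_path() -> None:
    path = "/api/rest/users?start=0&limit=10"
    assert update_param_in_path(path, "start", "50") == "/api/rest/users?start=50&limit=10"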
def build_confluence_document_id(base_url: str, content_url: str, is_cloud: bool) -> str:
"""For confluence, the document id is the page url for a page based document
or the attachment download url for an attachment based document
Args:
base_url (str): The base url of the Confluence instance
content_url (str): The url of the page or attachment download url
Returns:
str: The document id
"""
# NOTE: urljoin is tricky and will drop the last segment of the base if it doesn't
# end with "/" because it believes that makes it a file.
final_url = base_url.rstrip("/") + "/"
if is_cloud and not final_url.endswith("/wiki/"):
final_url = urljoin(final_url, "wiki") + "/"
final_url = urljoin(final_url, content_url.lstrip("/"))
return final_url
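# Illustrative sketch (not part of the original source): cloud instances get
# the "wiki" segment inserted before the content path. The hostname and the
# helper name below are hypothetical.
def _example_build_confluence_document_id() -> None:
    doc_id = build_confluence_document_id(
        "https://example.atlassian.net", "/pages/123", is_cloud=True
    )
    assert doc_id == "https://example.atlassian.net/wiki/pages/123"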
def get_single_param_from_url(url: str, param: str) -> str | None:
"""Get a parameter from a url"""
parsed_url = urlparse(url)
return parse_qs(parsed_url.query).get(param, [None])[0]
def get_start_param_from_url(url: str) -> int:
"""Get the start parameter from a url"""
start_str = get_single_param_from_url(url, "start")
return int(start_str) if start_str else 0
def wrap_request_to_handle_ratelimiting(request_fn: R, default_wait_time_sec: int = 30, max_waits: int = 30) -> R:
def wrapped_request(*args: list, **kwargs: dict[str, Any]) -> requests.Response:
for _ in range(max_waits):
response = request_fn(*args, **kwargs)
if response.status_code == 429:
try:
wait_time = int(response.headers.get("Retry-After", default_wait_time_sec))
except ValueError:
wait_time = default_wait_time_sec
time.sleep(wait_time)
continue
return response
raise RateLimitTriedTooManyTimesError(f"Exceeded '{max_waits}' retries")
return cast(R, wrapped_request)
_rate_limited_get = wrap_request_to_handle_ratelimiting(requests.get)
_rate_limited_post = wrap_request_to_handle_ratelimiting(requests.post)
class _RateLimitedRequest:
get = _rate_limited_get
post = _rate_limited_post
rl_requests = _RateLimitedRequest
# Blob Storage Utilities
def create_s3_client(bucket_type: BlobType, credentials: dict[str, Any], european_residency: bool = False) -> S3Client:
"""Create S3 client for different blob storage types"""
if bucket_type == BlobType.R2:
subdomain = "eu." if european_residency else ""
endpoint_url = f"https://{credentials['account_id']}.{subdomain}r2.cloudflarestorage.com"
return boto3.client(
"s3",
endpoint_url=endpoint_url,
aws_access_key_id=credentials["r2_access_key_id"],
aws_secret_access_key=credentials["r2_secret_access_key"],
region_name="auto",
config=Config(signature_version="s3v4"),
)
elif bucket_type == BlobType.S3:
authentication_method = credentials.get("authentication_method", "access_key")
region_name = credentials.get("region") or None
if authentication_method == "access_key":
session = boto3.Session(
aws_access_key_id=credentials["aws_access_key_id"],
aws_secret_access_key=credentials["aws_secret_access_key"],
region_name=region_name,
)
return session.client("s3", region_name=region_name)
elif authentication_method == "iam_role":
role_arn = credentials["aws_role_arn"]
def _refresh_credentials() -> dict[str, str]:
sts_client = boto3.client("sts", region_name=credentials.get("region") or None)
assumed_role_object = sts_client.assume_role(
RoleArn=role_arn,
RoleSessionName=f"onyx_blob_storage_{int(datetime.now().timestamp())}",
)
creds = assumed_role_object["Credentials"]
return {
"access_key": creds["AccessKeyId"],
"secret_key": creds["SecretAccessKey"],
"token": creds["SessionToken"],
"expiry_time": creds["Expiration"].isoformat(),
}
refreshable = RefreshableCredentials.create_from_metadata(
metadata=_refresh_credentials(),
refresh_using=_refresh_credentials,
method="sts-assume-role",
)
botocore_session = get_session()
botocore_session._credentials = refreshable
session = boto3.Session(botocore_session=botocore_session, region_name=region_name)
return session.client("s3", region_name=region_name)
elif authentication_method == "assume_role":
return boto3.client("s3", region_name=region_name)
else:
raise ValueError("Invalid authentication method for S3.")
elif bucket_type == BlobType.GOOGLE_CLOUD_STORAGE:
return boto3.client(
"s3",
endpoint_url="https://storage.googleapis.com",
aws_access_key_id=credentials["access_key_id"],
aws_secret_access_key=credentials["secret_access_key"],
region_name="auto",
)
elif bucket_type == BlobType.OCI_STORAGE:
return boto3.client(
"s3",
endpoint_url=f"https://{credentials['namespace']}.compat.objectstorage.{credentials['region']}.oraclecloud.com",
aws_access_key_id=credentials["access_key_id"],
aws_secret_access_key=credentials["secret_access_key"],
region_name=credentials["region"],
)
elif bucket_type == BlobType.S3_COMPATIBLE:
addressing_style = credentials.get("addressing_style", "virtual")
return boto3.client(
"s3",
endpoint_url=credentials["endpoint_url"],
aws_access_key_id=credentials["aws_access_key_id"],
aws_secret_access_key=credentials["aws_secret_access_key"],
config=Config(s3={'addressing_style': addressing_style}),
)
else:
raise ValueError(f"Unsupported bucket type: {bucket_type}")
def detect_bucket_region(s3_client: S3Client, bucket_name: str) -> str | None:
"""Detect bucket region"""
try:
response = s3_client.head_bucket(Bucket=bucket_name)
bucket_region = response.get("BucketRegion") or response.get("ResponseMetadata", {}).get("HTTPHeaders", {}).get("x-amz-bucket-region")
if bucket_region:
logging.debug(f"Detected bucket region: {bucket_region}")
else:
logging.warning("Bucket region not found in head_bucket response")
return bucket_region
except Exception as e:
logging.warning(f"Failed to detect bucket region via head_bucket: {e}")
return None
def download_object(s3_client: S3Client, bucket_name: str, key: str, size_threshold: int | None = None) -> bytes | None:
"""Download object from blob storage"""
response = s3_client.get_object(Bucket=bucket_name, Key=key)
body = response["Body"]
try:
if size_threshold is None:
return body.read()
return read_stream_with_limit(body, key, size_threshold)
finally:
body.close()
def read_stream_with_limit(body: Any, key: str, size_threshold: int) -> bytes | None:
"""Read stream with size limit"""
bytes_read = 0
chunks: list[bytes] = []
chunk_size = min(DOWNLOAD_CHUNK_SIZE, size_threshold + SIZE_THRESHOLD_BUFFER)
for chunk in body.iter_chunks(chunk_size=chunk_size):
if not chunk:
continue
chunks.append(chunk)
bytes_read += len(chunk)
if bytes_read > size_threshold + SIZE_THRESHOLD_BUFFER:
logging.warning(f"{key} exceeds size threshold of {size_threshold}. Skipping.")
return None
return b"".join(chunks)
def _extract_onyx_metadata(line: str) -> dict | None:
"""
Example: first line has:
<!-- ONYX_METADATA={"title": "..."} -->
or
#ONYX_METADATA={"title":"..."}
"""
html_comment_pattern = r"<!--\s*ONYX_METADATA=\{(.*?)\}\s*-->"
hashtag_pattern = r"#ONYX_METADATA=\{(.*?)\}"
html_comment_match = re.search(html_comment_pattern, line)
hashtag_match = re.search(hashtag_pattern, line)
if html_comment_match:
json_str = html_comment_match.group(1)
elif hashtag_match:
json_str = hashtag_match.group(1)
else:
return None
try:
return json.loads("{" + json_str + "}")
except json.JSONDecodeError:
return None
def read_text_file(
file: IO,
encoding: str = "utf-8",
errors: str = "replace",
ignore_onyx_metadata: bool = True,
) -> tuple[str, dict]:
"""
For plain text files. Optionally extracts Onyx metadata from the first line.
"""
metadata = {}
file_content_raw = ""
for ind, line in enumerate(file):
# decode
try:
line = line.decode(encoding) if isinstance(line, bytes) else line
except UnicodeDecodeError:
line = line.decode(encoding, errors=errors) if isinstance(line, bytes) else line
# optionally parse metadata in the first line
if ind == 0 and not ignore_onyx_metadata:
potential_meta = _extract_onyx_metadata(line)
if potential_meta is not None:
metadata = potential_meta
continue
file_content_raw += line
return file_content_raw, metadata
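# Usage sketch (hypothetical file). Note that first-line metadata is parsed only
# when ignore_onyx_metadata=False, despite the flag's default of True:
#   with open("notes.md", "rb") as f:
#       text, metadata = read_text_file(f, ignore_onyx_metadata=False)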
def get_blob_link(bucket_type: BlobType, s3_client: S3Client, bucket_name: str, key: str, bucket_region: str | None = None) -> str:
"""Get object link for different blob storage types"""
encoded_key = quote(key, safe="/")
if bucket_type == BlobType.R2:
account_id = s3_client.meta.endpoint_url.split("//")[1].split(".")[0]
subdomain = "eu/" if "eu." in s3_client.meta.endpoint_url else "default/"
return f"https://dash.cloudflare.com/{account_id}/r2/{subdomain}buckets/{bucket_name}/objects/{encoded_key}/details"
elif bucket_type == BlobType.S3:
region = bucket_region or s3_client.meta.region_name
return f"https://s3.console.aws.amazon.com/s3/object/{bucket_name}?region={region}&prefix={encoded_key}"
elif bucket_type == BlobType.GOOGLE_CLOUD_STORAGE:
return f"https://console.cloud.google.com/storage/browser/_details/{bucket_name}/{encoded_key}"
elif bucket_type == BlobType.OCI_STORAGE:
namespace = s3_client.meta.endpoint_url.split("//")[1].split(".")[0]
region = s3_client.meta.region_name
return f"https://objectstorage.{region}.oraclecloud.com/n/{namespace}/b/{bucket_name}/o/{encoded_key}"
else:
raise ValueError(f"Unsupported bucket type: {bucket_type}")
def extract_size_bytes(obj: Mapping[str, Any]) -> int | None:
"""Extract size bytes from object metadata"""
candidate_keys = (
"Size",
"size",
"ContentLength",
"content_length",
"Content-Length",
"contentLength",
"bytes",
"Bytes",
)
def _normalize(value: Any) -> int | None:
if value is None or isinstance(value, bool):
return None
if isinstance(value, Integral):
return int(value)
try:
numeric = float(value)
except (TypeError, ValueError):
return None
if numeric >= 0 and numeric.is_integer():
return int(numeric)
return None
for key in candidate_keys:
if key in obj:
normalized = _normalize(obj.get(key))
if normalized is not None:
return normalized
for key, value in obj.items():
if not isinstance(key, str):
continue
lowered_key = key.lower()
if "size" in lowered_key or "length" in lowered_key:
normalized = _normalize(value)
if normalized is not None:
return normalized
return None
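# e.g. extract_size_bytes({"ContentLength": "2048"}) -> 2048; other keys containing
# "size"/"length" are also tried, and non-numeric values yield None.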
def get_file_ext(file_name: str) -> str:
"""Get file extension"""
return os.path.splitext(file_name)[1].lower()
def is_accepted_file_ext(file_ext: str, extension_type: OnyxExtensionType) -> bool:
image_extensions = {".jpg", ".jpeg", ".png", ".gif", ".bmp", ".tiff", ".webp"}
text_extensions = {".txt", ".md", ".mdx", ".conf", ".log", ".json", ".csv", ".tsv", ".xml", ".yml", ".yaml", ".sql"}
document_extensions = {".pdf", ".docx", ".pptx", ".xlsx", ".eml", ".epub", ".html"}
if extension_type & OnyxExtensionType.Multimedia and file_ext in image_extensions:
return True
if extension_type & OnyxExtensionType.Plain and file_ext in text_extensions:
return True
if extension_type & OnyxExtensionType.Document and file_ext in document_extensions:
return True
return False
def detect_encoding(file: IO[bytes]) -> str:
raw_data = file.read(50000)
file.seek(0)
encoding = chardet.detect(raw_data)["encoding"] or "utf-8"
return encoding
def get_markitdown_converter():
"""Lazily create and cache a shared module-level MarkItDown converter."""
global _MARKITDOWN_CONVERTER
if _MARKITDOWN_CONVERTER is None:
from markitdown import MarkItDown
_MARKITDOWN_CONVERTER = MarkItDown(enable_plugins=False)
return _MARKITDOWN_CONVERTER
def to_bytesio(stream: IO[bytes]) -> BytesIO:
if isinstance(stream, BytesIO):
return stream
data = stream.read() # consumes the stream!
return BytesIO(data)
# Slack Utilities
@lru_cache()
def get_base_url(token: str) -> str:
"""Get and cache Slack workspace base URL"""
client = WebClient(token=token)
return client.auth_test()["url"]
def get_message_link(event: dict, client: WebClient, channel_id: str) -> str:
"""Get message link"""
message_ts = event["ts"]
message_ts_without_dot = message_ts.replace(".", "")
thread_ts = event.get("thread_ts")
base_url = get_base_url(client.token)
link = f"{base_url.rstrip('/')}/archives/{channel_id}/p{message_ts_without_dot}" + (f"?thread_ts={thread_ts}" if thread_ts else "")
return link
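# Illustrative result (hypothetical workspace/channel/timestamp):
#   https://myworkspace.slack.com/archives/C0123456789/p1700000000123456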
def make_slack_api_call(call: Callable[..., SlackResponse], **kwargs: Any) -> SlackResponse:
"""Make Slack API call"""
return call(**kwargs)
def make_paginated_slack_api_call(call: Callable[..., SlackResponse], **kwargs: Any) -> Generator[dict[str, Any], None, None]:
"""Make paginated Slack API call"""
return _make_slack_api_call_paginated(call)(**kwargs)
def _make_slack_api_call_paginated(
call: Callable[..., SlackResponse],
) -> Callable[..., Generator[dict[str, Any], None, None]]:
"""Wrap Slack API call to automatically handle pagination"""
@wraps(call)
def paginated_call(**kwargs: Any) -> Generator[dict[str, Any], None, None]:
cursor: str | None = None
has_more = True
while has_more:
response = call(cursor=cursor, limit=_SLACK_LIMIT, **kwargs)
yield response.validate()
cursor = response.get("response_metadata", {}).get("next_cursor", "")
has_more = bool(cursor)
return paginated_call
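# Usage sketch (assumes an authenticated slack_sdk WebClient named `client`):
#   for page in make_paginated_slack_api_call(client.conversations_list, types="public_channel"):
#       for channel in page["channels"]:
#           ...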
def is_atlassian_date_error(e: Exception) -> bool:
return "field 'updated' is invalid" in str(e)
def expert_info_from_slack_id(
user_id: str | None,
client: WebClient,
user_cache: dict[str, BasicExpertInfo | None],
) -> BasicExpertInfo | None:
"""Get expert information from Slack user ID"""
if not user_id:
return None
if user_id in user_cache:
return user_cache[user_id]
response = client.users_info(user=user_id)
if not response["ok"]:
user_cache[user_id] = None
return None
user: dict = response.data.get("user", {})
profile = user.get("profile", {})
expert = BasicExpertInfo(
display_name=user.get("real_name") or profile.get("display_name"),
first_name=profile.get("first_name"),
last_name=profile.get("last_name"),
email=profile.get("email"),
)
user_cache[user_id] = expert
return expert
class SlackTextCleaner:
"""Slack text cleaning utility class"""
def __init__(self, client: WebClient) -> None:
self._client = client
self._id_to_name_map: dict[str, str] = {}
def _get_slack_name(self, user_id: str) -> str:
"""Get Slack username"""
if user_id not in self._id_to_name_map:
try:
response = self._client.users_info(user=user_id)
self._id_to_name_map[user_id] = response["user"]["profile"]["display_name"] or response["user"]["profile"]["real_name"]
except SlackApiError as e:
logging.exception(f"Error fetching data for user {user_id}: {e.response['error']}")
raise
return self._id_to_name_map[user_id]
def _replace_user_ids_with_names(self, message: str) -> str:
"""Replace user IDs with usernames"""
user_ids = re.findall("<@(.*?)>", message)
for user_id in user_ids:
try:
if user_id in self._id_to_name_map:
user_name = self._id_to_name_map[user_id]
else:
user_name = self._get_slack_name(user_id)
message = message.replace(f"<@{user_id}>", f"@{user_name}")
except Exception:
logging.exception(f"Unable to replace user ID with username for user_id '{user_id}'")
return message
def index_clean(self, message: str) -> str:
"""Index cleaning"""
message = self._replace_user_ids_with_names(message)
message = self.replace_tags_basic(message)
message = self.replace_channels_basic(message)
message = self.replace_special_mentions(message)
message = self.replace_special_catchall(message)
return message
@staticmethod
def replace_tags_basic(message: str) -> str:
"""Basic tag replacement"""
user_ids = re.findall("<@(.*?)>", message)
for user_id in user_ids:
message = message.replace(f"<@{user_id}>", f"@{user_id}")
return message
@staticmethod
def replace_channels_basic(message: str) -> str:
"""Basic channel replacement"""
channel_matches = re.findall(r"<#(.*?)\|(.*?)>", message)
for channel_id, channel_name in channel_matches:
message = message.replace(f"<#{channel_id}|{channel_name}>", f"#{channel_name}")
return message
@staticmethod
def replace_special_mentions(message: str) -> str:
"""Special mention replacement"""
message = message.replace("<!channel>", "@channel")
message = message.replace("<!here>", "@here")
message = message.replace("<!everyone>", "@everyone")
return message
@staticmethod
def replace_special_catchall(message: str) -> str:
"""Special catchall replacement"""
pattern = r"<!([^|]+)\|([^>]+)>"
return re.sub(pattern, r"\2", message)
@staticmethod
def add_zero_width_whitespace_after_tag(message: str) -> str:
"""Add zero-width whitespace after tag"""
return message.replace("@", "@\u200b")
# Gmail Utilities
def is_mail_service_disabled_error(error: HttpError) -> bool:
"""Detect if the Gmail API is telling us the mailbox is not provisioned."""
if error.resp.status != 400:
return False
error_message = str(error)
return "Mail service not enabled" in error_message or "failedPrecondition" in error_message
def build_time_range_query(
time_range_start: SecondsSinceUnixEpoch | None = None,
time_range_end: SecondsSinceUnixEpoch | None = None,
) -> str | None:
"""Build time range query for Gmail API"""
query = ""
if time_range_start is not None and time_range_start != 0:
query += f"after:{int(time_range_start) + 1}"
if time_range_end is not None and time_range_end != 0:
query += f" before:{int(time_range_end)}"
query = query.strip()
if len(query) == 0:
return None
return query
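# e.g. build_time_range_query(1700000000, 1700086400)
# -> "after:1700000001 before:1700086400" (Gmail epoch-seconds search operators)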
def clean_email_and_extract_name(email: str) -> tuple[str, str | None]:
"""Extract email address and display name from email string."""
email = email.strip()
if "<" in email and ">" in email:
# Handle format: "Display Name <email@domain.com>"
display_name = email[: email.find("<")].strip()
email_address = email[email.find("<") + 1 : email.find(">")].strip()
return email_address, display_name if display_name else None
else:
# Handle plain email address
return email.strip(), None
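# e.g. clean_email_and_extract_name("Jane Doe <jane@example.com>") -> ("jane@example.com", "Jane Doe")
#      clean_email_and_extract_name("jane@example.com") -> ("jane@example.com", None)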
def get_message_body(payload: dict[str, Any]) -> str:
"""Extract message body text from Gmail message payload."""
parts = payload.get("parts", [])
message_body = ""
for part in parts:
mime_type = part.get("mimeType")
body = part.get("body")
if mime_type == "text/plain" and body:
data = body.get("data", "")
text = base64.urlsafe_b64decode(data).decode()
message_body += text
return message_body
def time_str_to_utc(time_str: str) -> datetime:
"""Convert an ISO-8601 time string (with an optional trailing "Z") to a UTC datetime."""
return datetime.fromisoformat(time_str.replace("Z", "+00:00"))
def gmail_time_str_to_utc(time_str: str) -> datetime:
"""Convert a Gmail RFC 2822 time string to a UTC datetime."""
from email.utils import parsedate_to_datetime
from datetime import timezone
dt = parsedate_to_datetime(time_str)
return dt.astimezone(timezone.utc)
# Notion Utilities
T = TypeVar("T")
def batch_generator(
items: Iterable[T],
batch_size: int,
pre_batch_yield: Callable[[list[T]], None] | None = None,
) -> Generator[list[T], None, None]:
iterable = iter(items)
while True:
batch = list(islice(iterable, batch_size))
if not batch:
return
if pre_batch_yield:
pre_batch_yield(batch)
yield batch
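# e.g. list(batch_generator([1, 2, 3, 4, 5], batch_size=2)) -> [[1, 2], [3, 4], [5]]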
@retry(tries=3, delay=1, backoff=2)
def fetch_notion_data(url: str, headers: dict[str, str], method: str = "GET", json_data: Optional[dict] = None) -> dict[str, Any]:
"""Fetch data from Notion API with retry logic."""
try:
if method == "GET":
response = rl_requests.get(url, headers=headers, timeout=_NOTION_CALL_TIMEOUT)
elif method == "POST":
response = rl_requests.post(url, headers=headers, json=json_data, timeout=_NOTION_CALL_TIMEOUT)
else:
raise ValueError(f"Unsupported HTTP method: {method}")
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as e:
logging.error(f"Error fetching data from Notion API: {e}")
raise
def properties_to_str(properties: dict[str, Any]) -> str:
"""Convert Notion properties to a string representation."""
def _recurse_list_properties(inner_list: list[Any]) -> str | None:
list_properties: list[str | None] = []
for item in inner_list:
if item and isinstance(item, dict):
list_properties.append(_recurse_properties(item))
elif item and isinstance(item, list):
list_properties.append(_recurse_list_properties(item))
else:
list_properties.append(str(item))
return ", ".join([list_property for list_property in list_properties if list_property]) or None
def _recurse_properties(inner_dict: dict[str, Any]) -> str | None:
sub_inner_dict: dict[str, Any] | list[Any] | str = inner_dict
while isinstance(sub_inner_dict, dict) and "type" in sub_inner_dict:
type_name = sub_inner_dict["type"]
sub_inner_dict = sub_inner_dict[type_name]
if not sub_inner_dict:
return None
if isinstance(sub_inner_dict, list):
return _recurse_list_properties(sub_inner_dict)
elif isinstance(sub_inner_dict, str):
return sub_inner_dict
elif isinstance(sub_inner_dict, dict):
if "name" in sub_inner_dict:
return sub_inner_dict["name"]
if "content" in sub_inner_dict:
return sub_inner_dict["content"]
start = sub_inner_dict.get("start")
end = sub_inner_dict.get("end")
if start is not None:
if end is not None:
return f"{start} - {end}"
return start
elif end is not None:
return f"Until {end}"
if "id" in sub_inner_dict:
logging.debug("Skipping Notion object id field property")
return None
logging.debug(f"Unreadable property from innermost prop: {sub_inner_dict}")
return None
result = ""
for prop_name, prop in properties.items():
if not prop or not isinstance(prop, dict):
continue
try:
inner_value = _recurse_properties(prop)
except Exception as e:
logging.warning(f"Error recursing properties for {prop_name}: {e}")
continue
if inner_value:
result += f"{prop_name}: {inner_value}\t"
return result
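# Illustrative: properties_to_str({"Status": {"type": "select", "select": {"name": "Done"}}})
# -> "Status: Done\t"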
def filter_pages_by_time(pages: list[dict[str, Any]], start: float, end: float, filter_field: str = "last_edited_time") -> list[dict[str, Any]]:
"""Filter pages by time range."""
from datetime import datetime
filtered_pages: list[dict[str, Any]] = []
for page in pages:
timestamp = page[filter_field].replace(".000Z", "+00:00")
compare_time = datetime.fromisoformat(timestamp).timestamp()
if compare_time > start and compare_time <= end:
filtered_pages.append(page)
return filtered_pages
def _load_all_docs(
connector: CheckpointedConnector[CT],
load: LoadFunction,
) -> list[Document]:
num_iterations = 0
checkpoint = cast(CT, connector.build_dummy_checkpoint())
documents: list[Document] = []
while checkpoint.has_more:
doc_batch_generator = CheckpointOutputWrapper[CT]()(load(checkpoint))
for document, failure, next_checkpoint in doc_batch_generator:
if failure is not None:
raise RuntimeError(f"Failed to load documents: {failure}")
if document is not None and isinstance(document, Document):
documents.append(document)
if next_checkpoint is not None:
checkpoint = next_checkpoint
num_iterations += 1
if num_iterations > _ITERATION_LIMIT:
raise RuntimeError("Too many iterations. Infinite loop?")
return documents
def load_all_docs_from_checkpoint_connector(
connector: CheckpointedConnector[CT],
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
) -> list[Document]:
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | true |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/webdav_connector.py | common/data_source/webdav_connector.py | """WebDAV connector"""
import logging
import os
from datetime import datetime, timezone
from typing import Any, Optional
from webdav4.client import Client as WebDAVClient
from common.data_source.utils import (
get_file_ext,
)
from common.data_source.config import DocumentSource, INDEX_BATCH_SIZE, BLOB_STORAGE_SIZE_THRESHOLD
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
CredentialExpiredError,
InsufficientPermissionsError
)
from common.data_source.interfaces import LoadConnector, PollConnector
from common.data_source.models import Document, SecondsSinceUnixEpoch, GenerateDocumentsOutput
class WebDAVConnector(LoadConnector, PollConnector):
"""WebDAV connector for syncing files from WebDAV servers"""
def __init__(
self,
base_url: str,
remote_path: str = "/",
batch_size: int = INDEX_BATCH_SIZE,
) -> None:
"""Initialize WebDAV connector
Args:
base_url: Base URL of the WebDAV server (e.g., "https://webdav.example.com")
remote_path: Remote path to sync from (default: "/")
batch_size: Number of documents per batch
"""
self.base_url = base_url.rstrip("/")
if not remote_path:
remote_path = "/"
if not remote_path.startswith("/"):
remote_path = f"/{remote_path}"
if remote_path.endswith("/") and remote_path != "/":
remote_path = remote_path.rstrip("/")
self.remote_path = remote_path
self.batch_size = batch_size
self.client: Optional[WebDAVClient] = None
self._allow_images: bool | None = None
self.size_threshold: int | None = BLOB_STORAGE_SIZE_THRESHOLD
def set_allow_images(self, allow_images: bool) -> None:
"""Set whether to process images"""
logging.info(f"Setting allow_images to {allow_images}.")
self._allow_images = allow_images
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Load credentials and initialize WebDAV client
Args:
credentials: Dictionary containing 'username' and 'password'
Returns:
None
Raises:
ConnectorMissingCredentialError: If required credentials are missing
"""
logging.debug(f"Loading credentials for WebDAV server {self.base_url}")
username = credentials.get("username")
password = credentials.get("password")
if not username or not password:
raise ConnectorMissingCredentialError(
"WebDAV requires 'username' and 'password' credentials"
)
try:
# Initialize WebDAV client
self.client = WebDAVClient(
base_url=self.base_url,
auth=(username, password)
)
except Exception as e:
logging.error(f"Failed to connect to WebDAV server: {e}")
raise ConnectorMissingCredentialError(
f"Failed to authenticate with WebDAV server: {e}"
)
return None
def _list_files_recursive(
self,
path: str,
start: datetime,
end: datetime,
) -> list[tuple[str, dict]]:
"""Recursively list all files in the given path
Args:
path: Path to list files from
start: Start datetime for filtering
end: End datetime for filtering
Returns:
List of tuples containing (file_path, file_info)
"""
if self.client is None:
raise ConnectorMissingCredentialError("WebDAV client not initialized")
files = []
try:
logging.debug(f"Listing directory: {path}")
for item in self.client.ls(path, detail=True):
item_path = item['name']
if item_path == path or item_path == path + '/':
continue
logging.debug(f"Found item: {item_path}, type: {item.get('type')}")
if item.get('type') == 'directory':
try:
files.extend(self._list_files_recursive(item_path, start, end))
except Exception as e:
logging.error(f"Error recursing into directory {item_path}: {e}")
continue
else:
try:
modified_time = item.get('modified')
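# webdav4 may return 'modified' as a datetime or a string; strings are tried
# first as RFC 1123 ("Mon, 01 Jan 2024 00:00:00 GMT") and then as ISO 8601.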
if modified_time:
if isinstance(modified_time, datetime):
modified = modified_time
if modified.tzinfo is None:
modified = modified.replace(tzinfo=timezone.utc)
elif isinstance(modified_time, str):
try:
modified = datetime.strptime(modified_time, '%a, %d %b %Y %H:%M:%S %Z')
modified = modified.replace(tzinfo=timezone.utc)
except (ValueError, TypeError):
try:
modified = datetime.fromisoformat(modified_time.replace('Z', '+00:00'))
except (ValueError, TypeError):
logging.warning(f"Could not parse modified time for {item_path}: {modified_time}")
modified = datetime.now(timezone.utc)
else:
modified = datetime.now(timezone.utc)
else:
modified = datetime.now(timezone.utc)
logging.debug(f"File {item_path}: modified={modified}, start={start}, end={end}, include={start < modified <= end}")
if start < modified <= end:
files.append((item_path, item))
else:
logging.debug(f"File {item_path} filtered out by time range")
except Exception as e:
logging.error(f"Error processing file {item_path}: {e}")
continue
except Exception as e:
logging.error(f"Error listing directory {path}: {e}")
return files
def _yield_webdav_documents(
self,
start: datetime,
end: datetime,
) -> GenerateDocumentsOutput:
"""Generate documents from WebDAV server
Args:
start: Start datetime for filtering
end: End datetime for filtering
Yields:
Batches of documents
"""
if self.client is None:
raise ConnectorMissingCredentialError("WebDAV client not initialized")
logging.info(f"Searching for files in {self.remote_path} between {start} and {end}")
files = self._list_files_recursive(self.remote_path, start, end)
logging.info(f"Found {len(files)} files matching time criteria")
filename_counts: dict[str, int] = {}
for file_path, _ in files:
file_name = os.path.basename(file_path)
filename_counts[file_name] = filename_counts.get(file_name, 0) + 1
batch: list[Document] = []
for file_path, file_info in files:
file_name = os.path.basename(file_path)
size_bytes = file_info.get('size', 0)
if (
self.size_threshold is not None
and isinstance(size_bytes, int)
and size_bytes > self.size_threshold
):
logging.warning(
f"{file_name} exceeds size threshold of {self.size_threshold}. Skipping."
)
continue
try:
logging.debug(f"Downloading file: {file_path}")
from io import BytesIO
buffer = BytesIO()
self.client.download_fileobj(file_path, buffer)
blob = buffer.getvalue()
if blob is None or len(blob) == 0:
logging.warning(f"Downloaded content is empty for {file_path}")
continue
modified_time = file_info.get('modified')
if modified_time:
if isinstance(modified_time, datetime):
modified = modified_time
if modified.tzinfo is None:
modified = modified.replace(tzinfo=timezone.utc)
elif isinstance(modified_time, str):
try:
modified = datetime.strptime(modified_time, '%a, %d %b %Y %H:%M:%S %Z')
modified = modified.replace(tzinfo=timezone.utc)
except (ValueError, TypeError):
try:
modified = datetime.fromisoformat(modified_time.replace('Z', '+00:00'))
except (ValueError, TypeError):
logging.warning(f"Could not parse modified time for {file_path}: {modified_time}")
modified = datetime.now(timezone.utc)
else:
modified = datetime.now(timezone.utc)
else:
modified = datetime.now(timezone.utc)
if filename_counts.get(file_name, 0) > 1:
relative_path = file_path
if file_path.startswith(self.remote_path):
relative_path = file_path[len(self.remote_path):]
if relative_path.startswith('/'):
relative_path = relative_path[1:]
semantic_id = relative_path.replace('/', ' / ') if relative_path else file_name
else:
semantic_id = file_name
batch.append(
Document(
id=f"webdav:{self.base_url}:{file_path}",
blob=blob,
source=DocumentSource.WEBDAV,
semantic_identifier=semantic_id,
extension=get_file_ext(file_name),
doc_updated_at=modified,
size_bytes=size_bytes if size_bytes else 0
)
)
if len(batch) == self.batch_size:
yield batch
batch = []
except Exception as e:
logging.exception(f"Error downloading file {file_path}: {e}")
if batch:
yield batch
def load_from_state(self) -> GenerateDocumentsOutput:
"""Load all documents from WebDAV server
Yields:
Batches of documents
"""
logging.debug(f"Loading documents from WebDAV server {self.base_url}")
return self._yield_webdav_documents(
start=datetime(1970, 1, 1, tzinfo=timezone.utc),
end=datetime.now(timezone.utc),
)
def poll_source(
self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch
) -> GenerateDocumentsOutput:
"""Poll WebDAV server for updated documents
Args:
start: Start timestamp (seconds since Unix epoch)
end: End timestamp (seconds since Unix epoch)
Yields:
Batches of documents
"""
if self.client is None:
raise ConnectorMissingCredentialError("WebDAV client not initialized")
start_datetime = datetime.fromtimestamp(start, tz=timezone.utc)
end_datetime = datetime.fromtimestamp(end, tz=timezone.utc)
for batch in self._yield_webdav_documents(start_datetime, end_datetime):
yield batch
def validate_connector_settings(self) -> None:
"""Validate WebDAV connector settings.
Validation should exercise the same code-paths used by the connector
(directory listing / PROPFIND), avoiding exists() which may probe with
methods that differ across servers.
"""
if self.client is None:
raise ConnectorMissingCredentialError("WebDAV credentials not loaded.")
if not self.base_url:
raise ConnectorValidationError("No base URL was provided in connector settings.")
# Normalize directory path: for collections, many servers behave better with trailing '/'
test_path = self.remote_path or "/"
if not test_path.startswith("/"):
test_path = f"/{test_path}"
if test_path != "/" and not test_path.endswith("/"):
test_path = f"{test_path}/"
try:
# Use the same behavior as real sync: list directory with details (PROPFIND)
self.client.ls(test_path, detail=True)
except Exception as e:
# Prefer structured status codes if present on the exception/response
status = None
for attr in ("status_code", "code"):
v = getattr(e, attr, None)
if isinstance(v, int):
status = v
break
if status is None:
resp = getattr(e, "response", None)
v = getattr(resp, "status_code", None)
if isinstance(v, int):
status = v
# If we can classify by status code, do it
if status == 401:
raise CredentialExpiredError("WebDAV credentials appear invalid or expired.")
if status == 403:
raise InsufficientPermissionsError(
f"Insufficient permissions to access path '{self.remote_path}' on WebDAV server."
)
if status == 404:
raise ConnectorValidationError(
f"Remote path '{self.remote_path}' does not exist on WebDAV server."
)
# Fallback: avoid brittle substring matching that caused false positives.
# Provide the original exception for diagnosis.
raise ConnectorValidationError(
f"WebDAV validation failed for path '{test_path}': {repr(e)}"
)
if __name__ == "__main__":
credentials_dict = {
"username": os.environ.get("WEBDAV_USERNAME", "user"),
"password": os.environ.get("WEBDAV_PASSWORD", "pass"),
}
connector = WebDAVConnector(
base_url="http://172.17.0.1:8080/",
remote_path="/",
)
try:
connector.load_credentials(credentials_dict)
connector.validate_connector_settings()
document_batch_generator = connector.load_from_state()
for document_batch in document_batch_generator:
print("First batch of documents:")
for doc in document_batch:
print(f"Document ID: {doc.id}")
print(f"Semantic Identifier: {doc.semantic_identifier}")
print(f"Source: {doc.source}")
print(f"Updated At: {doc.doc_updated_at}")
print("---")
break
except ConnectorMissingCredentialError as e:
print(f"Error: {e}")
except Exception as e:
print(f"An unexpected error occurred: {e}")
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/imap_connector.py | common/data_source/imap_connector.py | import copy
import email
from email.header import decode_header
import imaplib
import logging
import os
import re
from datetime import datetime, timedelta
from datetime import timezone
from email.message import Message
from email.utils import collapse_rfc2231_value, parseaddr
from enum import Enum
from typing import Any
from typing import cast
import uuid
import bs4
from pydantic import BaseModel
from common.data_source.config import IMAP_CONNECTOR_SIZE_THRESHOLD, DocumentSource
from common.data_source.interfaces import CheckpointOutput, CheckpointedConnectorWithPermSync, CredentialsConnector, CredentialsProviderInterface
from common.data_source.models import BasicExpertInfo, ConnectorCheckpoint, Document, ExternalAccess, SecondsSinceUnixEpoch
_DEFAULT_IMAP_PORT_NUMBER = int(os.environ.get("IMAP_PORT", 993))
_IMAP_OKAY_STATUS = "OK"
_PAGE_SIZE = 100
_USERNAME_KEY = "imap_username"
_PASSWORD_KEY = "imap_password"
class Header(str, Enum):
SUBJECT_HEADER = "subject"
FROM_HEADER = "from"
TO_HEADER = "to"
CC_HEADER = "cc"
DELIVERED_TO_HEADER = (
"Delivered-To" # Used in mailing lists instead of the "to" header.
)
DATE_HEADER = "date"
MESSAGE_ID_HEADER = "Message-ID"
class EmailHeaders(BaseModel):
"""
Model for email headers extracted from IMAP messages.
"""
id: str
subject: str
sender: str
recipients: str | None
cc: str | None
date: datetime
@classmethod
def from_email_msg(cls, email_msg: Message) -> "EmailHeaders":
def _decode(header: str, default: str | None = None) -> str | None:
value = email_msg.get(header, default)
if not value:
return None
decoded_fragments = decode_header(value)
decoded_strings: list[str] = []
for decoded_value, encoding in decoded_fragments:
if isinstance(decoded_value, bytes):
try:
decoded_strings.append(
decoded_value.decode(encoding or "utf-8", errors="replace")
)
except LookupError:
decoded_strings.append(
decoded_value.decode("utf-8", errors="replace")
)
elif isinstance(decoded_value, str):
decoded_strings.append(decoded_value)
else:
decoded_strings.append(str(decoded_value))
return "".join(decoded_strings)
def _parse_date(date_str: str | None) -> datetime | None:
if not date_str:
return None
try:
return email.utils.parsedate_to_datetime(date_str)
except (TypeError, ValueError):
return None
message_id = _decode(header=Header.MESSAGE_ID_HEADER)
if not message_id:
message_id = f"<generated-{uuid.uuid4()}@imap.local>"
# It's possible for the subject line to not exist or be an empty string.
subject = _decode(header=Header.SUBJECT_HEADER) or "Unknown Subject"
from_ = _decode(header=Header.FROM_HEADER)
to = _decode(header=Header.TO_HEADER)
if not to:
to = _decode(header=Header.DELIVERED_TO_HEADER)
cc = _decode(header=Header.CC_HEADER)
date_str = _decode(header=Header.DATE_HEADER)
date = _parse_date(date_str=date_str)
if not date:
date = datetime.now(tz=timezone.utc)
# If any of the above are `None`, model validation will fail.
# Therefore, no guards (i.e.: `if <header> is None: raise RuntimeError(..)`) were written.
return cls.model_validate(
{
"id": message_id,
"subject": subject,
"sender": from_,
"recipients": to,
"cc": cc,
"date": date,
}
)
class CurrentMailbox(BaseModel):
mailbox: str
todo_email_ids: list[str]
# An email has a list of mailboxes.
# Each mailbox has a list of email-ids inside of it.
#
# Usage:
# To use this checkpointer, first fetch all the mailboxes.
# Then, pop a mailbox and fetch all of its email-ids.
# Then, pop each email-id and fetch its content (and parse it, etc..).
# When you have popped all email-ids for this mailbox, pop the next mailbox and repeat the above process until you're done.
#
# For initial checkpointing, set both fields to `None`.
class ImapCheckpoint(ConnectorCheckpoint):
todo_mailboxes: list[str] | None = None
current_mailbox: CurrentMailbox | None = None
class LoginState(str, Enum):
LoggedIn = "logged_in"
LoggedOut = "logged_out"
class ImapConnector(
CredentialsConnector,
CheckpointedConnectorWithPermSync,
):
def __init__(
self,
host: str,
port: int = _DEFAULT_IMAP_PORT_NUMBER,
mailboxes: list[str] | None = None,
) -> None:
self._host = host
self._port = port
self._mailboxes = mailboxes
self._credentials: dict[str, Any] | None = None
@property
def credentials(self) -> dict[str, Any]:
if not self._credentials:
raise RuntimeError(
"Credentials have not been initialized; call `set_credentials_provider` first"
)
return self._credentials
def _get_mail_client(self) -> imaplib.IMAP4_SSL:
"""
Returns a new `imaplib.IMAP4_SSL` instance.
The `imaplib.IMAP4_SSL` object is supposed to be an "ephemeral" object; it's not something that you can login,
logout, then log back into again. I.e., the following will fail:
```py
mail_client.login(..)
mail_client.logout();
mail_client.login(..)
```
Therefore, you need a fresh, new instance in order to operate with IMAP. This function gives one to you.
# Notes
This function will throw an error if the credentials have not yet been set.
"""
def get_or_raise(name: str) -> str:
value = self.credentials.get(name)
if not value:
raise RuntimeError(f"Credential item {name=} was not found")
if not isinstance(value, str):
raise RuntimeError(
f"Credential item {name=} must be of type str, instead received {type(name)=}"
)
return value
username = get_or_raise(_USERNAME_KEY)
password = get_or_raise(_PASSWORD_KEY)
mail_client = imaplib.IMAP4_SSL(host=self._host, port=self._port)
status, _data = mail_client.login(user=username, password=password)
if status != _IMAP_OKAY_STATUS:
raise RuntimeError(f"Failed to log into imap server; {status=}")
return mail_client
def _load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ImapCheckpoint,
include_perm_sync: bool,
) -> CheckpointOutput[ImapCheckpoint]:
checkpoint = cast(ImapCheckpoint, copy.deepcopy(checkpoint))
checkpoint.has_more = True
mail_client = self._get_mail_client()
if checkpoint.todo_mailboxes is None:
# This is the dummy checkpoint.
# Fill it with mailboxes first.
if self._mailboxes:
checkpoint.todo_mailboxes = _sanitize_mailbox_names(self._mailboxes)
else:
fetched_mailboxes = _fetch_all_mailboxes_for_email_account(
mail_client=mail_client
)
if not fetched_mailboxes:
raise RuntimeError(
"Failed to find any mailboxes for this email account"
)
checkpoint.todo_mailboxes = _sanitize_mailbox_names(fetched_mailboxes)
return checkpoint
if (
not checkpoint.current_mailbox
or not checkpoint.current_mailbox.todo_email_ids
):
if not checkpoint.todo_mailboxes:
checkpoint.has_more = False
return checkpoint
mailbox = checkpoint.todo_mailboxes.pop()
email_ids = _fetch_email_ids_in_mailbox(
mail_client=mail_client,
mailbox=mailbox,
start=start,
end=end,
)
checkpoint.current_mailbox = CurrentMailbox(
mailbox=mailbox,
todo_email_ids=email_ids,
)
_select_mailbox(
mail_client=mail_client, mailbox=checkpoint.current_mailbox.mailbox
)
current_todos = cast(
list, copy.deepcopy(checkpoint.current_mailbox.todo_email_ids[:_PAGE_SIZE])
)
checkpoint.current_mailbox.todo_email_ids = (
checkpoint.current_mailbox.todo_email_ids[_PAGE_SIZE:]
)
for email_id in current_todos:
email_msg = _fetch_email(mail_client=mail_client, email_id=email_id)
if not email_msg:
logging.warning(f"Failed to fetch message {email_id=}; skipping")
continue
email_headers = EmailHeaders.from_email_msg(email_msg=email_msg)
msg_dt = email_headers.date
if msg_dt.tzinfo is None:
msg_dt = msg_dt.replace(tzinfo=timezone.utc)
else:
msg_dt = msg_dt.astimezone(timezone.utc)
start_dt = datetime.fromtimestamp(start, tz=timezone.utc)
end_dt = datetime.fromtimestamp(end, tz=timezone.utc)
if not (start_dt < msg_dt <= end_dt):
continue
email_doc = _convert_email_headers_and_body_into_document(
email_msg=email_msg,
email_headers=email_headers,
include_perm_sync=include_perm_sync,
)
yield email_doc
attachments = extract_attachments(email_msg)
for att in attachments:
yield attachment_to_document(email_doc, att, email_headers)
return checkpoint
# impls for BaseConnector
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
self._credentials = credentials
return None
def validate_connector_settings(self) -> None:
self._get_mail_client()
# impls for CredentialsConnector
def set_credentials_provider(
self, credentials_provider: CredentialsProviderInterface
) -> None:
self._credentials = credentials_provider.get_credentials()
# impls for CheckpointedConnector
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ImapCheckpoint,
) -> CheckpointOutput[ImapCheckpoint]:
return self._load_from_checkpoint(
start=start, end=end, checkpoint=checkpoint, include_perm_sync=False
)
def build_dummy_checkpoint(self) -> ImapCheckpoint:
return ImapCheckpoint(has_more=True)
def validate_checkpoint_json(self, checkpoint_json: str) -> ImapCheckpoint:
return ImapCheckpoint.model_validate_json(json_data=checkpoint_json)
# impls for CheckpointedConnectorWithPermSync
def load_from_checkpoint_with_perm_sync(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ImapCheckpoint,
) -> CheckpointOutput[ImapCheckpoint]:
return self._load_from_checkpoint(
start=start, end=end, checkpoint=checkpoint, include_perm_sync=True
)
def _fetch_all_mailboxes_for_email_account(mail_client: imaplib.IMAP4_SSL) -> list[str]:
status, mailboxes_data = mail_client.list('""', "*")
if status != _IMAP_OKAY_STATUS:
raise RuntimeError(f"Failed to fetch mailboxes; {status=}")
mailboxes = []
for mailboxes_raw in mailboxes_data:
if isinstance(mailboxes_raw, bytes):
mailboxes_str = mailboxes_raw.decode()
elif isinstance(mailboxes_raw, str):
mailboxes_str = mailboxes_raw
else:
logging.warning(
f"Expected the mailbox data to be of type str, instead got {type(mailboxes_raw)=} {mailboxes_raw}; skipping"
)
continue
# The mailbox LIST response output can be found here:
# https://www.rfc-editor.org/rfc/rfc3501.html#section-7.2.2
#
# The general format is:
# `(<name-attributes>) <hierarchy-delimiter> <mailbox-name>`
#
# The below regex matches on that pattern; from there, we take the second capture group, which is the mailbox-name.
match = re.match(r'\([^)]*\)\s+"([^"]+)"\s+"?(.+?)"?$', mailboxes_str)
if not match:
logging.warning(
f"Invalid mailbox-data formatting structure: {mailboxes_str=}; skipping"
)
continue
mailbox = match.group(2)
mailboxes.append(mailbox)
if not mailboxes:
logging.warning(
"No mailboxes parsed from LIST response; falling back to INBOX"
)
return ["INBOX"]
return mailboxes
def _select_mailbox(mail_client: imaplib.IMAP4_SSL, mailbox: str) -> bool:
try:
status, _ = mail_client.select(mailbox=mailbox, readonly=True)
if status != _IMAP_OKAY_STATUS:
return False
return True
except Exception:
return False
def _fetch_email_ids_in_mailbox(
mail_client: imaplib.IMAP4_SSL,
mailbox: str,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
) -> list[str]:
if not _select_mailbox(mail_client, mailbox):
logging.warning(f"Skip mailbox: {mailbox}")
return []
start_dt = datetime.fromtimestamp(start, tz=timezone.utc)
end_dt = datetime.fromtimestamp(end, tz=timezone.utc) + timedelta(days=1)
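# IMAP SEARCH compares dates only (times are ignored) and BEFORE is exclusive,
# so the end date is padded by one day to include messages from the final day.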
start_str = start_dt.strftime("%d-%b-%Y")
end_str = end_dt.strftime("%d-%b-%Y")
search_criteria = f'(SINCE "{start_str}" BEFORE "{end_str}")'
status, email_ids_byte_array = mail_client.search(None, search_criteria)
if status != _IMAP_OKAY_STATUS or not email_ids_byte_array:
raise RuntimeError(f"Failed to fetch email ids; {status=}")
email_ids: bytes = email_ids_byte_array[0]
return [email_id.decode() for email_id in email_ids.split()]
def _fetch_email(mail_client: imaplib.IMAP4_SSL, email_id: str) -> Message | None:
status, msg_data = mail_client.fetch(message_set=email_id, message_parts="(RFC822)")
if status != _IMAP_OKAY_STATUS or not msg_data:
return None
data = msg_data[0]
if not isinstance(data, tuple):
raise RuntimeError(
f"Message data should be a tuple; instead got a {type(data)=} {data=}"
)
_, raw_email = data
return email.message_from_bytes(raw_email)
def _convert_email_headers_and_body_into_document(
email_msg: Message,
email_headers: EmailHeaders,
include_perm_sync: bool,
) -> Document:
sender_name, sender_addr = _parse_singular_addr(raw_header=email_headers.sender)
to_addrs = (
_parse_addrs(email_headers.recipients)
if email_headers.recipients
else []
)
cc_addrs = (
_parse_addrs(email_headers.cc)
if email_headers.cc
else []
)
all_participants = to_addrs + cc_addrs
expert_info_map = {
recipient_addr: BasicExpertInfo(
display_name=recipient_name, email=recipient_addr
)
for recipient_name, recipient_addr in all_participants
}
if sender_addr not in expert_info_map:
expert_info_map[sender_addr] = BasicExpertInfo(
display_name=sender_name, email=sender_addr
)
email_body = _parse_email_body(email_msg=email_msg, email_headers=email_headers)
primary_owners = list(expert_info_map.values())
external_access = (
ExternalAccess(
external_user_emails=set(expert_info_map.keys()),
external_user_group_ids=set(),
is_public=False,
)
if include_perm_sync
else None
)
return Document(
id=email_headers.id,
title=email_headers.subject,
blob=email_body,
size_bytes=len(email_body),
semantic_identifier=email_headers.subject,
metadata={},
extension='.txt',
doc_updated_at=email_headers.date,
source=DocumentSource.IMAP,
primary_owners=primary_owners,
external_access=external_access,
)
def extract_attachments(email_msg: Message, max_bytes: int = IMAP_CONNECTOR_SIZE_THRESHOLD):
attachments = []
if not email_msg.is_multipart():
return attachments
for part in email_msg.walk():
if part.get_content_maintype() == "multipart":
continue
disposition = (part.get("Content-Disposition") or "").lower()
filename = part.get_filename()
if not (
disposition.startswith("attachment")
or (disposition.startswith("inline") and filename)
):
continue
payload = part.get_payload(decode=True)
if not payload:
continue
if len(payload) > max_bytes:
continue
attachments.append({
"filename": filename or "attachment.bin",
"content_type": part.get_content_type(),
"content_bytes": payload,
"size_bytes": len(payload),
})
return attachments
def decode_mime_filename(raw: str | None) -> str | None:
if not raw:
return None
try:
raw = collapse_rfc2231_value(raw)
except Exception:
pass
parts = decode_header(raw)
decoded = []
for value, encoding in parts:
if isinstance(value, bytes):
decoded.append(value.decode(encoding or "utf-8", errors="replace"))
else:
decoded.append(value)
return "".join(decoded)
def attachment_to_document(
parent_doc: Document,
att: dict,
email_headers: EmailHeaders,
):
raw_filename = att["filename"]
filename = decode_mime_filename(raw_filename) or "attachment.bin"
ext = "." + filename.split(".")[-1] if "." in filename else ""
return Document(
id=f"{parent_doc.id}#att:{filename}",
source=DocumentSource.IMAP,
semantic_identifier=filename,
extension=ext,
blob=att["content_bytes"],
size_bytes=att["size_bytes"],
doc_updated_at=email_headers.date,
primary_owners=parent_doc.primary_owners,
metadata={
"parent_email_id": parent_doc.id,
"parent_subject": email_headers.subject,
"attachment_filename": filename,
"attachment_content_type": att["content_type"],
},
)
def _parse_email_body(
email_msg: Message,
email_headers: EmailHeaders,
) -> str:
body = None
for part in email_msg.walk():
if part.is_multipart():
# Multipart parts are *containers* for other parts, not the actual content itself.
# Therefore, we skip until we find the individual parts instead.
continue
charset = part.get_content_charset() or "utf-8"
try:
raw_payload = part.get_payload(decode=True)
if not isinstance(raw_payload, bytes):
logging.warning(
"Payload section from email was expected to be an array of bytes, instead got "
f"{type(raw_payload)=}, {raw_payload=}"
)
continue
body = raw_payload.decode(charset)
break
except (UnicodeDecodeError, LookupError) as e:
logging.warning(f"Could not decode part with charset {charset}. Error: {e}")
continue
if not body:
logging.warning(
f"Email with {email_headers.id=} has an empty body; returning an empty string"
)
return ""
soup = bs4.BeautifulSoup(markup=body, features="html.parser")
return " ".join(str_section for str_section in soup.stripped_strings)
def _sanitize_mailbox_names(mailboxes: list[str]) -> list[str]:
"""
Mailboxes with special characters in them must be enclosed by double-quotes, as per the IMAP protocol.
Just to be safe, we wrap *all* mailboxes with double-quotes.
"""
return [f'"{mailbox}"' for mailbox in mailboxes if mailbox]
def _parse_addrs(raw_header: str) -> list[tuple[str, str]]:
addrs = raw_header.split(",")
name_addr_pairs = [parseaddr(addr=addr) for addr in addrs if addr]
return [(name, addr) for name, addr in name_addr_pairs if addr]
def _parse_singular_addr(raw_header: str) -> tuple[str, str]:
addrs = _parse_addrs(raw_header=raw_header)
if not addrs:
return ("Unknown", "unknown@example.com")
elif len(addrs) >= 2:
raise RuntimeError(
f"Expected a singular address, but instead got multiple; {raw_header=} {addrs=}"
)
return addrs[0]
if __name__ == "__main__":
import time
from types import TracebackType
from common.data_source.utils import load_all_docs_from_checkpoint_connector
class OnyxStaticCredentialsProvider(
CredentialsProviderInterface["OnyxStaticCredentialsProvider"]
):
"""Implementation (a very simple one!) to handle static credentials."""
def __init__(
self,
tenant_id: str | None,
connector_name: str,
credential_json: dict[str, Any],
):
self._tenant_id = tenant_id
self._connector_name = connector_name
self._credential_json = credential_json
self._provider_key = str(uuid.uuid4())
def __enter__(self) -> "OnyxStaticCredentialsProvider":
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
pass
def get_tenant_id(self) -> str | None:
return self._tenant_id
def get_provider_key(self) -> str:
return self._provider_key
def get_credentials(self) -> dict[str, Any]:
return self._credential_json
def set_credentials(self, credential_json: dict[str, Any]) -> None:
self._credential_json = credential_json
def is_dynamic(self) -> bool:
return False
host = os.environ.get("IMAP_HOST")
mailboxes_str = os.environ.get("IMAP_MAILBOXES", "INBOX")
username = os.environ.get("IMAP_USERNAME")
password = os.environ.get("IMAP_PASSWORD")
mailboxes = (
[mailbox.strip() for mailbox in mailboxes_str.split(",")]
if mailboxes_str
else []
)
if not host:
raise RuntimeError("`IMAP_HOST` must be set")
imap_connector = ImapConnector(
host=host,
mailboxes=mailboxes,
)
imap_connector.set_credentials_provider(
OnyxStaticCredentialsProvider(
tenant_id=None,
connector_name=DocumentSource.IMAP,
credential_json={
_USERNAME_KEY: username,
_PASSWORD_KEY: password,
},
)
)
END = time.time()
START = END - 1 * 24 * 60 * 60
for doc in load_all_docs_from_checkpoint_connector(
connector=imap_connector,
start=START,
end=END,
):
print(doc.id, doc.extension)
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/config.py | common/data_source/config.py | """Configuration constants and enum definitions"""
import json
import os
from datetime import datetime, timezone
from enum import Enum
from typing import cast
def get_current_tz_offset() -> int:
# datetime now() gets local time, datetime.now(timezone.utc) gets UTC time.
# remove tzinfo to compare non-timezone-aware objects.
time_diff = datetime.now() - datetime.now(timezone.utc).replace(tzinfo=None)
return round(time_diff.total_seconds() / 3600)
# Default request timeout, mostly used by connectors
REQUEST_TIMEOUT_SECONDS = int(os.environ.get("REQUEST_TIMEOUT_SECONDS") or 60)
ONE_MINUTE = 60
ONE_HOUR = 3600
ONE_DAY = ONE_HOUR * 24
# Slack API limits
_SLACK_LIMIT = 900
# Redis lock configuration
ONYX_SLACK_LOCK_TTL = 1800
ONYX_SLACK_LOCK_BLOCKING_TIMEOUT = 60
ONYX_SLACK_LOCK_TOTAL_BLOCKING_TIMEOUT = 3600
class BlobType(str, Enum):
"""Supported storage types"""
S3 = "s3"
R2 = "r2"
GOOGLE_CLOUD_STORAGE = "google_cloud_storage"
OCI_STORAGE = "oci_storage"
S3_COMPATIBLE = "s3_compatible"
class DocumentSource(str, Enum):
"""Document sources"""
S3 = "s3"
NOTION = "notion"
R2 = "r2"
GOOGLE_CLOUD_STORAGE = "google_cloud_storage"
OCI_STORAGE = "oci_storage"
SLACK = "slack"
CONFLUENCE = "confluence"
JIRA = "jira"
GOOGLE_DRIVE = "google_drive"
GMAIL = "gmail"
DISCORD = "discord"
WEBDAV = "webdav"
MOODLE = "moodle"
S3_COMPATIBLE = "s3_compatible"
DROPBOX = "dropbox"
BOX = "box"
AIRTABLE = "airtable"
ASANA = "asana"
GITHUB = "github"
GITLAB = "gitlab"
IMAP = "imap"
BITBUCKET = "bitbucket"
ZENDESK = "zendesk"
class FileOrigin(str, Enum):
"""File origins"""
CONNECTOR = "connector"
# Standard image MIME types supported by most vision LLMs
IMAGE_MIME_TYPES = [
"image/png",
"image/jpeg",
"image/jpg",
"image/webp",
]
# Image types that should be excluded from processing
EXCLUDED_IMAGE_TYPES = [
"image/bmp",
"image/tiff",
"image/gif",
"image/svg+xml",
"image/avif",
]
_PAGE_EXPANSION_FIELDS = [
"body.storage.value",
"version",
"space",
"metadata.labels",
"history.lastUpdated",
"ancestors",
]
# Configuration constants
BLOB_STORAGE_SIZE_THRESHOLD = 20 * 1024 * 1024 # 20MB
INDEX_BATCH_SIZE = 2
SLACK_NUM_THREADS = 4
ENABLE_EXPENSIVE_EXPERT_CALLS = False
# Slack related constants
FAST_TIMEOUT = 1
MAX_RETRIES = 7
MAX_CHANNELS_TO_LOG = 50
BOT_CHANNEL_MIN_BATCH_SIZE = 256
BOT_CHANNEL_PERCENTAGE_THRESHOLD = 0.95
# Download configuration
DOWNLOAD_CHUNK_SIZE = 1024 * 1024 # 1MB
SIZE_THRESHOLD_BUFFER = 64
NOTION_CONNECTOR_DISABLE_RECURSIVE_PAGE_LOOKUP = (
os.environ.get("NOTION_CONNECTOR_DISABLE_RECURSIVE_PAGE_LOOKUP", "").lower()
== "true"
)
SLIM_BATCH_SIZE = 100
# Notion API constants
_NOTION_PAGE_SIZE = 100
_NOTION_CALL_TIMEOUT = 30 # 30 seconds
_ITERATION_LIMIT = 100_000
#####
# Indexing Configs
#####
# NOTE: Currently only supported in the Confluence and Google Drive connectors +
# only handles some failures (Confluence = handles API call failures, Google
# Drive = handles failures pulling files / parsing them)
CONTINUE_ON_CONNECTOR_FAILURE = os.environ.get(
"CONTINUE_ON_CONNECTOR_FAILURE", ""
).lower() not in ["false", ""]
#####
# Confluence Connector Configs
#####
CONFLUENCE_CONNECTOR_LABELS_TO_SKIP = [
ignored_tag
for ignored_tag in os.environ.get("CONFLUENCE_CONNECTOR_LABELS_TO_SKIP", "").split(
","
)
if ignored_tag
]
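# e.g. CONFLUENCE_CONNECTOR_LABELS_TO_SKIP="internal,draft" -> ["internal", "draft"]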
# Whether archived pages should be indexed (they are skipped by default)
CONFLUENCE_CONNECTOR_INDEX_ARCHIVED_PAGES = (
os.environ.get("CONFLUENCE_CONNECTOR_INDEX_ARCHIVED_PAGES", "").lower() == "true"
)
# Attachments exceeding this size will not be retrieved (in bytes)
CONFLUENCE_CONNECTOR_ATTACHMENT_SIZE_THRESHOLD = int(
os.environ.get("CONFLUENCE_CONNECTOR_ATTACHMENT_SIZE_THRESHOLD", 10 * 1024 * 1024)
)
# Attachments with more chars than this will not be indexed. This is to prevent extremely
# large files from freezing indexing. 200,000 is ~100 google doc pages.
CONFLUENCE_CONNECTOR_ATTACHMENT_CHAR_COUNT_THRESHOLD = int(
os.environ.get("CONFLUENCE_CONNECTOR_ATTACHMENT_CHAR_COUNT_THRESHOLD", 200_000)
)
_RAW_CONFLUENCE_CONNECTOR_USER_PROFILES_OVERRIDE = os.environ.get(
"CONFLUENCE_CONNECTOR_USER_PROFILES_OVERRIDE", ""
)
CONFLUENCE_CONNECTOR_USER_PROFILES_OVERRIDE = cast(
list[dict[str, str]] | None,
(
json.loads(_RAW_CONFLUENCE_CONNECTOR_USER_PROFILES_OVERRIDE)
if _RAW_CONFLUENCE_CONNECTOR_USER_PROFILES_OVERRIDE
else None
),
)
# Enter as a floating-point offset from UTC in hours (-24 < val < 24).
# This is applied globally, so it probably makes sense to transition this to
# per-connector at some point.
# For the default value, we assume that the user's local timezone is more likely to be
# correct (i.e. the configured user's timezone or the default server one) than UTC.
# https://developer.atlassian.com/cloud/confluence/cql-fields/#created
CONFLUENCE_TIMEZONE_OFFSET = float(
os.environ.get("CONFLUENCE_TIMEZONE_OFFSET", get_current_tz_offset())
)
CONFLUENCE_SYNC_TIME_BUFFER_SECONDS = int(
os.environ.get("CONFLUENCE_SYNC_TIME_BUFFER_SECONDS", ONE_DAY)
)
GOOGLE_DRIVE_CONNECTOR_SIZE_THRESHOLD = int(
os.environ.get("GOOGLE_DRIVE_CONNECTOR_SIZE_THRESHOLD", 10 * 1024 * 1024)
)
JIRA_CONNECTOR_LABELS_TO_SKIP = [
ignored_tag
for ignored_tag in os.environ.get("JIRA_CONNECTOR_LABELS_TO_SKIP", "").split(",")
if ignored_tag
]
JIRA_CONNECTOR_MAX_TICKET_SIZE = int(
os.environ.get("JIRA_CONNECTOR_MAX_TICKET_SIZE", 100 * 1024)
)
JIRA_SYNC_TIME_BUFFER_SECONDS = int(
os.environ.get("JIRA_SYNC_TIME_BUFFER_SECONDS", ONE_MINUTE)
)
JIRA_TIMEZONE_OFFSET = float(
os.environ.get("JIRA_TIMEZONE_OFFSET", get_current_tz_offset())
)
OAUTH_SLACK_CLIENT_ID = os.environ.get("OAUTH_SLACK_CLIENT_ID", "")
OAUTH_SLACK_CLIENT_SECRET = os.environ.get("OAUTH_SLACK_CLIENT_SECRET", "")
OAUTH_CONFLUENCE_CLOUD_CLIENT_ID = os.environ.get(
"OAUTH_CONFLUENCE_CLOUD_CLIENT_ID", ""
)
OAUTH_CONFLUENCE_CLOUD_CLIENT_SECRET = os.environ.get(
"OAUTH_CONFLUENCE_CLOUD_CLIENT_SECRET", ""
)
OAUTH_JIRA_CLOUD_CLIENT_ID = os.environ.get("OAUTH_JIRA_CLOUD_CLIENT_ID", "")
OAUTH_JIRA_CLOUD_CLIENT_SECRET = os.environ.get("OAUTH_JIRA_CLOUD_CLIENT_SECRET", "")
OAUTH_GOOGLE_DRIVE_CLIENT_ID = os.environ.get("OAUTH_GOOGLE_DRIVE_CLIENT_ID", "")
OAUTH_GOOGLE_DRIVE_CLIENT_SECRET = os.environ.get(
"OAUTH_GOOGLE_DRIVE_CLIENT_SECRET", ""
)
GOOGLE_DRIVE_WEB_OAUTH_REDIRECT_URI = os.environ.get("GOOGLE_DRIVE_WEB_OAUTH_REDIRECT_URI", "http://localhost:9380/v1/connector/google-drive/oauth/web/callback")
GMAIL_WEB_OAUTH_REDIRECT_URI = os.environ.get("GMAIL_WEB_OAUTH_REDIRECT_URI", "http://localhost:9380/v1/connector/gmail/oauth/web/callback")
CONFLUENCE_OAUTH_TOKEN_URL = "https://auth.atlassian.com/oauth/token"
RATE_LIMIT_MESSAGE_LOWERCASE = "Rate limit exceeded".lower()
_DEFAULT_PAGINATION_LIMIT = 1000
_PROBLEMATIC_EXPANSIONS = "body.storage.value"
_REPLACEMENT_EXPANSIONS = "body.view.value"
BOX_WEB_OAUTH_REDIRECT_URI = os.environ.get("BOX_WEB_OAUTH_REDIRECT_URI", "http://localhost:9380/v1/connector/box/oauth/web/callback")
GITHUB_CONNECTOR_BASE_URL = os.environ.get("GITHUB_CONNECTOR_BASE_URL") or None
class HtmlBasedConnectorTransformLinksStrategy(str, Enum):
# remove links entirely
STRIP = "strip"
# turn HTML links into markdown links
MARKDOWN = "markdown"
HTML_BASED_CONNECTOR_TRANSFORM_LINKS_STRATEGY = os.environ.get(
"HTML_BASED_CONNECTOR_TRANSFORM_LINKS_STRATEGY",
HtmlBasedConnectorTransformLinksStrategy.STRIP,
)
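# Illustrative: with the "markdown" strategy an HTML anchor such as
# <a href="https://x">Docs</a> is emitted as "[Docs](https://x)"; the default
# "strip" strategy keeps only "Docs".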
PARSE_WITH_TRAFILATURA = os.environ.get("PARSE_WITH_TRAFILATURA", "").lower() == "true"
WEB_CONNECTOR_IGNORED_CLASSES = os.environ.get(
"WEB_CONNECTOR_IGNORED_CLASSES", "sidebar,footer"
).split(",")
WEB_CONNECTOR_IGNORED_ELEMENTS = os.environ.get(
"WEB_CONNECTOR_IGNORED_ELEMENTS", "nav,footer,meta,script,style,symbol,aside"
).split(",")
AIRTABLE_CONNECTOR_SIZE_THRESHOLD = int(
os.environ.get("AIRTABLE_CONNECTOR_SIZE_THRESHOLD", 10 * 1024 * 1024)
)
ASANA_CONNECTOR_SIZE_THRESHOLD = int(
os.environ.get("ASANA_CONNECTOR_SIZE_THRESHOLD", 10 * 1024 * 1024)
)
IMAP_CONNECTOR_SIZE_THRESHOLD = int(
os.environ.get("IMAP_CONNECTOR_SIZE_THRESHOLD", 10 * 1024 * 1024)
)
ZENDESK_CONNECTOR_SKIP_ARTICLE_LABELS = [
    label
    for label in os.environ.get("ZENDESK_CONNECTOR_SKIP_ARTICLE_LABELS", "").split(",")
    if label
]
_USER_NOT_FOUND = "Unknown Confluence User"
_COMMENT_EXPANSION_FIELDS = ["body.storage.value"]
_ATTACHMENT_EXPANSION_FIELDS = [
"version",
"space",
"metadata.labels",
]
_RESTRICTIONS_EXPANSION_FIELDS = [
"space",
"restrictions.read.restrictions.user",
"restrictions.read.restrictions.group",
"ancestors.restrictions.read.restrictions.user",
"ancestors.restrictions.read.restrictions.group",
]
_SLIM_DOC_BATCH_SIZE = 5000
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/notion_connector.py | common/data_source/notion_connector.py | import html
import logging
from collections.abc import Generator
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Optional
from urllib.parse import urlparse
from retry import retry
from common.data_source.config import (
INDEX_BATCH_SIZE,
NOTION_CONNECTOR_DISABLE_RECURSIVE_PAGE_LOOKUP,
DocumentSource,
)
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
CredentialExpiredError,
InsufficientPermissionsError,
UnexpectedValidationError,
)
from common.data_source.interfaces import (
LoadConnector,
PollConnector,
SecondsSinceUnixEpoch,
)
from common.data_source.models import (
Document,
GenerateDocumentsOutput,
NotionBlock,
NotionPage,
NotionSearchResponse,
TextSection,
)
from common.data_source.utils import (
batch_generator,
datetime_from_string,
fetch_notion_data,
filter_pages_by_time,
properties_to_str,
rl_requests,
)
class NotionConnector(LoadConnector, PollConnector):
"""Notion Page connector that reads all Notion pages this integration has access to.
Arguments:
batch_size (int): Number of objects to index in a batch
recursive_index_enabled (bool): Whether to recursively index child pages
root_page_id (str | None): Specific root page ID to start indexing from
"""
def __init__(
self,
batch_size: int = INDEX_BATCH_SIZE,
recursive_index_enabled: bool = not NOTION_CONNECTOR_DISABLE_RECURSIVE_PAGE_LOOKUP,
root_page_id: Optional[str] = None,
) -> None:
self.batch_size = batch_size
self.headers = {
"Content-Type": "application/json",
"Notion-Version": "2022-06-28",
}
self.indexed_pages: set[str] = set()
self.root_page_id = root_page_id
self.recursive_index_enabled = recursive_index_enabled or bool(root_page_id)
self.page_path_cache: dict[str, str] = {}
@retry(tries=3, delay=1, backoff=2)
def _fetch_child_blocks(self, block_id: str, cursor: Optional[str] = None) -> dict[str, Any] | None:
"""Fetch all child blocks via the Notion API."""
logging.debug(f"[Notion]: Fetching children of block with ID {block_id}")
block_url = f"https://api.notion.com/v1/blocks/{block_id}/children"
query_params = {"start_cursor": cursor} if cursor else None
try:
response = rl_requests.get(
block_url,
headers=self.headers,
params=query_params,
timeout=30,
)
response.raise_for_status()
return response.json()
        except Exception as e:
            response = getattr(e, "response", None)
            if response is not None and response.status_code == 404:
                logging.error(f"[Notion]: Unable to access block with ID {block_id}. This is likely due to the block not being shared with the integration.")
                return None
            else:
                logging.exception(f"[Notion]: Error fetching blocks: {e}")
                raise
@retry(tries=3, delay=1, backoff=2)
def _fetch_page(self, page_id: str) -> NotionPage:
"""Fetch a page from its ID via the Notion API."""
logging.debug(f"[Notion]: Fetching page for ID {page_id}")
page_url = f"https://api.notion.com/v1/pages/{page_id}"
try:
data = fetch_notion_data(page_url, self.headers, "GET")
return NotionPage(**data)
except Exception as e:
logging.warning(f"[Notion]: Failed to fetch page, trying database for ID {page_id}: {e}")
return self._fetch_database_as_page(page_id)
@retry(tries=3, delay=1, backoff=2)
def _fetch_database_as_page(self, database_id: str) -> NotionPage:
"""Attempt to fetch a database as a page."""
logging.debug(f"[Notion]: Fetching database for ID {database_id} as a page")
database_url = f"https://api.notion.com/v1/databases/{database_id}"
data = fetch_notion_data(database_url, self.headers, "GET")
database_name = data.get("title")
database_name = database_name[0].get("text", {}).get("content") if database_name else None
return NotionPage(**data, database_name=database_name)
@retry(tries=3, delay=1, backoff=2)
def _fetch_database(self, database_id: str, cursor: Optional[str] = None) -> dict[str, Any]:
"""Fetch a database from its ID via the Notion API."""
logging.debug(f"[Notion]: Fetching database for ID {database_id}")
block_url = f"https://api.notion.com/v1/databases/{database_id}/query"
body = {"start_cursor": cursor} if cursor else None
try:
data = fetch_notion_data(block_url, self.headers, "POST", body)
return data
        except Exception as e:
            response = getattr(e, "response", None)
            if response is not None and response.status_code in [404, 400]:
                logging.error(f"[Notion]: Unable to access database with ID {database_id}. This is likely due to the database not being shared with the integration.")
                return {"results": [], "next_cursor": None}
            raise
def _read_pages_from_database(self, database_id: str) -> tuple[list[NotionBlock], list[str]]:
"""Returns a list of top level blocks and all page IDs in the database."""
result_blocks: list[NotionBlock] = []
result_pages: list[str] = []
cursor = None
while True:
data = self._fetch_database(database_id, cursor)
for result in data["results"]:
obj_id = result["id"]
obj_type = result["object"]
text = properties_to_str(result.get("properties", {}))
if text:
result_blocks.append(NotionBlock(id=obj_id, text=text, prefix="\n"))
if self.recursive_index_enabled:
if obj_type == "page":
logging.debug(f"[Notion]: Found page with ID {obj_id} in database {database_id}")
result_pages.append(result["id"])
elif obj_type == "database":
logging.debug(f"[Notion]: Found database with ID {obj_id} in database {database_id}")
_, child_pages = self._read_pages_from_database(obj_id)
result_pages.extend(child_pages)
if data["next_cursor"] is None:
break
cursor = data["next_cursor"]
return result_blocks, result_pages
def _extract_rich_text(self, rich_text_array: list[dict[str, Any]]) -> str:
collected_text: list[str] = []
for rich_text in rich_text_array:
content = ""
r_type = rich_text.get("type")
if r_type == "equation":
expr = rich_text.get("equation", {}).get("expression")
if expr:
content = expr
elif r_type == "mention":
mention = rich_text.get("mention", {}) or {}
mention_type = mention.get("type")
mention_value = mention.get(mention_type, {}) if mention_type else {}
if mention_type == "date":
start = mention_value.get("start")
end = mention_value.get("end")
if start and end:
content = f"{start} - {end}"
elif start:
content = start
elif mention_type in {"page", "database"}:
content = mention_value.get("id", rich_text.get("plain_text", ""))
elif mention_type == "link_preview":
content = mention_value.get("url", rich_text.get("plain_text", ""))
else:
content = rich_text.get("plain_text", "") or str(mention_value)
else:
if rich_text.get("plain_text"):
content = rich_text["plain_text"]
elif "text" in rich_text and rich_text["text"].get("content"):
content = rich_text["text"]["content"]
href = rich_text.get("href")
if content and href:
content = f"{content} ({href})"
if content:
collected_text.append(content)
return "".join(collected_text).strip()
def _build_table_html(self, table_block_id: str) -> str | None:
rows: list[str] = []
cursor = None
while True:
data = self._fetch_child_blocks(table_block_id, cursor)
if data is None:
break
for result in data["results"]:
if result.get("type") != "table_row":
continue
cells_html: list[str] = []
for cell in result["table_row"].get("cells", []):
cell_text = self._extract_rich_text(cell)
cell_html = html.escape(cell_text) if cell_text else ""
cells_html.append(f"<td>{cell_html}</td>")
rows.append(f"<tr>{''.join(cells_html)}</tr>")
if data.get("next_cursor") is None:
break
cursor = data["next_cursor"]
if not rows:
return None
return "<table>\n" + "\n".join(rows) + "\n</table>"
def _download_file(self, url: str) -> bytes | None:
try:
response = rl_requests.get(url, timeout=60)
response.raise_for_status()
return response.content
except Exception as exc:
logging.warning(f"[Notion]: Failed to download Notion file from {url}: {exc}")
return None
def _append_block_id_to_name(self, name: str, block_id: Optional[str]) -> str:
"""Append the Notion block ID to the filename while keeping the extension."""
if not block_id:
return name
path = Path(name)
stem = path.stem or name
suffix = path.suffix
if not stem:
return name
return f"{stem}_{block_id}{suffix}" if suffix else f"{stem}_{block_id}"
def _extract_file_metadata(self, result_obj: dict[str, Any], block_id: str) -> tuple[str | None, str, str | None]:
file_source_type = result_obj.get("type")
file_source = result_obj.get(file_source_type, {}) if file_source_type else {}
url = file_source.get("url")
name = result_obj.get("name") or file_source.get("name")
if url and not name:
parsed_name = Path(urlparse(url).path).name
name = parsed_name or f"notion_file_{block_id}"
elif not name:
name = f"notion_file_{block_id}"
name = self._append_block_id_to_name(name, block_id)
caption = self._extract_rich_text(result_obj.get("caption", [])) if "caption" in result_obj else None
return url, name, caption
def _build_attachment_document(
self,
block_id: str,
url: str,
name: str,
caption: Optional[str],
page_last_edited_time: Optional[str],
page_path: Optional[str],
) -> Document | None:
file_bytes = self._download_file(url)
if file_bytes is None:
return None
        extension = Path(name).suffix or Path(urlparse(url).path).suffix or ".bin"
        if not extension.startswith("."):
            extension = f".{extension}"
updated_at = datetime_from_string(page_last_edited_time) if page_last_edited_time else datetime.now(timezone.utc)
base_identifier = name or caption or (f"Notion file {block_id}" if block_id else "Notion file")
semantic_identifier = f"{page_path} / {base_identifier}" if page_path else base_identifier
return Document(
id=block_id,
blob=file_bytes,
source=DocumentSource.NOTION,
semantic_identifier=semantic_identifier,
extension=extension,
size_bytes=len(file_bytes),
doc_updated_at=updated_at,
)
def _read_blocks(self, base_block_id: str, page_last_edited_time: Optional[str] = None, page_path: Optional[str] = None) -> tuple[list[NotionBlock], list[str], list[Document]]:
result_blocks: list[NotionBlock] = []
child_pages: list[str] = []
attachments: list[Document] = []
cursor = None
while True:
data = self._fetch_child_blocks(base_block_id, cursor)
if data is None:
return result_blocks, child_pages, attachments
for result in data["results"]:
logging.debug(f"[Notion]: Found child block for block with ID {base_block_id}: {result}")
result_block_id = result["id"]
result_type = result["type"]
result_obj = result[result_type]
if result_type in ["ai_block", "unsupported", "external_object_instance_page"]:
logging.warning(f"[Notion]: Skipping unsupported block type {result_type}")
continue
if result_type == "table":
table_html = self._build_table_html(result_block_id)
if table_html:
result_blocks.append(
NotionBlock(
id=result_block_id,
text=table_html,
prefix="\n\n",
)
)
continue
if result_type == "equation":
expr = result_obj.get("expression")
if expr:
result_blocks.append(
NotionBlock(
id=result_block_id,
text=expr,
prefix="\n",
)
)
continue
cur_result_text_arr = []
if "rich_text" in result_obj:
text = self._extract_rich_text(result_obj["rich_text"])
if text:
cur_result_text_arr.append(text)
if result_type == "bulleted_list_item":
if cur_result_text_arr:
cur_result_text_arr[0] = f"- {cur_result_text_arr[0]}"
else:
cur_result_text_arr = ["- "]
if result_type == "numbered_list_item":
if cur_result_text_arr:
cur_result_text_arr[0] = f"1. {cur_result_text_arr[0]}"
else:
cur_result_text_arr = ["1. "]
if result_type == "to_do":
checked = result_obj.get("checked")
checkbox_prefix = "[x]" if checked else "[ ]"
if cur_result_text_arr:
cur_result_text_arr = [f"{checkbox_prefix} {cur_result_text_arr[0]}"] + cur_result_text_arr[1:]
else:
cur_result_text_arr = [checkbox_prefix]
if result_type in {"file", "image", "pdf", "video", "audio"}:
file_url, file_name, caption = self._extract_file_metadata(result_obj, result_block_id)
if file_url:
attachment_doc = self._build_attachment_document(
block_id=result_block_id,
url=file_url,
name=file_name,
caption=caption,
page_last_edited_time=page_last_edited_time,
page_path=page_path,
)
if attachment_doc:
attachments.append(attachment_doc)
attachment_label = file_name
if caption:
attachment_label = f"{file_name} ({caption})"
if attachment_label:
cur_result_text_arr.append(f"{result_type.capitalize()}: {attachment_label}")
if result["has_children"]:
if result_type == "child_page":
child_pages.append(result_block_id)
else:
logging.debug(f"[Notion]: Entering sub-block: {result_block_id}")
subblocks, subblock_child_pages, subblock_attachments = self._read_blocks(result_block_id, page_last_edited_time, page_path)
logging.debug(f"[Notion]: Finished sub-block: {result_block_id}")
result_blocks.extend(subblocks)
child_pages.extend(subblock_child_pages)
attachments.extend(subblock_attachments)
if result_type == "child_database":
inner_blocks, inner_child_pages = self._read_pages_from_database(result_block_id)
result_blocks.extend(inner_blocks)
if self.recursive_index_enabled:
child_pages.extend(inner_child_pages)
if cur_result_text_arr:
new_block = NotionBlock(
id=result_block_id,
text="\n".join(cur_result_text_arr),
prefix="\n",
)
result_blocks.append(new_block)
if data["next_cursor"] is None:
break
cursor = data["next_cursor"]
return result_blocks, child_pages, attachments
def _read_page_title(self, page: NotionPage) -> Optional[str]:
"""Extracts the title from a Notion page."""
if hasattr(page, "database_name") and page.database_name:
return page.database_name
for _, prop in page.properties.items():
if prop["type"] == "title" and len(prop["title"]) > 0:
page_title = " ".join([t["plain_text"] for t in prop["title"]]).strip()
return page_title
return None
def _build_page_path(self, page: NotionPage, visited: Optional[set[str]] = None) -> Optional[str]:
"""Construct a hierarchical path for a page based on its parent chain."""
if page.id in self.page_path_cache:
return self.page_path_cache[page.id]
visited = visited or set()
if page.id in visited:
logging.warning(f"[Notion]: Detected cycle while building path for page {page.id}")
return self._read_page_title(page)
visited.add(page.id)
current_title = self._read_page_title(page) or f"Untitled Page {page.id}"
parent_info = getattr(page, "parent", None) or {}
parent_type = parent_info.get("type")
parent_id = parent_info.get(parent_type) if parent_type else None
parent_path = None
if parent_type in {"page_id", "database_id"} and isinstance(parent_id, str):
try:
parent_page = self._fetch_page(parent_id)
parent_path = self._build_page_path(parent_page, visited)
except Exception as exc:
logging.warning(f"[Notion]: Failed to resolve parent {parent_id} for page {page.id}: {exc}")
full_path = f"{parent_path} / {current_title}" if parent_path else current_title
self.page_path_cache[page.id] = full_path
return full_path
def _read_pages(self, pages: list[NotionPage], start: SecondsSinceUnixEpoch | None = None, end: SecondsSinceUnixEpoch | None = None) -> Generator[Document, None, None]:
"""Reads pages for rich text content and generates Documents."""
all_child_page_ids: list[str] = []
for page in pages:
if isinstance(page, dict):
page = NotionPage(**page)
if page.id in self.indexed_pages:
logging.debug(f"[Notion]: Already indexed page with ID {page.id}. Skipping.")
continue
if start is not None and end is not None:
page_ts = datetime_from_string(page.last_edited_time).timestamp()
                if not (start < page_ts <= end):
logging.debug(f"[Notion]: Skipping page {page.id} outside polling window.")
continue
logging.info(f"[Notion]: Reading page with ID {page.id}, with url {page.url}")
page_path = self._build_page_path(page)
page_blocks, child_page_ids, attachment_docs = self._read_blocks(page.id, page.last_edited_time, page_path)
all_child_page_ids.extend(child_page_ids)
self.indexed_pages.add(page.id)
raw_page_title = self._read_page_title(page)
page_title = raw_page_title or f"Untitled Page with ID {page.id}"
# Append the page id to help disambiguate duplicate names
base_identifier = page_path or page_title
semantic_identifier = f"{base_identifier}_{page.id}" if base_identifier else page.id
if not page_blocks:
if not raw_page_title:
logging.warning(f"[Notion]: No blocks OR title found for page with ID {page.id}. Skipping.")
continue
text = page_title
if page.properties:
text += "\n\n" + "\n".join([f"{key}: {value}" for key, value in page.properties.items()])
sections = [TextSection(link=page.url, text=text)]
else:
sections = [
TextSection(
link=f"{page.url}#{block.id.replace('-', '')}",
text=block.prefix + block.text,
)
for block in page_blocks
]
joined_text = "\n".join(sec.text for sec in sections)
blob = joined_text.encode("utf-8")
yield Document(
id=page.id, blob=blob, source=DocumentSource.NOTION, semantic_identifier=semantic_identifier, extension=".txt", size_bytes=len(blob), doc_updated_at=datetime_from_string(page.last_edited_time)
)
for attachment_doc in attachment_docs:
yield attachment_doc
if self.recursive_index_enabled and all_child_page_ids:
for child_page_batch_ids in batch_generator(all_child_page_ids, INDEX_BATCH_SIZE):
child_page_batch = [self._fetch_page(page_id) for page_id in child_page_batch_ids if page_id not in self.indexed_pages]
yield from self._read_pages(child_page_batch, start, end)
@retry(tries=3, delay=1, backoff=2)
def _search_notion(self, query_dict: dict[str, Any]) -> NotionSearchResponse:
"""Search for pages from a Notion database."""
logging.debug(f"[Notion]: Searching for pages in Notion with query_dict: {query_dict}")
data = fetch_notion_data("https://api.notion.com/v1/search", self.headers, "POST", query_dict)
return NotionSearchResponse(**data)
def _recursive_load(self, start: SecondsSinceUnixEpoch | None = None, end: SecondsSinceUnixEpoch | None = None) -> Generator[list[Document], None, None]:
"""Recursively load pages starting from root page ID."""
if self.root_page_id is None or not self.recursive_index_enabled:
raise RuntimeError("Recursive page lookup is not enabled")
logging.info(f"[Notion]: Recursively loading pages from Notion based on root page with ID: {self.root_page_id}")
pages = [self._fetch_page(page_id=self.root_page_id)]
yield from batch_generator(self._read_pages(pages, start, end), self.batch_size)
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Applies integration token to headers."""
self.headers["Authorization"] = f"Bearer {credentials['notion_integration_token']}"
return None
def load_from_state(self) -> GenerateDocumentsOutput:
"""Loads all page data from a Notion workspace."""
if self.recursive_index_enabled and self.root_page_id:
yield from self._recursive_load()
return
query_dict = {
"filter": {"property": "object", "value": "page"},
"page_size": 100,
}
while True:
db_res = self._search_notion(query_dict)
pages = [NotionPage(**page) for page in db_res.results]
yield from batch_generator(self._read_pages(pages), self.batch_size)
if db_res.has_more:
query_dict["start_cursor"] = db_res.next_cursor
else:
break
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> GenerateDocumentsOutput:
"""Poll Notion for updated pages within a time period."""
if self.recursive_index_enabled and self.root_page_id:
yield from self._recursive_load(start, end)
return
query_dict = {
"page_size": 100,
"sort": {"timestamp": "last_edited_time", "direction": "descending"},
"filter": {"property": "object", "value": "page"},
}
while True:
db_res = self._search_notion(query_dict)
pages = filter_pages_by_time(db_res.results, start, end, "last_edited_time")
if pages:
yield from batch_generator(self._read_pages(pages, start, end), self.batch_size)
if db_res.has_more:
query_dict["start_cursor"] = db_res.next_cursor
else:
break
else:
break
def validate_connector_settings(self) -> None:
"""Validate Notion connector settings and credentials."""
if not self.headers.get("Authorization"):
raise ConnectorMissingCredentialError("Notion credentials not loaded.")
try:
if self.root_page_id:
response = rl_requests.get(
f"https://api.notion.com/v1/pages/{self.root_page_id}",
headers=self.headers,
timeout=30,
)
else:
test_query = {"filter": {"property": "object", "value": "page"}, "page_size": 1}
response = rl_requests.post(
"https://api.notion.com/v1/search",
headers=self.headers,
json=test_query,
timeout=30,
)
response.raise_for_status()
except rl_requests.exceptions.HTTPError as http_err:
status_code = http_err.response.status_code if http_err.response else None
if status_code == 401:
raise CredentialExpiredError("Notion credential appears to be invalid or expired (HTTP 401).")
elif status_code == 403:
raise InsufficientPermissionsError("Your Notion token does not have sufficient permissions (HTTP 403).")
elif status_code == 404:
raise ConnectorValidationError("Notion resource not found or not shared with the integration (HTTP 404).")
elif status_code == 429:
raise ConnectorValidationError("Validation failed due to Notion rate-limits being exceeded (HTTP 429).")
else:
raise UnexpectedValidationError(f"Unexpected Notion HTTP error (status={status_code}): {http_err}")
except Exception as exc:
raise UnexpectedValidationError(f"Unexpected error during Notion settings validation: {exc}")
if __name__ == "__main__":
import os
root_page_id = os.environ.get("NOTION_ROOT_PAGE_ID")
connector = NotionConnector(root_page_id=root_page_id)
connector.load_credentials({"notion_integration_token": os.environ.get("NOTION_INTEGRATION_TOKEN")})
document_batches = connector.load_from_state()
for doc_batch in document_batches:
for doc in doc_batch:
print(doc) | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/__init__.py | common/data_source/__init__.py |
"""
Thanks to https://github.com/onyx-dot-app/onyx
Content of this directory is under the "MIT Expat" license as defined below.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .blob_connector import BlobStorageConnector
from .slack_connector import SlackConnector
from .gmail_connector import GmailConnector
from .notion_connector import NotionConnector
from .confluence_connector import ConfluenceConnector
from .discord_connector import DiscordConnector
from .dropbox_connector import DropboxConnector
from .google_drive.connector import GoogleDriveConnector
from .jira.connector import JiraConnector
from .sharepoint_connector import SharePointConnector
from .teams_connector import TeamsConnector
from .moodle_connector import MoodleConnector
from .airtable_connector import AirtableConnector
from .asana_connector import AsanaConnector
from .imap_connector import ImapConnector
from .zendesk_connector import ZendeskConnector
from .config import BlobType, DocumentSource
from .models import Document, TextSection, ImageSection, BasicExpertInfo
from .exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
CredentialExpiredError,
InsufficientPermissionsError,
UnexpectedValidationError
)
__all__ = [
"BlobStorageConnector",
"SlackConnector",
"GmailConnector",
"NotionConnector",
"ConfluenceConnector",
"DiscordConnector",
"DropboxConnector",
"GoogleDriveConnector",
"JiraConnector",
"SharePointConnector",
"TeamsConnector",
"MoodleConnector",
"BlobType",
"DocumentSource",
"Document",
"TextSection",
"ImageSection",
"BasicExpertInfo",
"ConnectorMissingCredentialError",
"ConnectorValidationError",
"CredentialExpiredError",
"InsufficientPermissionsError",
"UnexpectedValidationError",
"AirtableConnector",
"AsanaConnector",
"ImapConnector",
"ZendeskConnector",
]
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/discord_connector.py | common/data_source/discord_connector.py | """Discord connector"""
import asyncio
import logging
import os
from datetime import datetime, timezone
from typing import Any, AsyncIterable, Iterable
from discord import Client, MessageType
from discord.channel import TextChannel, Thread
from discord.flags import Intents
from discord.message import Message as DiscordMessage
from common.data_source.config import INDEX_BATCH_SIZE, DocumentSource
from common.data_source.exceptions import ConnectorMissingCredentialError
from common.data_source.interfaces import LoadConnector, PollConnector, SecondsSinceUnixEpoch
from common.data_source.models import Document, GenerateDocumentsOutput, TextSection
_DISCORD_DOC_ID_PREFIX = "DISCORD_"
_SNIPPET_LENGTH = 30
def _convert_message_to_document(
message: DiscordMessage,
sections: list[TextSection],
) -> Document:
"""
Convert a discord message to a document
Sections are collected before calling this function because it relies on async
calls to fetch the thread history if there is one
"""
metadata: dict[str, str | list[str]] = {}
semantic_substring = ""
# Only messages from TextChannels will make it here, but we have to check for it anyway
if isinstance(message.channel, TextChannel) and (channel_name := message.channel.name):
metadata["Channel"] = channel_name
semantic_substring += f" in Channel: #{channel_name}"
# If there is a thread, add more detail to the metadata, title, and semantic identifier
if isinstance(message.channel, Thread):
# Threads do have a title
title = message.channel.name
# Add more detail to the semantic identifier if available
semantic_substring += f" in Thread: {title}"
snippet: str = message.content[:_SNIPPET_LENGTH].rstrip() + "..." if len(message.content) > _SNIPPET_LENGTH else message.content
semantic_identifier = f"{message.author.name} said{semantic_substring}: {snippet}"
# fallback to created_at
doc_updated_at = message.edited_at if message.edited_at else message.created_at
if doc_updated_at and doc_updated_at.tzinfo is None:
doc_updated_at = doc_updated_at.replace(tzinfo=timezone.utc)
elif doc_updated_at:
doc_updated_at = doc_updated_at.astimezone(timezone.utc)
return Document(
id=f"{_DISCORD_DOC_ID_PREFIX}{message.id}",
source=DocumentSource.DISCORD,
semantic_identifier=semantic_identifier,
doc_updated_at=doc_updated_at,
blob=message.content.encode("utf-8"),
extension=".txt",
size_bytes=len(message.content.encode("utf-8")),
metadata=metadata if metadata else None,
)
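# Illustrative: a default-type message "hello there everyone, how is..." from
# user "alice" in channel #general becomes a Document whose semantic_identifier
# reads 'alice said in Channel: #general: hello there everyone, how is...'.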
async def _fetch_filtered_channels(
discord_client: Client,
server_ids: list[int] | None,
channel_names: list[str] | None,
) -> list[TextChannel]:
filtered_channels: list[TextChannel] = []
for channel in discord_client.get_all_channels():
if not channel.permissions_for(channel.guild.me).read_message_history:
continue
if not isinstance(channel, TextChannel):
continue
if server_ids and len(server_ids) > 0 and channel.guild.id not in server_ids:
continue
if channel_names and channel.name not in channel_names:
continue
filtered_channels.append(channel)
logging.info(f"Found {len(filtered_channels)} channels for the authenticated user")
return filtered_channels
async def _fetch_documents_from_channel(
channel: TextChannel,
start_time: datetime | None,
end_time: datetime | None,
) -> AsyncIterable[Document]:
# Discord's epoch starts at 2015-01-01
discord_epoch = datetime(2015, 1, 1, tzinfo=timezone.utc)
if start_time and start_time < discord_epoch:
start_time = discord_epoch
# NOTE: limit=None is the correct way to fetch all messages and threads with pagination
# The discord package erroneously uses limit for both pagination AND number of results
# This causes the history and archived_threads methods to return 100 results even if there are more results within the filters
# Pagination is handled automatically (100 results at a time) when limit=None
async for channel_message in channel.history(
limit=None,
after=start_time,
before=end_time,
):
# Skip messages that are not the default type
if channel_message.type != MessageType.default:
continue
sections: list[TextSection] = [
TextSection(
text=channel_message.content,
link=channel_message.jump_url,
)
]
yield _convert_message_to_document(channel_message, sections)
for active_thread in channel.threads:
async for thread_message in active_thread.history(
limit=None,
after=start_time,
before=end_time,
):
# Skip messages that are not the default type
if thread_message.type != MessageType.default:
continue
sections = [
TextSection(
text=thread_message.content,
link=thread_message.jump_url,
)
]
yield _convert_message_to_document(thread_message, sections)
async for archived_thread in channel.archived_threads(
limit=None,
):
async for thread_message in archived_thread.history(
limit=None,
after=start_time,
before=end_time,
):
# Skip messages that are not the default type
if thread_message.type != MessageType.default:
continue
sections = [
TextSection(
text=thread_message.content,
link=thread_message.jump_url,
)
]
yield _convert_message_to_document(thread_message, sections)
def _manage_async_retrieval(
token: str,
requested_start_date_string: str,
channel_names: list[str],
server_ids: list[int],
start: datetime | None = None,
end: datetime | None = None,
) -> Iterable[Document]:
# parse requested_start_date_string to datetime
pull_date: datetime | None = datetime.strptime(requested_start_date_string, "%Y-%m-%d").replace(tzinfo=timezone.utc) if requested_start_date_string else None
# Set start_time to the most recent of start and pull_date, or whichever is provided
start_time = max(filter(None, [start, pull_date])) if start or pull_date else None
end_time: datetime | None = end
proxy_url: str | None = os.environ.get("https_proxy") or os.environ.get("http_proxy")
if proxy_url:
logging.info(f"Using proxy for Discord: {proxy_url}")
async def _async_fetch() -> AsyncIterable[Document]:
intents = Intents.default()
intents.message_content = True
async with Client(intents=intents, proxy=proxy_url) as cli:
asyncio.create_task(coro=cli.start(token))
await cli.wait_until_ready()
filtered_channels: list[TextChannel] = await _fetch_filtered_channels(
discord_client=cli,
server_ids=server_ids,
channel_names=channel_names,
)
for channel in filtered_channels:
async for doc in _fetch_documents_from_channel(
channel=channel,
start_time=start_time,
end_time=end_time,
):
yield doc
def run_and_yield() -> Iterable[Document]:
loop = asyncio.new_event_loop()
try:
# Get the async generator
async_gen = _async_fetch()
# Convert to AsyncIterator
async_iter = async_gen.__aiter__()
while True:
try:
# Create a coroutine by calling anext with the async iterator
next_coro = anext(async_iter)
# Run the coroutine to get the next document
doc = loop.run_until_complete(next_coro)
yield doc
except StopAsyncIteration:
break
finally:
loop.close()
return run_and_yield()
class DiscordConnector(LoadConnector, PollConnector):
"""Discord connector for accessing Discord messages and channels"""
def __init__(
self,
server_ids: list[str] | None = None,
channel_names: list[str] | None = None,
# YYYY-MM-DD
start_date: str | None = None,
batch_size: int = INDEX_BATCH_SIZE,
):
self.batch_size = batch_size
self.channel_names: list[str] = channel_names if channel_names else []
self.server_ids: list[int] = [int(server_id) for server_id in server_ids] if server_ids else []
self._discord_bot_token: str | None = None
self.requested_start_date_string: str = start_date or ""
@property
def discord_bot_token(self) -> str:
if self._discord_bot_token is None:
raise ConnectorMissingCredentialError("Discord")
return self._discord_bot_token
def _manage_doc_batching(
self,
start: datetime | None = None,
end: datetime | None = None,
) -> GenerateDocumentsOutput:
doc_batch = []
        def merge_batch():
            doc_id = doc_batch[0].id
            min_updated_at = doc_batch[0].doc_updated_at
            max_updated_at = doc_batch[-1].doc_updated_at
            # Join document blobs with a blank-line separator, without a
            # leading separator before the first document.
            blob = b'\n\n'.join(d.blob for d in doc_batch)
            size_bytes = 0
            for d in doc_batch:
                min_updated_at = min(min_updated_at, d.doc_updated_at)
                max_updated_at = max(max_updated_at, d.doc_updated_at)
                size_bytes += d.size_bytes
            return Document(
                id=doc_id,
                source=DocumentSource.DISCORD,
                semantic_identifier=f"{min_updated_at} -> {max_updated_at}",
                doc_updated_at=max_updated_at,
                blob=blob,
                extension=".txt",
                size_bytes=size_bytes,
            )
for doc in _manage_async_retrieval(
token=self.discord_bot_token,
requested_start_date_string=self.requested_start_date_string,
channel_names=self.channel_names,
server_ids=self.server_ids,
start=start,
end=end,
):
doc_batch.append(doc)
if len(doc_batch) >= self.batch_size:
yield [merge_batch()]
doc_batch = []
if doc_batch:
yield [merge_batch()]
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
self._discord_bot_token = credentials["discord_bot_token"]
return None
    def validate_connector_settings(self) -> None:
        """Validate Discord connector settings"""
        if not self._discord_bot_token:
            raise ConnectorMissingCredentialError("Discord")
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> Any:
"""Poll Discord for recent messages"""
return self._manage_doc_batching(
datetime.fromtimestamp(start, tz=timezone.utc),
datetime.fromtimestamp(end, tz=timezone.utc),
)
def load_from_state(self) -> Any:
"""Load messages from Discord state"""
return self._manage_doc_batching(None, None)
if __name__ == "__main__":
import os
import time
end = time.time()
# 1 day
start = end - 24 * 60 * 60 * 1
# "1,2,3"
server_ids: str | None = os.environ.get("server_ids", None)
# "channel1,channel2"
channel_names: str | None = os.environ.get("channel_names", None)
connector = DiscordConnector(
server_ids=server_ids.split(",") if server_ids else [],
channel_names=channel_names.split(",") if channel_names else [],
start_date=os.environ.get("start_date", None),
)
connector.load_credentials({"discord_bot_token": os.environ.get("discord_bot_token")})
for doc_batch in connector.poll_source(start, end):
for doc in doc_batch:
print(doc)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/connector_runner.py | common/data_source/connector_runner.py | import sys
import time
import logging
from collections.abc import Generator
from datetime import datetime
from typing import Generic
from typing import TypeVar
from common.data_source.interfaces import (
BaseConnector,
CheckpointedConnector,
CheckpointedConnectorWithPermSync,
CheckpointOutput,
LoadConnector,
PollConnector,
)
from common.data_source.models import ConnectorCheckpoint, ConnectorFailure, Document
TimeRange = tuple[datetime, datetime]
CT = TypeVar("CT", bound=ConnectorCheckpoint)
def batched_doc_ids(
checkpoint_connector_generator: CheckpointOutput[CT],
batch_size: int,
) -> Generator[set[str], None, None]:
batch: set[str] = set()
for document, failure, next_checkpoint in CheckpointOutputWrapper[CT]()(
checkpoint_connector_generator
):
if document is not None:
batch.add(document.id)
elif (
failure and failure.failed_document and failure.failed_document.document_id
):
batch.add(failure.failed_document.document_id)
if len(batch) >= batch_size:
yield batch
batch = set()
if len(batch) > 0:
yield batch
class CheckpointOutputWrapper(Generic[CT]):
"""
Wraps a CheckpointOutput generator to give things back in a more digestible format,
specifically for Document outputs.
The connector format is easier for the connector implementor (e.g. it enforces exactly
one new checkpoint is returned AND that the checkpoint is at the end), thus the different
formats.
"""
def __init__(self) -> None:
self.next_checkpoint: CT | None = None
def __call__(
self,
checkpoint_connector_generator: CheckpointOutput[CT],
) -> Generator[
tuple[Document | None, ConnectorFailure | None, CT | None],
None,
None,
]:
# grabs the final return value and stores it in the `next_checkpoint` variable
def _inner_wrapper(
checkpoint_connector_generator: CheckpointOutput[CT],
) -> CheckpointOutput[CT]:
self.next_checkpoint = yield from checkpoint_connector_generator
return self.next_checkpoint # not used
for document_or_failure in _inner_wrapper(checkpoint_connector_generator):
if isinstance(document_or_failure, Document):
yield document_or_failure, None, None
elif isinstance(document_or_failure, ConnectorFailure):
yield None, document_or_failure, None
else:
raise ValueError(
f"Invalid document_or_failure type: {type(document_or_failure)}"
)
if self.next_checkpoint is None:
raise RuntimeError(
"Checkpoint is None. This should never happen - the connector should always return a checkpoint."
)
yield None, None, self.next_checkpoint
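# Minimal consumption sketch (illustrative): `my_connector` and `my_checkpoint`
# are placeholders, not part of this module.
#
#     wrapper = CheckpointOutputWrapper[ConnectorCheckpoint]()
#     gen = my_connector.load_from_checkpoint(start=0.0, end=1.0, checkpoint=my_checkpoint)
#     for document, failure, next_checkpoint in wrapper(gen):
#         if document is not None:
#             ...  # handle the document
#         elif failure is not None:
#             ...  # record the failure
#     # the final tuple carries the new checkpoint in the third slot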
class ConnectorRunner(Generic[CT]):
"""
Handles:
- Batching
- Additional exception logging
- Combining different connector types to a single interface
"""
def __init__(
self,
connector: BaseConnector,
batch_size: int,
# cannot be True for non-checkpointed connectors
include_permissions: bool,
time_range: TimeRange | None = None,
):
if not isinstance(connector, CheckpointedConnector) and include_permissions:
raise ValueError(
"include_permissions cannot be True for non-checkpointed connectors"
)
self.connector = connector
self.time_range = time_range
self.batch_size = batch_size
self.include_permissions = include_permissions
self.doc_batch: list[Document] = []
def run(self, checkpoint: CT) -> Generator[
tuple[list[Document] | None, ConnectorFailure | None, CT | None],
None,
None,
]:
"""Adds additional exception logging to the connector."""
try:
if isinstance(self.connector, CheckpointedConnector):
if self.time_range is None:
raise ValueError("time_range is required for CheckpointedConnector")
start = time.monotonic()
if self.include_permissions:
if not isinstance(
self.connector, CheckpointedConnectorWithPermSync
):
raise ValueError(
"Connector does not support permission syncing"
)
load_from_checkpoint = (
self.connector.load_from_checkpoint_with_perm_sync
)
else:
load_from_checkpoint = self.connector.load_from_checkpoint
checkpoint_connector_generator = load_from_checkpoint(
start=self.time_range[0].timestamp(),
end=self.time_range[1].timestamp(),
checkpoint=checkpoint,
)
next_checkpoint: CT | None = None
# this is guaranteed to always run at least once with next_checkpoint being non-None
for document, failure, next_checkpoint in CheckpointOutputWrapper[CT]()(
checkpoint_connector_generator
):
if document is not None and isinstance(document, Document):
self.doc_batch.append(document)
if failure is not None:
yield None, failure, None
if len(self.doc_batch) >= self.batch_size:
yield self.doc_batch, None, None
self.doc_batch = []
# yield remaining documents
if len(self.doc_batch) > 0:
yield self.doc_batch, None, None
self.doc_batch = []
yield None, None, next_checkpoint
logging.debug(
f"Connector took {time.monotonic() - start} seconds to get to the next checkpoint."
)
else:
finished_checkpoint = self.connector.build_dummy_checkpoint()
finished_checkpoint.has_more = False
if isinstance(self.connector, PollConnector):
if self.time_range is None:
raise ValueError("time_range is required for PollConnector")
for document_batch in self.connector.poll_source(
start=self.time_range[0].timestamp(),
end=self.time_range[1].timestamp(),
):
yield document_batch, None, None
yield None, None, finished_checkpoint
elif isinstance(self.connector, LoadConnector):
for document_batch in self.connector.load_from_state():
yield document_batch, None, None
yield None, None, finished_checkpoint
else:
raise ValueError(f"Invalid connector. type: {type(self.connector)}")
except Exception:
exc_type, _, exc_traceback = sys.exc_info()
# Traverse the traceback to find the last frame where the exception was raised
tb = exc_traceback
if tb is None:
logging.error("No traceback found for exception")
raise
while tb.tb_next:
tb = tb.tb_next # Move to the next frame in the traceback
# Get the local variables from the frame where the exception occurred
local_vars = tb.tb_frame.f_locals
local_vars_str = "\n".join(
f"{key}: {value}" for key, value in local_vars.items()
)
logging.error(
f"Error in connector. type: {exc_type};\n"
f"local_vars below -> \n{local_vars_str[:1024]}"
)
raise | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/html_utils.py | common/data_source/html_utils.py | import logging
import re
from copy import copy
from dataclasses import dataclass
from io import BytesIO
from typing import IO
import bs4
from common.data_source.config import HTML_BASED_CONNECTOR_TRANSFORM_LINKS_STRATEGY, \
HtmlBasedConnectorTransformLinksStrategy, WEB_CONNECTOR_IGNORED_CLASSES, WEB_CONNECTOR_IGNORED_ELEMENTS, \
PARSE_WITH_TRAFILATURA
MINTLIFY_UNWANTED = ["sticky", "hidden"]
@dataclass
class ParsedHTML:
title: str | None
cleaned_text: str
def strip_excessive_newlines_and_spaces(document: str) -> str:
# collapse repeated spaces into one
document = re.sub(r" +", " ", document)
# remove trailing spaces
document = re.sub(r" +[\n\r]", "\n", document)
# remove repeated newlines
document = re.sub(r"[\n\r]+", "\n", document)
return document.strip()
def strip_newlines(document: str) -> str:
# HTML might contain newlines which are just whitespaces to a browser
return re.sub(r"[\n\r]+", " ", document)
def format_element_text(element_text: str, link_href: str | None) -> str:
element_text_no_newlines = strip_newlines(element_text)
if (
not link_href
or HTML_BASED_CONNECTOR_TRANSFORM_LINKS_STRATEGY
== HtmlBasedConnectorTransformLinksStrategy.STRIP
):
return element_text_no_newlines
return f"[{element_text_no_newlines}]({link_href})"
def parse_html_with_trafilatura(html_content: str) -> str:
"""Parse HTML content using trafilatura."""
import trafilatura # type: ignore
from trafilatura.settings import use_config # type: ignore
config = use_config()
config.set("DEFAULT", "include_links", "True")
config.set("DEFAULT", "include_tables", "True")
config.set("DEFAULT", "include_images", "True")
config.set("DEFAULT", "include_formatting", "True")
extracted_text = trafilatura.extract(html_content, config=config)
return strip_excessive_newlines_and_spaces(extracted_text) if extracted_text else ""
def format_document_soup(
document: bs4.BeautifulSoup, table_cell_separator: str = "\t"
) -> str:
"""Format html to a flat text document.
The following goals:
- Newlines from within the HTML are removed (as browser would ignore them as well).
- Repeated newlines/spaces are removed (as browsers would ignore them).
- Newlines only before and after headlines and paragraphs or when explicit (br or pre tag)
- Table columns/rows are separated by newline
- List elements are separated by newline and start with a hyphen
"""
text = ""
list_element_start = False
verbatim_output = 0
in_table = False
last_added_newline = False
link_href: str | None = None
for e in document.descendants:
verbatim_output -= 1
if isinstance(e, bs4.element.NavigableString):
if isinstance(e, (bs4.element.Comment, bs4.element.Doctype)):
continue
element_text = e.text
if in_table:
# Tables are represented in natural language with rows separated by newlines
# Can't have newlines then in the table elements
element_text = element_text.replace("\n", " ").strip()
            # Some tags would normally be rendered as spaces, but the logic below
            # maps them to newlines (as a browser would render e.g. br). This check
            # avoids a stray space right after such a newline.
if last_added_newline and element_text.startswith(" "):
element_text = element_text[1:]
last_added_newline = False
if element_text:
content_to_add = (
element_text
if verbatim_output > 0
else format_element_text(element_text, link_href)
)
# Don't join separate elements without any spacing
if (text and not text[-1].isspace()) and (
content_to_add and not content_to_add[0].isspace()
):
text += " "
text += content_to_add
list_element_start = False
elif isinstance(e, bs4.element.Tag):
# table is standard HTML element
if e.name == "table":
in_table = True
# TR is for rows
elif e.name == "tr" and in_table:
text += "\n"
# td for data cell, th for header
elif e.name in ["td", "th"] and in_table:
text += table_cell_separator
elif e.name == "/table":
in_table = False
elif in_table:
# don't handle other cases while in table
pass
elif e.name == "a":
href_value = e.get("href", None)
# mostly for typing, having multiple hrefs is not valid HTML
link_href = (
href_value[0] if isinstance(href_value, list) else href_value
)
elif e.name == "/a":
link_href = None
elif e.name in ["p", "div"]:
if not list_element_start:
text += "\n"
elif e.name in ["h1", "h2", "h3", "h4"]:
text += "\n"
list_element_start = False
last_added_newline = True
elif e.name == "br":
text += "\n"
list_element_start = False
last_added_newline = True
elif e.name == "li":
text += "\n- "
list_element_start = True
elif e.name == "pre":
if verbatim_output <= 0:
verbatim_output = len(list(e.childGenerator()))
return strip_excessive_newlines_and_spaces(text)
def parse_html_page_basic(text: str | BytesIO | IO[bytes]) -> str:
soup = bs4.BeautifulSoup(text, "html.parser")
return format_document_soup(soup)
def web_html_cleanup(
page_content: str | bs4.BeautifulSoup,
mintlify_cleanup_enabled: bool = True,
additional_element_types_to_discard: list[str] | None = None,
) -> ParsedHTML:
if isinstance(page_content, str):
soup = bs4.BeautifulSoup(page_content, "html.parser")
else:
soup = page_content
title_tag = soup.find("title")
title = None
if title_tag and title_tag.text:
title = title_tag.text
title_tag.extract()
# Heuristics based cleaning of elements based on css classes
unwanted_classes = copy(WEB_CONNECTOR_IGNORED_CLASSES)
if mintlify_cleanup_enabled:
unwanted_classes.extend(MINTLIFY_UNWANTED)
    for undesired_element in unwanted_classes:
        for tag in soup.find_all(
            class_=lambda x: x and undesired_element in x.split()
        ):
            tag.extract()
    for undesired_tag in WEB_CONNECTOR_IGNORED_ELEMENTS:
        for tag in soup.find_all(undesired_tag):
            tag.extract()
    if additional_element_types_to_discard:
        for undesired_tag in additional_element_types_to_discard:
            for tag in soup.find_all(undesired_tag):
                tag.extract()
soup_string = str(soup)
page_text = ""
if PARSE_WITH_TRAFILATURA:
try:
page_text = parse_html_with_trafilatura(soup_string)
if not page_text:
raise ValueError("Empty content returned by trafilatura.")
except Exception as e:
logging.info(f"Trafilatura parsing failed: {e}. Falling back on bs4.")
page_text = format_document_soup(soup)
else:
page_text = format_document_soup(soup)
# 200B is ZeroWidthSpace which we don't care for
cleaned_text = page_text.replace("\u200b", "")
return ParsedHTML(title=title, cleaned_text=cleaned_text)
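if __name__ == "__main__":
    # Minimal demo (illustrative): clean a small, made-up HTML snippet. The
    # <nav> element is dropped via the WEB_CONNECTOR_IGNORED_ELEMENTS defaults.
    sample_html = (
        "<html><head><title>Demo</title></head>"
        "<body><nav>skip me</nav><p>Hello <a href='https://example.com'>world</a>!</p></body></html>"
    )
    parsed = web_html_cleanup(sample_html)
    print(f"title={parsed.title!r}")
    print(f"text={parsed.cleaned_text!r}")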
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/moodle_connector.py | common/data_source/moodle_connector.py | from __future__ import annotations
import logging
import os
from collections.abc import Generator
from datetime import datetime, timezone
from retry import retry
from typing import Any, Optional
from markdownify import markdownify as md
from moodle import Moodle as MoodleClient, MoodleException
from common.data_source.config import INDEX_BATCH_SIZE
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
CredentialExpiredError,
InsufficientPermissionsError,
ConnectorValidationError,
)
from common.data_source.interfaces import (
LoadConnector,
PollConnector,
SecondsSinceUnixEpoch,
)
from common.data_source.models import Document
from common.data_source.utils import batch_generator, rl_requests
logger = logging.getLogger(__name__)
class MoodleConnector(LoadConnector, PollConnector):
"""Moodle LMS connector for accessing course content"""
def __init__(self, moodle_url: str, batch_size: int = INDEX_BATCH_SIZE) -> None:
self.moodle_url = moodle_url.rstrip("/")
self.batch_size = batch_size
self.moodle_client: Optional[MoodleClient] = None
def _add_token_to_url(self, file_url: str) -> str:
"""Append Moodle token to URL if missing"""
if not self.moodle_client:
return file_url
token = getattr(self.moodle_client, "token", "")
if "token=" in file_url.lower():
return file_url
delimiter = "&" if "?" in file_url else "?"
return f"{file_url}{delimiter}token={token}"
def _log_error(
self, context: str, error: Exception, level: str = "warning"
) -> None:
"""Simplified logging wrapper"""
msg = f"{context}: {error}"
if level == "error":
logger.error(msg)
else:
logger.warning(msg)
def _get_latest_timestamp(self, *timestamps: int) -> int:
"""Return latest valid timestamp"""
return max((t for t in timestamps if t and t > 0), default=0)
def _yield_in_batches(
self, generator: Generator[Document, None, None]
) -> Generator[list[Document], None, None]:
for batch in batch_generator(generator, self.batch_size):
yield batch
def load_credentials(self, credentials: dict[str, Any]) -> None:
token = credentials.get("moodle_token")
if not token:
raise ConnectorMissingCredentialError("Moodle API token is required")
try:
self.moodle_client = MoodleClient(
self.moodle_url + "/webservice/rest/server.php", token
)
self.moodle_client.core.webservice.get_site_info()
except MoodleException as e:
if "invalidtoken" in str(e).lower():
raise CredentialExpiredError("Moodle token is invalid or expired")
raise ConnectorMissingCredentialError(
f"Failed to initialize Moodle client: {e}"
)
def validate_connector_settings(self) -> None:
if not self.moodle_client:
raise ConnectorMissingCredentialError("Moodle client not initialized")
try:
site_info = self.moodle_client.core.webservice.get_site_info()
if not site_info.sitename:
raise InsufficientPermissionsError("Invalid Moodle API response")
except MoodleException as e:
msg = str(e).lower()
if "invalidtoken" in msg:
raise CredentialExpiredError("Moodle token is invalid or expired")
if "accessexception" in msg:
raise InsufficientPermissionsError(
"Insufficient permissions. Ensure web services are enabled and permissions are correct."
)
raise ConnectorValidationError(f"Moodle validation error: {e}")
except Exception as e:
raise ConnectorValidationError(f"Unexpected validation error: {e}")
# -------------------------------------------------------------------------
# Data loading & polling
# -------------------------------------------------------------------------
def load_from_state(self) -> Generator[list[Document], None, None]:
if not self.moodle_client:
raise ConnectorMissingCredentialError("Moodle client not initialized")
logger.info("Starting full load from Moodle workspace")
courses = self._get_enrolled_courses()
if not courses:
logger.warning("No courses found to process")
return
yield from self._yield_in_batches(self._process_courses(courses))
def poll_source(
self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch
) -> Generator[list[Document], None, None]:
if not self.moodle_client:
raise ConnectorMissingCredentialError("Moodle client not initialized")
logger.info(
f"Polling Moodle updates between {datetime.fromtimestamp(start)} and {datetime.fromtimestamp(end)}"
)
courses = self._get_enrolled_courses()
if not courses:
logger.warning("No courses found to poll")
return
yield from self._yield_in_batches(
self._get_updated_content(courses, start, end)
)
@retry(tries=3, delay=1, backoff=2)
def _get_enrolled_courses(self) -> list:
if not self.moodle_client:
raise ConnectorMissingCredentialError("Moodle client not initialized")
try:
return self.moodle_client.core.course.get_courses()
except MoodleException as e:
self._log_error("fetching courses", e, "error")
raise ConnectorValidationError(f"Failed to fetch courses: {e}")
@retry(tries=3, delay=1, backoff=2)
def _get_course_contents(self, course_id: int):
if not self.moodle_client:
raise ConnectorMissingCredentialError("Moodle client not initialized")
try:
return self.moodle_client.core.course.get_contents(courseid=course_id)
except MoodleException as e:
self._log_error(f"fetching course contents for {course_id}", e)
return []
def _process_courses(self, courses) -> Generator[Document, None, None]:
for course in courses:
try:
contents = self._get_course_contents(course.id)
for section in contents:
for module in section.modules:
doc = self._process_module(course, section, module)
if doc:
yield doc
except Exception as e:
self._log_error(f"processing course {course.fullname}", e)
def _get_updated_content(
self, courses, start: float, end: float
) -> Generator[Document, None, None]:
for course in courses:
try:
contents = self._get_course_contents(course.id)
for section in contents:
for module in section.modules:
times = [
getattr(module, "timecreated", 0),
getattr(module, "timemodified", 0),
]
if hasattr(module, "contents"):
times.extend(
getattr(c, "timemodified", 0)
for c in module.contents
if c and getattr(c, "timemodified", 0)
)
last_mod = self._get_latest_timestamp(*times)
if start < last_mod <= end:
doc = self._process_module(course, section, module)
if doc:
yield doc
except Exception as e:
self._log_error(f"polling course {course.fullname}", e)
def _process_module(self, course, section, module) -> Optional[Document]:
try:
mtype = module.modname
if mtype in ["label", "url"]:
return None
if mtype == "resource":
return self._process_resource(course, section, module)
if mtype == "forum":
return self._process_forum(course, section, module)
if mtype == "page":
return self._process_page(course, section, module)
if mtype in ["assign", "quiz"]:
return self._process_activity(course, section, module)
if mtype == "book":
return self._process_book(course, section, module)
except Exception as e:
self._log_error(f"processing module {getattr(module, 'name', '?')}", e)
return None
def _process_resource(self, course, section, module) -> Optional[Document]:
if not getattr(module, "contents", None):
return None
file_info = module.contents[0]
if not getattr(file_info, "fileurl", None):
return None
file_name = os.path.basename(file_info.filename)
ts = self._get_latest_timestamp(
getattr(module, "timecreated", 0),
getattr(module, "timemodified", 0),
getattr(file_info, "timemodified", 0),
)
try:
resp = rl_requests.get(
self._add_token_to_url(file_info.fileurl), timeout=60
)
resp.raise_for_status()
blob = resp.content
ext = os.path.splitext(file_name)[1] or ".bin"
semantic_id = f"{course.fullname} / {section.name} / {file_name}"
# Create metadata dictionary with relevant information
metadata = {
"moodle_url": self.moodle_url,
"course_id": getattr(course, "id", None),
"course_name": getattr(course, "fullname", None),
"course_shortname": getattr(course, "shortname", None),
"section_id": getattr(section, "id", None),
"section_name": getattr(section, "name", None),
"section_number": getattr(section, "section", None),
"module_id": getattr(module, "id", None),
"module_name": getattr(module, "name", None),
"module_type": getattr(module, "modname", None),
"module_instance": getattr(module, "instance", None),
"file_url": getattr(file_info, "fileurl", None),
"file_name": file_name,
"file_size": getattr(file_info, "filesize", len(blob)),
"file_type": getattr(file_info, "mimetype", None),
"time_created": getattr(module, "timecreated", None),
"time_modified": getattr(module, "timemodified", None),
"visible": getattr(module, "visible", None),
"groupmode": getattr(module, "groupmode", None),
}
return Document(
id=f"moodle_resource_{module.id}",
source="moodle",
semantic_identifier=semantic_id,
extension=ext,
blob=blob,
doc_updated_at=datetime.fromtimestamp(ts or 0, tz=timezone.utc),
size_bytes=len(blob),
metadata=metadata,
)
except Exception as e:
self._log_error(f"downloading resource {file_name}", e, "error")
return None
def _process_forum(self, course, section, module) -> Optional[Document]:
if not self.moodle_client or not getattr(module, "instance", None):
return None
try:
result = self.moodle_client.mod.forum.get_forum_discussions(
forumid=module.instance
)
disc_list = getattr(result, "discussions", [])
if not disc_list:
return None
markdown = [f"# {module.name}\n"]
latest_ts = self._get_latest_timestamp(
getattr(module, "timecreated", 0),
getattr(module, "timemodified", 0),
)
for d in disc_list:
markdown.append(f"## {d.name}\n\n{md(d.message or '')}\n\n---\n")
latest_ts = max(latest_ts, getattr(d, "timemodified", 0))
blob = "\n".join(markdown).encode("utf-8")
semantic_id = f"{course.fullname} / {section.name} / {module.name}"
# Create metadata dictionary with relevant information
metadata = {
"moodle_url": self.moodle_url,
"course_id": getattr(course, "id", None),
"course_name": getattr(course, "fullname", None),
"course_shortname": getattr(course, "shortname", None),
"section_id": getattr(section, "id", None),
"section_name": getattr(section, "name", None),
"section_number": getattr(section, "section", None),
"module_id": getattr(module, "id", None),
"module_name": getattr(module, "name", None),
"module_type": getattr(module, "modname", None),
"forum_id": getattr(module, "instance", None),
"discussion_count": len(disc_list),
"time_created": getattr(module, "timecreated", None),
"time_modified": getattr(module, "timemodified", None),
"visible": getattr(module, "visible", None),
"groupmode": getattr(module, "groupmode", None),
"discussions": [
{
"id": getattr(d, "id", None),
"name": getattr(d, "name", None),
"user_id": getattr(d, "userid", None),
"user_fullname": getattr(d, "userfullname", None),
"time_created": getattr(d, "timecreated", None),
"time_modified": getattr(d, "timemodified", None),
}
for d in disc_list
],
}
return Document(
id=f"moodle_forum_{module.id}",
source="moodle",
semantic_identifier=semantic_id,
extension=".md",
blob=blob,
doc_updated_at=datetime.fromtimestamp(latest_ts or 0, tz=timezone.utc),
size_bytes=len(blob),
metadata=metadata,
)
except Exception as e:
self._log_error(f"processing forum {module.name}", e)
return None
def _process_page(self, course, section, module) -> Optional[Document]:
if not getattr(module, "contents", None):
return None
file_info = module.contents[0]
if not getattr(file_info, "fileurl", None):
return None
file_name = os.path.basename(file_info.filename)
ts = self._get_latest_timestamp(
getattr(module, "timecreated", 0),
getattr(module, "timemodified", 0),
getattr(file_info, "timemodified", 0),
)
try:
resp = rl_requests.get(
self._add_token_to_url(file_info.fileurl), timeout=60
)
resp.raise_for_status()
blob = resp.content
ext = os.path.splitext(file_name)[1] or ".html"
semantic_id = f"{course.fullname} / {section.name} / {module.name}"
# Create metadata dictionary with relevant information
metadata = {
"moodle_url": self.moodle_url,
"course_id": getattr(course, "id", None),
"course_name": getattr(course, "fullname", None),
"course_shortname": getattr(course, "shortname", None),
"section_id": getattr(section, "id", None),
"section_name": getattr(section, "name", None),
"section_number": getattr(section, "section", None),
"module_id": getattr(module, "id", None),
"module_name": getattr(module, "name", None),
"module_type": getattr(module, "modname", None),
"module_instance": getattr(module, "instance", None),
"page_url": getattr(file_info, "fileurl", None),
"file_name": file_name,
"file_size": getattr(file_info, "filesize", len(blob)),
"file_type": getattr(file_info, "mimetype", None),
"time_created": getattr(module, "timecreated", None),
"time_modified": getattr(module, "timemodified", None),
"visible": getattr(module, "visible", None),
"groupmode": getattr(module, "groupmode", None),
}
return Document(
id=f"moodle_page_{module.id}",
source="moodle",
semantic_identifier=semantic_id,
extension=ext,
blob=blob,
doc_updated_at=datetime.fromtimestamp(ts or 0, tz=timezone.utc),
size_bytes=len(blob),
metadata=metadata,
)
except Exception as e:
self._log_error(f"processing page {file_name}", e, "error")
return None
def _process_activity(self, course, section, module) -> Optional[Document]:
desc = getattr(module, "description", "")
if not desc:
return None
mtype, mname = module.modname, module.name
markdown = f"# {mname}\n\n**Type:** {mtype.capitalize()}\n\n{md(desc)}"
ts = self._get_latest_timestamp(
getattr(module, "timecreated", 0),
getattr(module, "timemodified", 0),
getattr(module, "added", 0),
)
semantic_id = f"{course.fullname} / {section.name} / {mname}"
blob = markdown.encode("utf-8")
# Create metadata dictionary with relevant information
metadata = {
"moodle_url": self.moodle_url,
"course_id": getattr(course, "id", None),
"course_name": getattr(course, "fullname", None),
"course_shortname": getattr(course, "shortname", None),
"section_id": getattr(section, "id", None),
"section_name": getattr(section, "name", None),
"section_number": getattr(section, "section", None),
"module_id": getattr(module, "id", None),
"module_name": getattr(module, "name", None),
"module_type": getattr(module, "modname", None),
"activity_type": mtype,
"activity_instance": getattr(module, "instance", None),
"description": desc,
"time_created": getattr(module, "timecreated", None),
"time_modified": getattr(module, "timemodified", None),
"added": getattr(module, "added", None),
"visible": getattr(module, "visible", None),
"groupmode": getattr(module, "groupmode", None),
}
return Document(
id=f"moodle_{mtype}_{module.id}",
source="moodle",
semantic_identifier=semantic_id,
extension=".md",
blob=blob,
doc_updated_at=datetime.fromtimestamp(ts or 0, tz=timezone.utc),
size_bytes=len(blob),
metadata=metadata,
)
def _process_book(self, course, section, module) -> Optional[Document]:
if not getattr(module, "contents", None):
return None
contents = module.contents
chapters = [
c
for c in contents
if getattr(c, "fileurl", None)
and os.path.basename(c.filename) == "index.html"
]
if not chapters:
return None
latest_ts = self._get_latest_timestamp(
getattr(module, "timecreated", 0),
getattr(module, "timemodified", 0),
*[getattr(c, "timecreated", 0) for c in contents],
*[getattr(c, "timemodified", 0) for c in contents],
)
markdown_parts = [f"# {module.name}\n"]
chapter_info = []
for ch in chapters:
try:
resp = rl_requests.get(self._add_token_to_url(ch.fileurl), timeout=60)
resp.raise_for_status()
html = resp.content.decode("utf-8", errors="ignore")
markdown_parts.append(md(html) + "\n\n---\n")
# Collect chapter information for metadata
chapter_info.append(
{
"chapter_id": getattr(ch, "chapterid", None),
"title": getattr(ch, "title", None),
"filename": getattr(ch, "filename", None),
"fileurl": getattr(ch, "fileurl", None),
"time_created": getattr(ch, "timecreated", None),
"time_modified": getattr(ch, "timemodified", None),
"size": getattr(ch, "filesize", None),
}
)
except Exception as e:
self._log_error(f"processing book chapter {ch.filename}", e)
blob = "\n".join(markdown_parts).encode("utf-8")
semantic_id = f"{course.fullname} / {section.name} / {module.name}"
# Create metadata dictionary with relevant information
metadata = {
"moodle_url": self.moodle_url,
"course_id": getattr(course, "id", None),
"course_name": getattr(course, "fullname", None),
"course_shortname": getattr(course, "shortname", None),
"section_id": getattr(section, "id", None),
"section_name": getattr(section, "name", None),
"section_number": getattr(section, "section", None),
"module_id": getattr(module, "id", None),
"module_name": getattr(module, "name", None),
"module_type": getattr(module, "modname", None),
"book_id": getattr(module, "instance", None),
"chapter_count": len(chapters),
"chapters": chapter_info,
"time_created": getattr(module, "timecreated", None),
"time_modified": getattr(module, "timemodified", None),
"visible": getattr(module, "visible", None),
"groupmode": getattr(module, "groupmode", None),
}
return Document(
id=f"moodle_book_{module.id}",
source="moodle",
semantic_identifier=semantic_id,
extension=".md",
blob=blob,
doc_updated_at=datetime.fromtimestamp(latest_ts or 0, tz=timezone.utc),
size_bytes=len(blob),
metadata=metadata,
)
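# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): the constructor arguments, credential
# keys, and even the `poll_source` entry-point name are assumptions, since
# those definitions live earlier in this module.
#
#     import time
#
#     connector = MoodleConnector(...)   # hypothetical construction
#     connector.load_credentials({...})  # hypothetical credential dict
#     end = time.time()
#     start = end - 24 * 60 * 60         # poll the last 24 hours
#     for batch in connector.poll_source(start, end):
#         for doc in batch:              # the poller yields batches of Documents
#             print(doc.semantic_identifier, doc.size_bytes)
# ---------------------------------------------------------------------------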
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/confluence_connector.py | common/data_source/confluence_connector.py |
"""Confluence connector"""
import copy
import json
import logging
import time
from datetime import datetime, timezone, timedelta
from pathlib import Path
from typing import Any, cast, Iterator, Callable, Generator
import requests
from typing_extensions import override
from urllib.parse import quote
import bs4
from atlassian.errors import ApiError
from atlassian import Confluence
from requests.exceptions import HTTPError
from common.data_source.config import INDEX_BATCH_SIZE, DocumentSource, CONTINUE_ON_CONNECTOR_FAILURE, \
CONFLUENCE_CONNECTOR_LABELS_TO_SKIP, CONFLUENCE_TIMEZONE_OFFSET, CONFLUENCE_CONNECTOR_USER_PROFILES_OVERRIDE, \
CONFLUENCE_SYNC_TIME_BUFFER_SECONDS, \
OAUTH_CONFLUENCE_CLOUD_CLIENT_ID, OAUTH_CONFLUENCE_CLOUD_CLIENT_SECRET, _DEFAULT_PAGINATION_LIMIT, \
_PROBLEMATIC_EXPANSIONS, _REPLACEMENT_EXPANSIONS, _USER_NOT_FOUND, _COMMENT_EXPANSION_FIELDS, \
_ATTACHMENT_EXPANSION_FIELDS, _PAGE_EXPANSION_FIELDS, ONE_DAY, ONE_HOUR, _RESTRICTIONS_EXPANSION_FIELDS, \
_SLIM_DOC_BATCH_SIZE, CONFLUENCE_CONNECTOR_ATTACHMENT_SIZE_THRESHOLD
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
InsufficientPermissionsError,
UnexpectedValidationError, CredentialExpiredError
)
from common.data_source.html_utils import format_document_soup
from common.data_source.interfaces import (
ConnectorCheckpoint,
CredentialsConnector,
SecondsSinceUnixEpoch,
SlimConnectorWithPermSync, StaticCredentialsProvider, CheckpointedConnector, SlimConnector,
CredentialsProviderInterface, ConfluenceUser, IndexingHeartbeatInterface, AttachmentProcessingResult,
CheckpointOutput
)
from common.data_source.models import ConnectorFailure, Document, TextSection, ImageSection, BasicExpertInfo, \
DocumentFailure, GenerateSlimDocumentOutput, SlimDocument, ExternalAccess
from common.data_source.utils import load_all_docs_from_checkpoint_connector, scoped_url, \
process_confluence_user_profiles_override, confluence_refresh_tokens, run_with_timeout, _handle_http_error, \
update_param_in_path, get_start_param_from_url, build_confluence_document_id, datetime_from_string, \
is_atlassian_date_error, validate_attachment_filetype
from rag.utils.redis_conn import RedisDB, REDIS_CONN
_USER_ID_TO_DISPLAY_NAME_CACHE: dict[str, str | None] = {}
_USER_EMAIL_CACHE: dict[str, str | None] = {}
class ConfluenceCheckpoint(ConnectorCheckpoint):
next_page_url: str | None
class ConfluenceRateLimitError(Exception):
pass
class OnyxConfluence:
"""
This is a custom Confluence class that:
A. overrides the default Confluence class to add a custom CQL method.
    B. automatically wraps all methods with handle_confluence_rate_limit.
    The custom CQL method is necessary because the default Confluence class
    does not properly support CQL expansions.
"""
CREDENTIAL_PREFIX = "connector:confluence:credential"
CREDENTIAL_TTL = 300 # 5 min
PROBE_TIMEOUT = 5 # 5 seconds
def __init__(
self,
is_cloud: bool,
url: str,
credentials_provider: CredentialsProviderInterface,
timeout: int | None = None,
scoped_token: bool = False,
# should generally not be passed in, but making it overridable for
# easier testing
confluence_user_profiles_override: list[dict[str, str]] | None = (
CONFLUENCE_CONNECTOR_USER_PROFILES_OVERRIDE
),
) -> None:
self.base_url = url #'/'.join(url.rstrip("/").split("/")[:-1])
url = scoped_url(url, "confluence") if scoped_token else url
self._is_cloud = is_cloud
self._url = url.rstrip("/")
self._credentials_provider = credentials_provider
self.scoped_token = scoped_token
self.redis_client: RedisDB | None = None
self.static_credentials: dict[str, Any] | None = None
if self._credentials_provider.is_dynamic():
self.redis_client = REDIS_CONN
else:
self.static_credentials = self._credentials_provider.get_credentials()
self._confluence = Confluence(url)
self.credential_key: str = (
self.CREDENTIAL_PREFIX
+ f":credential_{self._credentials_provider.get_provider_key()}"
)
self._kwargs: Any = None
self.shared_base_kwargs: dict[str, str | int | bool] = {
"api_version": "cloud" if is_cloud else "latest",
"backoff_and_retry": True,
"cloud": is_cloud,
}
if timeout:
self.shared_base_kwargs["timeout"] = timeout
self._confluence_user_profiles_override = (
process_confluence_user_profiles_override(confluence_user_profiles_override)
if confluence_user_profiles_override
else None
)
def _renew_credentials(self) -> tuple[dict[str, Any], bool]:
"""credential_json - the current json credentials
Returns a tuple
1. The up-to-date credentials
2. True if the credentials were updated
This method is intended to be used within a distributed lock.
Lock, call this, update credentials if the tokens were refreshed, then release
"""
# static credentials are preloaded, so no locking/redis required
if self.static_credentials:
return self.static_credentials, False
if not self.redis_client:
raise RuntimeError("self.redis_client is None")
# dynamic credentials need locking
# check redis first, then fallback to the DB
credential_raw = self.redis_client.get(self.credential_key)
if credential_raw is not None:
credential_bytes = cast(bytes, credential_raw)
credential_str = credential_bytes.decode("utf-8")
credential_json: dict[str, Any] = json.loads(credential_str)
else:
credential_json = self._credentials_provider.get_credentials()
if "confluence_refresh_token" not in credential_json:
# static credentials ... cache them permanently and return
self.static_credentials = credential_json
return credential_json, False
if not OAUTH_CONFLUENCE_CLOUD_CLIENT_ID:
raise RuntimeError("OAUTH_CONFLUENCE_CLOUD_CLIENT_ID must be set!")
if not OAUTH_CONFLUENCE_CLOUD_CLIENT_SECRET:
raise RuntimeError("OAUTH_CONFLUENCE_CLOUD_CLIENT_SECRET must be set!")
# check if we should refresh tokens. we're deciding to refresh halfway
# to expiration
now = datetime.now(timezone.utc)
created_at = datetime.fromisoformat(credential_json["created_at"])
expires_in: int = credential_json["expires_in"]
renew_at = created_at + timedelta(seconds=expires_in // 2)
if now <= renew_at:
# cached/current credentials are reasonably up to date
return credential_json, False
# we need to refresh
logging.info("Renewing Confluence Cloud credentials...")
new_credentials = confluence_refresh_tokens(
OAUTH_CONFLUENCE_CLOUD_CLIENT_ID,
OAUTH_CONFLUENCE_CLOUD_CLIENT_SECRET,
credential_json["cloud_id"],
credential_json["confluence_refresh_token"],
)
# store the new credentials to redis and to the db through the provider
        # redis: we use a 5 min TTL because we are given a 10-minute grace period
# when keys are rotated. it's easier to expire the cached credentials
# reasonably frequently rather than trying to handle strong synchronization
# between the db and redis everywhere the credentials might be updated
new_credential_str = json.dumps(new_credentials)
self.redis_client.set(
self.credential_key, new_credential_str, exp=self.CREDENTIAL_TTL
)
self._credentials_provider.set_credentials(new_credentials)
return new_credentials, True
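    # Intended call pattern (a sketch mirroring _initialize_connection below):
    # the credentials provider doubles as the distributed lock's context
    # manager.
    #
    #     with self._credentials_provider:
    #         credentials, renewed = self._renew_credentials()
    #         if renewed:
    #             ...  # re-create the underlying client with the fresh tokens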
@staticmethod
def _make_oauth2_dict(credentials: dict[str, Any]) -> dict[str, Any]:
oauth2_dict: dict[str, Any] = {}
if "confluence_refresh_token" in credentials:
oauth2_dict["client_id"] = OAUTH_CONFLUENCE_CLOUD_CLIENT_ID
oauth2_dict["token"] = {}
oauth2_dict["token"]["access_token"] = credentials[
"confluence_access_token"
]
return oauth2_dict
def _probe_connection(
self,
**kwargs: Any,
) -> None:
merged_kwargs = {**self.shared_base_kwargs, **kwargs}
# add special timeout to make sure that we don't hang indefinitely
merged_kwargs["timeout"] = self.PROBE_TIMEOUT
with self._credentials_provider:
credentials, _ = self._renew_credentials()
if self.scoped_token:
# v2 endpoint doesn't always work with scoped tokens, use v1
token = credentials["confluence_access_token"]
probe_url = f"{self.base_url}/rest/api/space?limit=1"
import requests
logging.info(f"First and Last 5 of token: {token[:5]}...{token[-5:]}")
try:
r = requests.get(
probe_url,
headers={"Authorization": f"Bearer {token}"},
timeout=10,
)
r.raise_for_status()
except HTTPError as e:
if e.response.status_code == 403:
logging.warning(
"scoped token authenticated but not valid for probe endpoint (spaces)"
)
else:
if "WWW-Authenticate" in e.response.headers:
logging.warning(
f"WWW-Authenticate: {e.response.headers['WWW-Authenticate']}"
)
logging.warning(f"Full error: {e.response.text}")
raise e
return
# probe connection with direct client, no retries
if "confluence_refresh_token" in credentials:
logging.info("Probing Confluence with OAuth Access Token.")
oauth2_dict: dict[str, Any] = OnyxConfluence._make_oauth2_dict(
credentials
)
url = (
f"https://api.atlassian.com/ex/confluence/{credentials['cloud_id']}"
)
confluence_client_with_minimal_retries = Confluence(
url=url, oauth2=oauth2_dict, **merged_kwargs
)
else:
logging.info("Probing Confluence with Personal Access Token.")
url = self._url
if self._is_cloud:
logging.info("running with cloud client")
confluence_client_with_minimal_retries = Confluence(
url=url,
username=credentials["confluence_username"],
password=credentials["confluence_access_token"],
**merged_kwargs,
)
else:
confluence_client_with_minimal_retries = Confluence(
url=url,
token=credentials["confluence_access_token"],
**merged_kwargs,
)
# This call sometimes hangs indefinitely, so we run it in a timeout
spaces = run_with_timeout(
timeout=10,
func=confluence_client_with_minimal_retries.get_all_spaces,
limit=1,
)
# uncomment the following for testing
# the following is an attempt to retrieve the user's timezone
        # Unfortunately, all data is returned in UTC regardless of the user's time zone
        # even though CQL parses incoming times based on the user's time zone
# space_key = spaces["results"][0]["key"]
# space_details = confluence_client_with_minimal_retries.cql(f"space.key={space_key}+AND+type=space")
if not spaces:
raise RuntimeError(
f"No spaces found at {url}! "
"Check your credentials and wiki_base and make sure "
"is_cloud is set correctly."
)
logging.info("Confluence probe succeeded.")
def _initialize_connection(
self,
**kwargs: Any,
) -> None:
"""Called externally to init the connection in a thread safe manner."""
merged_kwargs = {**self.shared_base_kwargs, **kwargs}
with self._credentials_provider:
credentials, _ = self._renew_credentials()
self._confluence = self._initialize_connection_helper(
credentials, **merged_kwargs
)
self._kwargs = merged_kwargs
def _initialize_connection_helper(
self,
credentials: dict[str, Any],
**kwargs: Any,
) -> Confluence:
"""Called internally to init the connection. Distributed locking
to prevent multiple threads from modifying the credentials
must be handled around this function."""
confluence = None
# probe connection with direct client, no retries
if "confluence_refresh_token" in credentials:
logging.info("Connecting to Confluence Cloud with OAuth Access Token.")
oauth2_dict: dict[str, Any] = OnyxConfluence._make_oauth2_dict(credentials)
url = f"https://api.atlassian.com/ex/confluence/{credentials['cloud_id']}"
confluence = Confluence(url=url, oauth2=oauth2_dict, **kwargs)
else:
logging.info(
f"Connecting to Confluence with Personal Access Token as user: {credentials['confluence_username']}"
)
if self._is_cloud:
confluence = Confluence(
url=self._url,
username=credentials["confluence_username"],
password=credentials["confluence_access_token"],
**kwargs,
)
else:
confluence = Confluence(
url=self._url,
token=credentials["confluence_access_token"],
**kwargs,
)
return confluence
# https://developer.atlassian.com/cloud/confluence/rate-limiting/
# This uses the native rate limiting option provided by the
# confluence client and otherwise applies a simpler set of error handling.
def _make_rate_limited_confluence_method(
self, name: str, credential_provider: CredentialsProviderInterface | None
) -> Callable[..., Any]:
def wrapped_call(*args: list[Any], **kwargs: Any) -> Any:
MAX_RETRIES = 5
TIMEOUT = 600
timeout_at = time.monotonic() + TIMEOUT
for attempt in range(MAX_RETRIES):
if time.monotonic() > timeout_at:
raise TimeoutError(
f"Confluence call attempts took longer than {TIMEOUT} seconds."
)
# we're relying more on the client to rate limit itself
# and applying our own retries in a more specific set of circumstances
try:
if credential_provider:
with credential_provider:
credentials, renewed = self._renew_credentials()
if renewed:
self._confluence = self._initialize_connection_helper(
credentials, **self._kwargs
)
attr = getattr(self._confluence, name, None)
if attr is None:
# The underlying Confluence client doesn't have this attribute
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{name}'"
)
return attr(*args, **kwargs)
else:
attr = getattr(self._confluence, name, None)
if attr is None:
# The underlying Confluence client doesn't have this attribute
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{name}'"
)
return attr(*args, **kwargs)
except HTTPError as e:
delay_until = _handle_http_error(e, attempt)
logging.warning(
f"HTTPError in confluence call. "
f"Retrying in {delay_until} seconds..."
)
while time.monotonic() < delay_until:
# in the future, check a signal here to exit
time.sleep(1)
except AttributeError as e:
# Some error within the Confluence library, unclear why it fails.
# Users reported it to be intermittent, so just retry
if attempt == MAX_RETRIES - 1:
raise e
logging.exception(
"Confluence Client raised an AttributeError. Retrying..."
)
time.sleep(5)
return wrapped_call
def __getattr__(self, name: str) -> Any:
"""Dynamically intercept attribute/method access."""
attr = getattr(self._confluence, name, None)
if attr is None:
# The underlying Confluence client doesn't have this attribute
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{name}'"
)
# If it's not a method, just return it after ensuring token validity
if not callable(attr):
return attr
# skip methods that start with "_"
if name.startswith("_"):
return attr
# wrap the method with our retry handler
rate_limited_method: Callable[..., Any] = (
self._make_rate_limited_confluence_method(name, self._credentials_provider)
)
return rate_limited_method
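    # Net effect (illustrative): a call such as `client.get_all_spaces(limit=1)`
    # on this wrapper resolves through __getattr__ above and runs inside the
    # credential-renewal/retry loop built by _make_rate_limited_confluence_method.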
def _try_one_by_one_for_paginated_url(
self,
url_suffix: str,
initial_start: int,
limit: int,
) -> Generator[dict[str, Any], None, str | None]:
"""
Go through `limit` items, starting at `initial_start` one by one (e.g. using
`limit=1` for each call).
If we encounter an error, we skip the item and try the next one. We will return
the items we were able to retrieve successfully.
Returns the expected next url_suffix. Returns None if it thinks we've hit the end.
TODO (chris): make this yield failures as well as successes.
TODO (chris): make this work for confluence cloud somehow.
"""
if self._is_cloud:
raise RuntimeError("This method is not implemented for Confluence Cloud.")
found_empty_page = False
temp_url_suffix = url_suffix
for ind in range(limit):
try:
temp_url_suffix = update_param_in_path(
url_suffix, "start", str(initial_start + ind)
)
temp_url_suffix = update_param_in_path(temp_url_suffix, "limit", "1")
logging.info(f"Making recovery confluence call to {temp_url_suffix}")
raw_response = self.get(path=temp_url_suffix, advanced_mode=True)
raw_response.raise_for_status()
latest_results = raw_response.json().get("results", [])
yield from latest_results
if not latest_results:
# no more results, break out of the loop
logging.info(
f"No results found for call '{temp_url_suffix}'"
"Stopping pagination."
)
found_empty_page = True
break
except Exception:
logging.exception(
f"Error in confluence call to {temp_url_suffix}. Continuing."
)
if found_empty_page:
return None
# if we got here, we successfully tried `limit` items
return update_param_in_path(url_suffix, "start", str(initial_start + limit))
def _paginate_url(
self,
url_suffix: str,
limit: int | None = None,
# Called with the next url to use to get the next page
next_page_callback: Callable[[str], None] | None = None,
force_offset_pagination: bool = False,
) -> Iterator[dict[str, Any]]:
"""
This will paginate through the top level query.
"""
if not limit:
limit = _DEFAULT_PAGINATION_LIMIT
url_suffix = update_param_in_path(url_suffix, "limit", str(limit))
while url_suffix:
logging.debug(f"Making confluence call to {url_suffix}")
try:
raw_response = self.get(
path=url_suffix,
advanced_mode=True,
params={
"body-format": "atlas_doc_format",
"expand": "body.atlas_doc_format",
},
)
except Exception as e:
logging.exception(f"Error in confluence call to {url_suffix}")
raise e
try:
raw_response.raise_for_status()
except Exception as e:
logging.warning(f"Error in confluence call to {url_suffix}")
# If the problematic expansion is in the url, replace it
# with the replacement expansion and try again
# If that fails, raise the error
if _PROBLEMATIC_EXPANSIONS in url_suffix:
logging.warning(
f"Replacing {_PROBLEMATIC_EXPANSIONS} with {_REPLACEMENT_EXPANSIONS}"
" and trying again."
)
url_suffix = url_suffix.replace(
_PROBLEMATIC_EXPANSIONS,
_REPLACEMENT_EXPANSIONS,
)
continue
# If we fail due to a 500, try one by one.
# NOTE: this iterative approach only works for server, since cloud uses cursor-based
# pagination
if raw_response.status_code == 500 and not self._is_cloud:
initial_start = get_start_param_from_url(url_suffix)
if initial_start is None:
# can't handle this if we don't have offset-based pagination
raise
# this will just yield the successful items from the batch
new_url_suffix = yield from self._try_one_by_one_for_paginated_url(
url_suffix,
initial_start=initial_start,
limit=limit,
)
# this means we ran into an empty page
if new_url_suffix is None:
if next_page_callback:
next_page_callback("")
break
url_suffix = new_url_suffix
continue
else:
logging.exception(
f"Error in confluence call to {url_suffix} \n"
f"Raw Response Text: {raw_response.text} \n"
f"Full Response: {raw_response.__dict__} \n"
f"Error: {e} \n"
)
raise
try:
next_response = raw_response.json()
except Exception as e:
logging.exception(
f"Failed to parse response as JSON. Response: {raw_response.__dict__}"
)
raise e
# Yield the results individually.
results = cast(list[dict[str, Any]], next_response.get("results", []))
# Note 1:
# Make sure we don't update the start by more than the amount
# of results we were able to retrieve. The Confluence API has a
# weird behavior where if you pass in a limit that is too large for
# the configured server, it will artificially limit the amount of
# results returned BUT will not apply this to the start parameter.
# This will cause us to miss results.
#
# Note 2:
# We specifically perform manual yielding (i.e., `for x in xs: yield x`) as opposed to using a `yield from xs`
# because we *have to call the `next_page_callback`* prior to yielding the last element!
#
# If we did:
#
# ```py
# yield from results
# if next_page_callback:
# next_page_callback(url_suffix)
# ```
#
# then the logic would fail since the iterator would finish (and the calling scope would exit out of its driving
# loop) prior to the callback being called.
old_url_suffix = url_suffix
updated_start = get_start_param_from_url(old_url_suffix)
url_suffix = cast(str, next_response.get("_links", {}).get("next", ""))
for i, result in enumerate(results):
updated_start += 1
if url_suffix and next_page_callback and i == len(results) - 1:
# update the url if we're on the last result in the page
if not self._is_cloud:
# If confluence claims there are more results, we update the start param
# based on how many results were returned and try again.
url_suffix = update_param_in_path(
url_suffix, "start", str(updated_start)
)
# notify the caller of the new url
next_page_callback(url_suffix)
elif force_offset_pagination and i == len(results) - 1:
url_suffix = update_param_in_path(
old_url_suffix, "start", str(updated_start)
)
yield result
# we've observed that Confluence sometimes returns a next link despite giving
# 0 results. This is a bug with Confluence, so we need to check for it and
# stop paginating.
if url_suffix and not results:
logging.info(
f"No results found for call '{old_url_suffix}' despite next link "
"being present. Stopping pagination."
)
break
def build_cql_url(self, cql: str, expand: str | None = None) -> str:
expand_string = f"&expand={expand}" if expand else ""
return f"rest/api/content/search?cql={cql}{expand_string}"
def paginated_cql_retrieval(
self,
cql: str,
expand: str | None = None,
limit: int | None = None,
) -> Iterator[dict[str, Any]]:
"""
The content/search endpoint can be used to fetch pages, attachments, and comments.
"""
cql_url = self.build_cql_url(cql, expand)
yield from self._paginate_url(cql_url, limit)
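    # Example usage (illustrative):
    #
    #     for result in confluence.paginated_cql_retrieval(cql="type=page"):
    #         ...  # each `result` is one raw JSON dict from content/search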
def paginated_page_retrieval(
self,
cql_url: str,
limit: int,
# Called with the next url to use to get the next page
next_page_callback: Callable[[str], None] | None = None,
) -> Iterator[dict[str, Any]]:
"""
Error handling (and testing) wrapper for _paginate_url,
because the current approach to page retrieval involves handling the
next page links manually.
"""
try:
yield from self._paginate_url(
cql_url, limit=limit, next_page_callback=next_page_callback
)
except Exception as e:
logging.exception(f"Error in paginated_page_retrieval: {e}")
raise e
def cql_paginate_all_expansions(
self,
cql: str,
expand: str | None = None,
limit: int | None = None,
) -> Iterator[dict[str, Any]]:
"""
This function will paginate through the top level query first, then
paginate through all the expansions.
"""
def _traverse_and_update(data: dict | list) -> None:
if isinstance(data, dict):
next_url = data.get("_links", {}).get("next")
if next_url and "results" in data:
data["results"].extend(self._paginate_url(next_url, limit=limit))
for value in data.values():
_traverse_and_update(value)
elif isinstance(data, list):
for item in data:
_traverse_and_update(item)
for confluence_object in self.paginated_cql_retrieval(cql, expand, limit):
_traverse_and_update(confluence_object)
yield confluence_object
def paginated_cql_user_retrieval(
self,
expand: str | None = None,
limit: int | None = None,
) -> Iterator[ConfluenceUser]:
"""
The search/user endpoint can be used to fetch users.
        It's a separate endpoint from the content/search endpoint and is used only for users.
It's very similar to the content/search endpoint.
"""
# this is needed since there is a live bug with Confluence Server/Data Center
# where not all users are returned by the APIs. This is a workaround needed until
# that is patched.
if self._confluence_user_profiles_override:
yield from self._confluence_user_profiles_override
elif self._is_cloud:
cql = "type=user"
url = "rest/api/search/user"
expand_string = f"&expand={expand}" if expand else ""
url += f"?cql={cql}{expand_string}"
for user_result in self._paginate_url(
url, limit, force_offset_pagination=True
):
user = user_result["user"]
yield ConfluenceUser(
user_id=user["accountId"],
username=None,
display_name=user["displayName"],
email=user.get("email"),
type=user["accountType"],
)
else:
for user in self._paginate_url("rest/api/user/list", limit):
yield ConfluenceUser(
user_id=user["userKey"],
username=user["username"],
display_name=user["displayName"],
email=None,
type=user.get("type", "user"),
)
def paginated_groups_by_user_retrieval(
self,
user_id: str, # accountId in Cloud, userKey in Server
limit: int | None = None,
) -> Iterator[dict[str, Any]]:
"""
        This is not a CQL query.
It's a confluence specific endpoint that can be used to fetch groups.
"""
user_field = "accountId" if self._is_cloud else "key"
user_value = user_id
# Server uses userKey (but calls it key during the API call), Cloud uses accountId
user_query = f"{user_field}={quote(user_value)}"
url = f"rest/api/user/memberof?{user_query}"
yield from self._paginate_url(url, limit, force_offset_pagination=True)
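    # For example, for a Cloud user this pages through
    # "rest/api/user/memberof?accountId=<accountId>"; on Server/Data Center
    # the query parameter is "key=<userKey>" instead.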
def paginated_groups_retrieval(
self,
limit: int | None = None,
) -> Iterator[dict[str, Any]]:
"""
        This is not a CQL query.
It's a confluence specific endpoint that can be used to fetch groups.
"""
yield from self._paginate_url("rest/api/group", limit)
def paginated_group_members_retrieval(
self,
group_name: str,
limit: int | None = None,
) -> Iterator[dict[str, Any]]:
"""
        This is not a CQL query.
It's a confluence specific endpoint that can be used to fetch the members of a group.
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | true |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/teams_connector.py | common/data_source/teams_connector.py | """Microsoft Teams connector"""
from typing import Any
import msal
from office365.graph_client import GraphClient
from office365.runtime.client_request_exception import ClientRequestException
from common.data_source.exceptions import (
ConnectorValidationError,
InsufficientPermissionsError,
UnexpectedValidationError, ConnectorMissingCredentialError
)
from common.data_source.interfaces import (
SecondsSinceUnixEpoch,
SlimConnectorWithPermSync, CheckpointedConnectorWithPermSync
)
from common.data_source.models import (
ConnectorCheckpoint
)
_SLIM_DOC_BATCH_SIZE = 5000
class TeamsCheckpoint(ConnectorCheckpoint):
"""Teams-specific checkpoint"""
todo_team_ids: list[str] | None = None
class TeamsConnector(CheckpointedConnectorWithPermSync, SlimConnectorWithPermSync):
"""Microsoft Teams connector for accessing Teams messages and channels"""
def __init__(self, batch_size: int = _SLIM_DOC_BATCH_SIZE) -> None:
self.batch_size = batch_size
self.teams_client = None
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Load Microsoft Teams credentials"""
try:
tenant_id = credentials.get("tenant_id")
client_id = credentials.get("client_id")
client_secret = credentials.get("client_secret")
if not all([tenant_id, client_id, client_secret]):
raise ConnectorMissingCredentialError("Microsoft Teams credentials are incomplete")
# Create MSAL confidential client
app = msal.ConfidentialClientApplication(
client_id=client_id,
client_credential=client_secret,
authority=f"https://login.microsoftonline.com/{tenant_id}"
)
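            # Note: with a client secret and the ".default" Graph scope below,
            # MSAL performs the OAuth2 client-credentials ("app-only") flow,
            # so the token carries application permissions rather than a
            # user's delegated permissions.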
# Get access token
result = app.acquire_token_for_client(scopes=["https://graph.microsoft.com/.default"])
if "access_token" not in result:
raise ConnectorMissingCredentialError("Failed to acquire Microsoft Teams access token")
# Create Graph client for Teams
self.teams_client = GraphClient(result["access_token"])
return None
except Exception as e:
raise ConnectorMissingCredentialError(f"Microsoft Teams: {e}")
def validate_connector_settings(self) -> None:
"""Validate Microsoft Teams connector settings"""
if not self.teams_client:
raise ConnectorMissingCredentialError("Microsoft Teams")
try:
# Test connection by getting teams
teams = self.teams_client.teams.get().execute_query()
if not teams:
raise ConnectorValidationError("Failed to access Microsoft Teams")
except ClientRequestException as e:
if "401" in str(e) or "403" in str(e):
raise InsufficientPermissionsError("Invalid credentials or insufficient permissions")
else:
raise UnexpectedValidationError(f"Microsoft Teams validation error: {e}")
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> Any:
"""Poll Microsoft Teams for recent messages"""
# Simplified implementation - in production this would handle actual polling
return []
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: ConnectorCheckpoint,
) -> Any:
"""Load documents from checkpoint"""
# Simplified implementation
return []
def build_dummy_checkpoint(self) -> ConnectorCheckpoint:
"""Build dummy checkpoint"""
return TeamsCheckpoint()
def validate_checkpoint_json(self, checkpoint_json: str) -> ConnectorCheckpoint:
"""Validate checkpoint JSON"""
# Simplified implementation
return TeamsCheckpoint()
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: Any = None,
) -> Any:
"""Retrieve all simplified documents with permission sync"""
# Simplified implementation
return [] | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/cross_connector_utils/retry_wrapper.py | common/data_source/cross_connector_utils/retry_wrapper.py | from collections.abc import Callable
import logging
from logging import Logger
from typing import Any
from typing import cast
from typing import TypeVar
import requests
from retry import retry
from common.data_source.config import REQUEST_TIMEOUT_SECONDS
F = TypeVar("F", bound=Callable[..., Any])
logger = logging.getLogger(__name__)
def retry_builder(
tries: int = 20,
delay: float = 0.1,
max_delay: float | None = 60,
backoff: float = 2,
jitter: tuple[float, float] | float = 1,
exceptions: type[Exception] | tuple[type[Exception], ...] = (Exception,),
) -> Callable[[F], F]:
"""Builds a generic wrapper/decorator for calls to external APIs that
may fail due to rate limiting, flakes, or other reasons. Applies exponential
backoff with jitter to retry the call."""
def retry_with_default(func: F) -> F:
@retry(
tries=tries,
delay=delay,
max_delay=max_delay,
backoff=backoff,
jitter=jitter,
logger=cast(Logger, logger),
exceptions=exceptions,
)
def wrapped_func(*args: list, **kwargs: dict[str, Any]) -> Any:
return func(*args, **kwargs)
return cast(F, wrapped_func)
return retry_with_default
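# Example usage (illustrative; `fetch_page` is a hypothetical helper):
#
#     @retry_builder(tries=5, delay=0.5, exceptions=(requests.RequestException,))
#     def fetch_page(url: str) -> requests.Response:
#         return requests.get(url, timeout=REQUEST_TIMEOUT_SECONDS)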
def request_with_retries(
method: str,
url: str,
*,
data: dict[str, Any] | None = None,
headers: dict[str, Any] | None = None,
params: dict[str, Any] | None = None,
timeout: int = REQUEST_TIMEOUT_SECONDS,
stream: bool = False,
tries: int = 8,
delay: float = 1,
backoff: float = 2,
) -> requests.Response:
@retry(tries=tries, delay=delay, backoff=backoff, logger=cast(Logger, logger))
def _make_request() -> requests.Response:
response = requests.request(
method=method,
url=url,
data=data,
headers=headers,
params=params,
timeout=timeout,
stream=stream,
)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
logging.exception(
"Request failed:\n%s",
{
"method": method,
"url": url,
"data": data,
"headers": headers,
"params": params,
"timeout": timeout,
"stream": stream,
},
)
raise
return response
return _make_request() | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/cross_connector_utils/rate_limit_wrapper.py | common/data_source/cross_connector_utils/rate_limit_wrapper.py | import time
import logging
from collections.abc import Callable
from functools import wraps
from typing import Any
from typing import cast
from typing import TypeVar
import requests
F = TypeVar("F", bound=Callable[..., Any])
class RateLimitTriedTooManyTimesError(Exception):
pass
class _RateLimitDecorator:
"""Builds a generic wrapper/decorator for calls to external APIs that
prevents making more than `max_calls` requests per `period`
Implementation inspired by the `ratelimit` library:
https://github.com/tomasbasham/ratelimit.
    NOTE: this is not thread safe.
"""
def __init__(
self,
max_calls: int,
period: float, # in seconds
sleep_time: float = 2, # in seconds
sleep_backoff: float = 2, # applies exponential backoff
max_num_sleep: int = 0,
):
self.max_calls = max_calls
self.period = period
self.sleep_time = sleep_time
self.sleep_backoff = sleep_backoff
self.max_num_sleep = max_num_sleep
self.call_history: list[float] = []
self.curr_calls = 0
def __call__(self, func: F) -> F:
@wraps(func)
def wrapped_func(*args: list, **kwargs: dict[str, Any]) -> Any:
# cleanup calls which are no longer relevant
self._cleanup()
# check if we've exceeded the rate limit
sleep_cnt = 0
while len(self.call_history) == self.max_calls:
sleep_time = self.sleep_time * (self.sleep_backoff**sleep_cnt)
logging.warning(
f"Rate limit exceeded for function {func.__name__}. "
f"Waiting {sleep_time} seconds before retrying."
)
time.sleep(sleep_time)
sleep_cnt += 1
if self.max_num_sleep != 0 and sleep_cnt >= self.max_num_sleep:
raise RateLimitTriedTooManyTimesError(
f"Exceeded '{self.max_num_sleep}' retries for function '{func.__name__}'"
)
self._cleanup()
# add the current call to the call history
self.call_history.append(time.monotonic())
return func(*args, **kwargs)
return cast(F, wrapped_func)
def _cleanup(self) -> None:
curr_time = time.monotonic()
time_to_expire_before = curr_time - self.period
self.call_history = [
call_time
for call_time in self.call_history
if call_time > time_to_expire_before
]
rate_limit_builder = _RateLimitDecorator
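# Example usage (illustrative): permit at most 3 calls per rolling 60-second
# window; excess calls sleep (with exponential backoff) until a slot frees up.
#
#     @rate_limit_builder(max_calls=3, period=60)
#     def call_external_api() -> requests.Response:
#         return requests.get("https://api.example.com/data")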
"""If you want to allow the external service to tell you when you've hit the rate limit,
use the following instead"""
R = TypeVar("R", bound=Callable[..., requests.Response])
def wrap_request_to_handle_ratelimiting(
request_fn: R, default_wait_time_sec: int = 30, max_waits: int = 30
) -> R:
def wrapped_request(*args: list, **kwargs: dict[str, Any]) -> requests.Response:
for _ in range(max_waits):
response = request_fn(*args, **kwargs)
if response.status_code == 429:
try:
wait_time = int(
response.headers.get("Retry-After", default_wait_time_sec)
)
except ValueError:
wait_time = default_wait_time_sec
time.sleep(wait_time)
continue
return response
raise RateLimitTriedTooManyTimesError(f"Exceeded '{max_waits}' retries")
return cast(R, wrapped_request)
_rate_limited_get = wrap_request_to_handle_ratelimiting(requests.get)
_rate_limited_post = wrap_request_to_handle_ratelimiting(requests.post)
class _RateLimitedRequest:
get = _rate_limited_get
post = _rate_limited_post
rl_requests = _RateLimitedRequest | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/cross_connector_utils/__init__.py | common/data_source/cross_connector_utils/__init__.py | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false | |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/github/connector.py | common/data_source/github/connector.py | import copy
import logging
from collections.abc import Callable
from collections.abc import Generator
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from enum import Enum
from typing import Any
from typing import cast
from github import Github, Auth
from github import RateLimitExceededException
from github import Repository
from github.GithubException import GithubException
from github.Issue import Issue
from github.NamedUser import NamedUser
from github.PaginatedList import PaginatedList
from github.PullRequest import PullRequest
from pydantic import BaseModel
from typing_extensions import override
from common.data_source.utils import sanitize_filename
from common.data_source.config import DocumentSource, GITHUB_CONNECTOR_BASE_URL
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
CredentialExpiredError,
InsufficientPermissionsError,
UnexpectedValidationError,
)
from common.data_source.interfaces import CheckpointedConnectorWithPermSyncGH, CheckpointOutput
from common.data_source.models import (
ConnectorCheckpoint,
ConnectorFailure,
Document,
DocumentFailure,
ExternalAccess,
SecondsSinceUnixEpoch,
)
from common.data_source.connector_runner import ConnectorRunner
from .models import SerializedRepository
from .rate_limit_utils import sleep_after_rate_limit_exception
from .utils import deserialize_repository
from .utils import get_external_access_permission
ITEMS_PER_PAGE = 100
CURSOR_LOG_FREQUENCY = 50
_MAX_NUM_RATE_LIMIT_RETRIES = 5
ONE_DAY = timedelta(days=1)
SLIM_BATCH_SIZE = 100
# Cases
# X (from start) standard run, no fallback to cursor-based pagination
# X (from start) standard run errors, fallback to cursor-based pagination
# X error in the middle of a page
# X no errors: run to completion
# X (from checkpoint) standard run, no fallback to cursor-based pagination
# X (from checkpoint) continue from cursor-based pagination
# - retrying
# - no retrying
# things to check:
# checkpoint state on return
# checkpoint progress (no infinite loop)
class DocMetadata(BaseModel):
repo: str
def get_nextUrl_key(pag_list: PaginatedList[PullRequest | Issue]) -> str:
if "_PaginatedList__nextUrl" in pag_list.__dict__:
return "_PaginatedList__nextUrl"
for key in pag_list.__dict__:
if "__nextUrl" in key:
return key
for key in pag_list.__dict__:
if "nextUrl" in key:
return key
return ""
def get_nextUrl(
pag_list: PaginatedList[PullRequest | Issue], nextUrl_key: str
) -> str | None:
return getattr(pag_list, nextUrl_key) if nextUrl_key else None
def set_nextUrl(
pag_list: PaginatedList[PullRequest | Issue], nextUrl_key: str, nextUrl: str
) -> None:
if nextUrl_key:
setattr(pag_list, nextUrl_key, nextUrl)
elif nextUrl:
raise ValueError("Next URL key not found: " + str(pag_list.__dict__))
def _paginate_until_error(
git_objs: Callable[[], PaginatedList[PullRequest | Issue]],
cursor_url: str | None,
prev_num_objs: int,
cursor_url_callback: Callable[[str | None, int], None],
retrying: bool = False,
) -> Generator[PullRequest | Issue, None, None]:
num_objs = prev_num_objs
pag_list = git_objs()
nextUrl_key = get_nextUrl_key(pag_list)
if cursor_url:
set_nextUrl(pag_list, nextUrl_key, cursor_url)
elif retrying:
# if we are retrying, we want to skip the objects retrieved
# over previous calls. Unfortunately, this WILL retrieve all
# pages before the one we are resuming from, so we really
# don't want this case to be hit often
logging.warning(
"Retrying from a previous cursor-based pagination call. "
"This will retrieve all pages before the one we are resuming from, "
"which may take a while and consume many API calls."
)
pag_list = cast(PaginatedList[PullRequest | Issue], pag_list[prev_num_objs:])
num_objs = 0
try:
# this for loop handles cursor-based pagination
for issue_or_pr in pag_list:
num_objs += 1
yield issue_or_pr
# used to store the current cursor url in the checkpoint. This value
# is updated during iteration over pag_list.
cursor_url_callback(get_nextUrl(pag_list, nextUrl_key), num_objs)
if num_objs % CURSOR_LOG_FREQUENCY == 0:
logging.info(
f"Retrieved {num_objs} objects with current cursor url: {get_nextUrl(pag_list, nextUrl_key)}"
)
except Exception as e:
logging.exception(f"Error during cursor-based pagination: {e}")
if num_objs - prev_num_objs > 0:
raise
if get_nextUrl(pag_list, nextUrl_key) is not None and not retrying:
logging.info(
"Assuming that this error is due to cursor "
"expiration because no objects were retrieved. "
"Retrying from the first page."
)
yield from _paginate_until_error(
git_objs, None, prev_num_objs, cursor_url_callback, retrying=True
)
return
# for no cursor url or if we reach this point after a retry, raise the error
raise
def _get_batch_rate_limited(
# We pass in a callable because we want git_objs to produce a fresh
# PaginatedList each time it's called to avoid using the same object for cursor-based pagination
# from a partial offset-based pagination call.
git_objs: Callable[[], PaginatedList],
page_num: int,
cursor_url: str | None,
prev_num_objs: int,
cursor_url_callback: Callable[[str | None, int], None],
github_client: Github,
attempt_num: int = 0,
) -> Generator[PullRequest | Issue, None, None]:
if attempt_num > _MAX_NUM_RATE_LIMIT_RETRIES:
raise RuntimeError(
"Re-tried fetching batch too many times. Something is going wrong with fetching objects from Github"
)
try:
if cursor_url:
# when this is set, we are resuming from an earlier
# cursor-based pagination call.
yield from _paginate_until_error(
git_objs, cursor_url, prev_num_objs, cursor_url_callback
)
return
objs = list(git_objs().get_page(page_num))
# fetch all data here to disable lazy loading later
# this is needed to capture the rate limit exception here (if one occurs)
for obj in objs:
if hasattr(obj, "raw_data"):
getattr(obj, "raw_data")
yield from objs
except RateLimitExceededException:
sleep_after_rate_limit_exception(github_client)
yield from _get_batch_rate_limited(
git_objs,
page_num,
cursor_url,
prev_num_objs,
cursor_url_callback,
github_client,
attempt_num + 1,
)
except GithubException as e:
if not (
e.status == 422
and (
"cursor" in (e.message or "")
or "cursor" in (e.data or {}).get("message", "")
)
):
raise
# Fallback to a cursor-based pagination strategy
# This can happen for "large datasets," but there's no documentation
# On the error on the web as far as we can tell.
# Error message:
# "Pagination with the page parameter is not supported for large datasets,
# please use cursor based pagination (after/before)"
yield from _paginate_until_error(
git_objs, cursor_url, prev_num_objs, cursor_url_callback
)
def _get_userinfo(user: NamedUser) -> dict[str, str]:
def _safe_get(attr_name: str) -> str | None:
try:
return cast(str | None, getattr(user, attr_name))
except GithubException:
logging.debug(f"Error getting {attr_name} for user")
return None
return {
k: v
for k, v in {
"login": _safe_get("login"),
"name": _safe_get("name"),
"email": _safe_get("email"),
}.items()
if v is not None
}
def _convert_pr_to_document(
pull_request: PullRequest, repo_external_access: ExternalAccess | None
) -> Document:
repo_name = pull_request.base.repo.full_name if pull_request.base else ""
doc_metadata = DocMetadata(repo=repo_name)
file_content_byte = pull_request.body.encode('utf-8') if pull_request.body else b""
name = sanitize_filename(pull_request.title, "md")
return Document(
id=pull_request.html_url,
        blob=file_content_byte,
source=DocumentSource.GITHUB,
external_access=repo_external_access,
semantic_identifier=f"{pull_request.number}:{name}",
# updated_at is UTC time but is timezone unaware, explicitly add UTC
# as there is logic in indexing to prevent wrong timestamped docs
# due to local time discrepancies with UTC
doc_updated_at=(
pull_request.updated_at.replace(tzinfo=timezone.utc)
if pull_request.updated_at
else None
),
extension=".md",
# this metadata is used in perm sync
size_bytes=len(file_content_byte) if file_content_byte else 0,
primary_owners=[],
doc_metadata=doc_metadata.model_dump(),
metadata={
k: [str(vi) for vi in v] if isinstance(v, list) else str(v)
for k, v in {
"object_type": "PullRequest",
"id": pull_request.number,
"merged": pull_request.merged,
"state": pull_request.state,
"user": _get_userinfo(pull_request.user) if pull_request.user else None,
"assignees": [
_get_userinfo(assignee) for assignee in pull_request.assignees
],
"repo": (
pull_request.base.repo.full_name if pull_request.base else None
),
"num_commits": str(pull_request.commits),
"num_files_changed": str(pull_request.changed_files),
"labels": [label.name for label in pull_request.labels],
"created_at": (
pull_request.created_at.replace(tzinfo=timezone.utc)
if pull_request.created_at
else None
),
"updated_at": (
pull_request.updated_at.replace(tzinfo=timezone.utc)
if pull_request.updated_at
else None
),
"closed_at": (
pull_request.closed_at.replace(tzinfo=timezone.utc)
if pull_request.closed_at
else None
),
"merged_at": (
pull_request.merged_at.replace(tzinfo=timezone.utc)
if pull_request.merged_at
else None
),
"merged_by": (
_get_userinfo(pull_request.merged_by)
if pull_request.merged_by
else None
),
}.items()
if v is not None
},
)
def _fetch_issue_comments(issue: Issue) -> str:
comments = issue.get_comments()
return "\nComment: ".join(comment.body for comment in comments)
def _convert_issue_to_document(
issue: Issue, repo_external_access: ExternalAccess | None
) -> Document:
repo_name = issue.repository.full_name if issue.repository else ""
doc_metadata = DocMetadata(repo=repo_name)
file_content_byte = issue.body.encode('utf-8') if issue.body else b""
name = sanitize_filename(issue.title, "md")
return Document(
id=issue.html_url,
blob=file_content_byte,
source=DocumentSource.GITHUB,
extension=".md",
external_access=repo_external_access,
semantic_identifier=f"{issue.number}:{name}",
# updated_at is UTC time but is timezone unaware
doc_updated_at=issue.updated_at.replace(tzinfo=timezone.utc),
# this metadata is used in perm sync
doc_metadata=doc_metadata.model_dump(),
size_bytes=len(file_content_byte) if file_content_byte else 0,
primary_owners=[_get_userinfo(issue.user) if issue.user else None],
metadata={
k: [str(vi) for vi in v] if isinstance(v, list) else str(v)
for k, v in {
"object_type": "Issue",
"id": issue.number,
"state": issue.state,
"user": _get_userinfo(issue.user) if issue.user else None,
"assignees": [_get_userinfo(assignee) for assignee in issue.assignees],
"repo": issue.repository.full_name if issue.repository else None,
"labels": [label.name for label in issue.labels],
"created_at": (
issue.created_at.replace(tzinfo=timezone.utc)
if issue.created_at
else None
),
"updated_at": (
issue.updated_at.replace(tzinfo=timezone.utc)
if issue.updated_at
else None
),
"closed_at": (
issue.closed_at.replace(tzinfo=timezone.utc)
if issue.closed_at
else None
),
"closed_by": (
_get_userinfo(issue.closed_by) if issue.closed_by else None
),
}.items()
if v is not None
},
)
class GithubConnectorStage(Enum):
START = "start"
PRS = "prs"
ISSUES = "issues"
class GithubConnectorCheckpoint(ConnectorCheckpoint):
stage: GithubConnectorStage
curr_page: int
cached_repo_ids: list[int] | None = None
cached_repo: SerializedRepository | None = None
# Used for the fallback cursor-based pagination strategy
num_retrieved: int
cursor_url: str | None = None
def reset(self) -> None:
"""
Resets curr_page, num_retrieved, and cursor_url to their initial values (0, 0, None)
"""
self.curr_page = 0
self.num_retrieved = 0
self.cursor_url = None
def make_cursor_url_callback(
checkpoint: GithubConnectorCheckpoint,
) -> Callable[[str | None, int], None]:
def cursor_url_callback(cursor_url: str | None, num_objs: int) -> None:
# we want to maintain the old cursor url so code after retrieval
# can determine that we are using the fallback cursor-based pagination strategy
if cursor_url:
checkpoint.cursor_url = cursor_url
checkpoint.num_retrieved = num_objs
return cursor_url_callback
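# Illustrative wiring (a sketch based on the signatures above, not a verbatim
# excerpt): the callback threads resume state onto the checkpoint while
# pagination runs.
#
#     cursor_url_callback = make_cursor_url_callback(checkpoint)
#     for pr in _paginate_until_error(
#         self._pull_requests_func(repo),  # factory for a fresh PaginatedList
#         checkpoint.cursor_url,           # resume cursor, if any
#         checkpoint.num_retrieved,
#         cursor_url_callback,
#     ):
#         ...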
class GithubConnector(CheckpointedConnectorWithPermSyncGH[GithubConnectorCheckpoint]):
def __init__(
self,
repo_owner: str,
repositories: str | None = None,
state_filter: str = "all",
include_prs: bool = True,
include_issues: bool = False,
) -> None:
self.repo_owner = repo_owner
self.repositories = repositories
self.state_filter = state_filter
self.include_prs = include_prs
self.include_issues = include_issues
self.github_client: Github | None = None
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
# defaults to 30 items per page, can be set to as high as 100
token = credentials["github_access_token"]
auth = Auth.Token(token)
if GITHUB_CONNECTOR_BASE_URL:
self.github_client = Github(
auth=auth,
base_url=GITHUB_CONNECTOR_BASE_URL,
per_page=ITEMS_PER_PAGE,
)
else:
self.github_client = Github(
auth=auth,
per_page=ITEMS_PER_PAGE,
)
return None
def get_github_repo(
self, github_client: Github, attempt_num: int = 0
) -> Repository.Repository:
if attempt_num > _MAX_NUM_RATE_LIMIT_RETRIES:
raise RuntimeError(
"Re-tried fetching repo too many times. Something is going wrong with fetching objects from Github"
)
try:
return github_client.get_repo(f"{self.repo_owner}/{self.repositories}")
except RateLimitExceededException:
sleep_after_rate_limit_exception(github_client)
return self.get_github_repo(github_client, attempt_num + 1)
def get_github_repos(
self, github_client: Github, attempt_num: int = 0
) -> list[Repository.Repository]:
"""Get specific repositories based on comma-separated repo_name string."""
if attempt_num > _MAX_NUM_RATE_LIMIT_RETRIES:
raise RuntimeError(
"Re-tried fetching repos too many times. Something is going wrong with fetching objects from Github"
)
try:
repos = []
# Split repo_name by comma and strip whitespace
repo_names = [
name.strip() for name in (cast(str, self.repositories)).split(",")
]
for repo_name in repo_names:
if repo_name: # Skip empty strings
try:
repo = github_client.get_repo(f"{self.repo_owner}/{repo_name}")
repos.append(repo)
except GithubException as e:
logging.warning(
f"Could not fetch repo {self.repo_owner}/{repo_name}: {e}"
)
return repos
except RateLimitExceededException:
sleep_after_rate_limit_exception(github_client)
return self.get_github_repos(github_client, attempt_num + 1)
def get_all_repos(
self, github_client: Github, attempt_num: int = 0
) -> list[Repository.Repository]:
if attempt_num > _MAX_NUM_RATE_LIMIT_RETRIES:
raise RuntimeError(
"Re-tried fetching repos too many times. Something is going wrong with fetching objects from Github"
)
try:
# Try to get organization first
try:
org = github_client.get_organization(self.repo_owner)
return list(org.get_repos())
except GithubException:
# If not an org, try as a user
user = github_client.get_user(self.repo_owner)
return list(user.get_repos())
except RateLimitExceededException:
sleep_after_rate_limit_exception(github_client)
return self.get_all_repos(github_client, attempt_num + 1)
def _pull_requests_func(
self, repo: Repository.Repository
) -> Callable[[], PaginatedList[PullRequest]]:
return lambda: repo.get_pulls(
state=self.state_filter, sort="updated", direction="desc"
)
def _issues_func(
self, repo: Repository.Repository
) -> Callable[[], PaginatedList[Issue]]:
return lambda: repo.get_issues(
state=self.state_filter, sort="updated", direction="desc"
)
def _fetch_from_github(
self,
checkpoint: GithubConnectorCheckpoint,
start: datetime | None = None,
end: datetime | None = None,
include_permissions: bool = False,
) -> Generator[Document | ConnectorFailure, None, GithubConnectorCheckpoint]:
if self.github_client is None:
raise ConnectorMissingCredentialError("GitHub")
checkpoint = copy.deepcopy(checkpoint)
# First run of the connector, fetch all repos and store in checkpoint
if checkpoint.cached_repo_ids is None:
repos = []
if self.repositories:
if "," in self.repositories:
# Multiple repositories specified
repos = self.get_github_repos(self.github_client)
else:
# Single repository (backward compatibility)
repos = [self.get_github_repo(self.github_client)]
else:
# All repositories
repos = self.get_all_repos(self.github_client)
if not repos:
checkpoint.has_more = False
return checkpoint
curr_repo = repos.pop()
checkpoint.cached_repo_ids = [repo.id for repo in repos]
checkpoint.cached_repo = SerializedRepository(
id=curr_repo.id,
headers=curr_repo.raw_headers,
raw_data=curr_repo.raw_data,
)
checkpoint.stage = GithubConnectorStage.PRS
checkpoint.curr_page = 0
# save checkpoint with repo ids retrieved
return checkpoint
if checkpoint.cached_repo is None:
raise ValueError("No repo saved in checkpoint")
# Deserialize the repository from the checkpoint
repo = deserialize_repository(checkpoint.cached_repo, self.github_client)
cursor_url_callback = make_cursor_url_callback(checkpoint)
repo_external_access: ExternalAccess | None = None
if include_permissions:
repo_external_access = get_external_access_permission(
repo, self.github_client
)
if self.include_prs and checkpoint.stage == GithubConnectorStage.PRS:
logging.info(f"Fetching PRs for repo: {repo.name}")
pr_batch = _get_batch_rate_limited(
self._pull_requests_func(repo),
checkpoint.curr_page,
checkpoint.cursor_url,
checkpoint.num_retrieved,
cursor_url_callback,
self.github_client,
)
checkpoint.curr_page += 1 # NOTE: not used for cursor-based fallback
done_with_prs = False
num_prs = 0
pr = None
print("start: ", start)
for pr in pr_batch:
num_prs += 1
print("-"*40)
print("PR name", pr.title)
print("updated at", pr.updated_at)
print("-"*40)
print("\n")
# we iterate backwards in time, so at this point we stop processing prs
if (
start is not None
and pr.updated_at
and pr.updated_at.replace(tzinfo=timezone.utc) <= start
):
done_with_prs = True
break
# Skip PRs updated after the end date
if (
end is not None
and pr.updated_at
and pr.updated_at.replace(tzinfo=timezone.utc) > end
):
continue
try:
yield _convert_pr_to_document(
cast(PullRequest, pr), repo_external_access
)
except Exception as e:
error_msg = f"Error converting PR to document: {e}"
logging.exception(error_msg)
yield ConnectorFailure(
failed_document=DocumentFailure(
document_id=str(pr.id), document_link=pr.html_url
),
failure_message=error_msg,
exception=e,
)
continue
            # If we reach this point with a cursor url in the checkpoint, we were using
            # the fallback cursor-based pagination strategy. That strategy tries to get all
            # PRs, so having cursor_url set means we are done with PRs. However, we need to
            # return AFTER the checkpoint reset to avoid infinite loops.
# if we found any PRs on the page and there are more PRs to get, return the checkpoint.
# In offset mode, while indexing without time constraints, the pr batch
# will be empty when we're done.
used_cursor = checkpoint.cursor_url is not None
if num_prs > 0 and not done_with_prs and not used_cursor:
return checkpoint
# if we went past the start date during the loop or there are no more
# prs to get, we move on to issues
checkpoint.stage = GithubConnectorStage.ISSUES
checkpoint.reset()
if used_cursor:
# save the checkpoint after changing stage; next run will continue from issues
return checkpoint
if self.include_issues and checkpoint.stage == GithubConnectorStage.ISSUES:
logging.info(f"Fetching issues for repo: {repo.name}")
issue_batch = list(
_get_batch_rate_limited(
self._issues_func(repo),
checkpoint.curr_page,
checkpoint.cursor_url,
checkpoint.num_retrieved,
cursor_url_callback,
self.github_client,
)
)
checkpoint.curr_page += 1
done_with_issues = False
num_issues = 0
for issue in issue_batch:
num_issues += 1
issue = cast(Issue, issue)
                # we iterate backwards in time, so at this point we stop processing issues
if (
start is not None
and issue.updated_at.replace(tzinfo=timezone.utc) <= start
):
done_with_issues = True
break
                # Skip issues updated after the end date
if (
end is not None
and issue.updated_at.replace(tzinfo=timezone.utc) > end
):
continue
if issue.pull_request is not None:
# PRs are handled separately
continue
try:
yield _convert_issue_to_document(issue, repo_external_access)
except Exception as e:
error_msg = f"Error converting issue to document: {e}"
logging.exception(error_msg)
yield ConnectorFailure(
failed_document=DocumentFailure(
document_id=str(issue.id),
document_link=issue.html_url,
),
failure_message=error_msg,
exception=e,
)
continue
# if we found any issues on the page, and we're not done, return the checkpoint.
# don't return if we're using cursor-based pagination to avoid infinite loops
if num_issues > 0 and not done_with_issues and not checkpoint.cursor_url:
return checkpoint
# if we went past the start date during the loop or there are no more
# issues to get, we move on to the next repo
checkpoint.stage = GithubConnectorStage.PRS
checkpoint.reset()
checkpoint.has_more = len(checkpoint.cached_repo_ids) > 0
if checkpoint.cached_repo_ids:
next_id = checkpoint.cached_repo_ids.pop()
next_repo = self.github_client.get_repo(next_id)
checkpoint.cached_repo = SerializedRepository(
id=next_id,
headers=next_repo.raw_headers,
raw_data=next_repo.raw_data,
)
if checkpoint.cached_repo_ids:
logging.info(
f"{len(checkpoint.cached_repo_ids)} repos remaining (IDs: {checkpoint.cached_repo_ids})"
)
else:
logging.info("No more repos remaining")
return checkpoint
def _load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: GithubConnectorCheckpoint,
include_permissions: bool = False,
) -> CheckpointOutput[GithubConnectorCheckpoint]:
start_datetime = datetime.fromtimestamp(start, tz=timezone.utc)
# add a day for timezone safety
end_datetime = datetime.fromtimestamp(end, tz=timezone.utc) + ONE_DAY
# Move start time back by 3 hours, since some Issues/PRs are getting dropped
# Could be due to delayed processing on GitHub side
# The non-updated issues since last poll will be shortcut-ed and not embedded
# adjusted_start_datetime = start_datetime - timedelta(hours=3)
adjusted_start_datetime = start_datetime
epoch = datetime.fromtimestamp(0, tz=timezone.utc)
if adjusted_start_datetime < epoch:
adjusted_start_datetime = epoch
return self._fetch_from_github(
checkpoint,
start=adjusted_start_datetime,
end=end_datetime,
include_permissions=include_permissions,
)
@override
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: GithubConnectorCheckpoint,
) -> CheckpointOutput[GithubConnectorCheckpoint]:
return self._load_from_checkpoint(
start, end, checkpoint, include_permissions=False
)
@override
def load_from_checkpoint_with_perm_sync(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: GithubConnectorCheckpoint,
) -> CheckpointOutput[GithubConnectorCheckpoint]:
return self._load_from_checkpoint(
start, end, checkpoint, include_permissions=True
)
def validate_connector_settings(self) -> None:
if self.github_client is None:
raise ConnectorMissingCredentialError("GitHub credentials not loaded.")
if not self.repo_owner:
raise ConnectorValidationError(
"Invalid connector settings: 'repo_owner' must be provided."
)
try:
if self.repositories:
if "," in self.repositories:
# Multiple repositories specified
repo_names = [name.strip() for name in self.repositories.split(",")]
if not repo_names:
raise ConnectorValidationError(
"Invalid connector settings: No valid repository names provided."
)
# Validate at least one repository exists and is accessible
valid_repos = False
validation_errors = []
for repo_name in repo_names:
if not repo_name:
continue
try:
test_repo = self.github_client.get_repo(
f"{self.repo_owner}/{repo_name}"
)
logging.info(
f"Successfully accessed repository: {self.repo_owner}/{repo_name}"
)
test_repo.get_contents("")
valid_repos = True
# If at least one repo is valid, we can proceed
break
except GithubException as e:
validation_errors.append(
f"Repository '{repo_name}': {e.data.get('message', str(e))}"
)
if not valid_repos:
error_msg = (
"None of the specified repositories could be accessed: "
)
error_msg += ", ".join(validation_errors)
raise ConnectorValidationError(error_msg)
else:
# Single repository (backward compatibility)
test_repo = self.github_client.get_repo(
f"{self.repo_owner}/{self.repositories}"
)
test_repo.get_contents("")
else:
# Try to get organization first
try:
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | true |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/github/models.py | common/data_source/github/models.py | from typing import Any
from github import Repository
from github.Requester import Requester
from pydantic import BaseModel
class SerializedRepository(BaseModel):
# id is part of the raw_data as well, just pulled out for convenience
id: int
headers: dict[str, str | int]
raw_data: dict[str, Any]
def to_Repository(self, requester: Requester) -> Repository.Repository:
return Repository.Repository(
requester, self.headers, self.raw_data, completed=True
) | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/github/utils.py | common/data_source/github/utils.py | import logging
from github import Github
from github.Repository import Repository
from common.data_source.models import ExternalAccess
from .models import SerializedRepository
def get_external_access_permission(
repo: Repository, github_client: Github
) -> ExternalAccess:
"""
Get the external access permission for a repository.
This functionality requires Enterprise Edition.
"""
# RAGFlow doesn't implement the Onyx EE external-permissions system.
# Default to private/unknown permissions.
return ExternalAccess.empty()
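# Illustrative only, never invoked: round-tripping a repository through the
# serialized form stored in connector checkpoints. Assumes valid credentials
# and uses "octocat/Hello-World" purely as a hypothetical example repo.
def _example_repo_round_trip(github_client: Github) -> Repository:
    repo = github_client.get_repo("octocat/Hello-World")
    cached = SerializedRepository(
        id=repo.id, headers=repo.raw_headers, raw_data=repo.raw_data
    )
    return deserialize_repository(cached, github_client)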
def deserialize_repository(
cached_repo: SerializedRepository, github_client: Github
) -> Repository:
"""
Deserialize a SerializedRepository back into a Repository object.
"""
# Try to access the requester - different PyGithub versions may use different attribute names
try:
# Try to get the requester using getattr to avoid linter errors
requester = getattr(github_client, "_requester", None)
if requester is None:
requester = getattr(github_client, "_Github__requester", None)
if requester is None:
# If we can't find the requester attribute, we need to fall back to recreating the repo
raise AttributeError("Could not find requester attribute")
return cached_repo.to_Repository(requester)
except Exception as e:
# If all else fails, re-fetch the repo directly
logging.warning("Failed to deserialize repository: %s. Attempting to re-fetch.", e)
repo_id = cached_repo.id
return github_client.get_repo(repo_id) | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/github/rate_limit_utils.py | common/data_source/github/rate_limit_utils.py | import time
import logging
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from github import Github
def sleep_after_rate_limit_exception(github_client: Github) -> None:
"""
Sleep until the GitHub rate limit resets.
Args:
github_client: The GitHub client that hit the rate limit
"""
sleep_time = github_client.get_rate_limit().core.reset.replace(
tzinfo=timezone.utc
) - datetime.now(tz=timezone.utc)
sleep_time += timedelta(minutes=1) # add an extra minute just to be safe
    logging.info(
        "Ran into Github rate-limit. Sleeping %.0f seconds.", sleep_time.total_seconds()
    )
time.sleep(sleep_time.total_seconds()) | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/github/__init__.py | common/data_source/github/__init__.py | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false | |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/jira/connector.py | common/data_source/jira/connector.py | """Checkpointed Jira connector that emits markdown blobs for each issue."""
from __future__ import annotations
import argparse
import copy
import logging
import os
import re
from collections.abc import Callable, Generator, Iterable, Iterator, Sequence
from datetime import datetime, timedelta, timezone
from typing import Any
from zoneinfo import ZoneInfo, ZoneInfoNotFoundError
from jira import JIRA
from jira.resources import Issue
from pydantic import Field
from common.data_source.config import (
INDEX_BATCH_SIZE,
JIRA_CONNECTOR_LABELS_TO_SKIP,
JIRA_CONNECTOR_MAX_TICKET_SIZE,
JIRA_TIMEZONE_OFFSET,
ONE_HOUR,
DocumentSource,
)
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
ConnectorValidationError,
InsufficientPermissionsError,
UnexpectedValidationError,
)
from common.data_source.interfaces import (
CheckpointedConnectorWithPermSync,
CheckpointOutputWrapper,
SecondsSinceUnixEpoch,
SlimConnectorWithPermSync,
)
from common.data_source.jira.utils import (
JIRA_CLOUD_API_VERSION,
JIRA_SERVER_API_VERSION,
build_issue_url,
extract_body_text,
extract_named_value,
extract_user,
format_attachments,
format_comments,
parse_jira_datetime,
should_skip_issue,
)
from common.data_source.models import (
ConnectorCheckpoint,
ConnectorFailure,
Document,
DocumentFailure,
SlimDocument,
)
from common.data_source.utils import is_atlassian_cloud_url, is_atlassian_date_error, scoped_url
logger = logging.getLogger(__name__)
_DEFAULT_FIELDS = "summary,description,updated,created,status,priority,assignee,reporter,labels,issuetype,project,comment,attachment"
_SLIM_FIELDS = "key,project"
_MAX_RESULTS_FETCH_IDS = 5000
_JIRA_SLIM_PAGE_SIZE = 500
_JIRA_FULL_PAGE_SIZE = 50
_DEFAULT_ATTACHMENT_SIZE_LIMIT = 10 * 1024 * 1024 # 10MB
class JiraCheckpoint(ConnectorCheckpoint):
"""Checkpoint that tracks which slice of the current JQL result set was emitted."""
start_at: int = 0
cursor: str | None = None
ids_done: bool = False
all_issue_ids: list[list[str]] = Field(default_factory=list)
_TZ_OFFSET_PATTERN = re.compile(r"([+-])(\d{2})(:?)(\d{2})$")
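# Illustrative only: the offset pattern above matches compact timezone suffixes
# such as "+0800" so that _normalize_datetime_string (defined on the connector
# below) can rewrite them into the colon-separated "+08:00" form that
# datetime.fromisoformat reliably accepts.
def _example_tz_offset_normalization() -> str:
    match = _TZ_OFFSET_PATTERN.search("2024-01-01T10:00:00+0800")
    assert match is not None and match.group(3) != ":"
    sign, hours, _, minutes = match.groups()
    return f"{sign}{hours}:{minutes}"  # "+08:00"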
class JiraConnector(CheckpointedConnectorWithPermSync, SlimConnectorWithPermSync):
"""Retrieve Jira issues and emit them as Markdown documents."""
def __init__(
self,
jira_base_url: str,
project_key: str | None = None,
jql_query: str | None = None,
batch_size: int = INDEX_BATCH_SIZE,
include_comments: bool = True,
include_attachments: bool = False,
labels_to_skip: Sequence[str] | None = None,
comment_email_blacklist: Sequence[str] | None = None,
scoped_token: bool = False,
attachment_size_limit: int | None = None,
timezone_offset: float | None = None,
) -> None:
if not jira_base_url:
raise ConnectorValidationError("Jira base URL must be provided.")
self.jira_base_url = jira_base_url.rstrip("/")
self.project_key = project_key
self.jql_query = jql_query
self.batch_size = batch_size
self.include_comments = include_comments
self.include_attachments = include_attachments
configured_labels = labels_to_skip or JIRA_CONNECTOR_LABELS_TO_SKIP
self.labels_to_skip = {label.lower() for label in configured_labels}
self.comment_email_blacklist = {email.lower() for email in comment_email_blacklist or []}
self.scoped_token = scoped_token
self.jira_client: JIRA | None = None
self.max_ticket_size = JIRA_CONNECTOR_MAX_TICKET_SIZE
self.attachment_size_limit = attachment_size_limit if attachment_size_limit and attachment_size_limit > 0 else _DEFAULT_ATTACHMENT_SIZE_LIMIT
self._fields_param = _DEFAULT_FIELDS
self._slim_fields = _SLIM_FIELDS
tz_offset_value = float(timezone_offset) if timezone_offset is not None else float(JIRA_TIMEZONE_OFFSET)
self.timezone_offset = tz_offset_value
self.timezone = timezone(offset=timedelta(hours=tz_offset_value))
self._timezone_overridden = timezone_offset is not None
# -------------------------------------------------------------------------
# Connector lifecycle helpers
# -------------------------------------------------------------------------
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Instantiate the Jira client using either an API token or username/password."""
jira_url_for_client = self.jira_base_url
if self.scoped_token:
if is_atlassian_cloud_url(self.jira_base_url):
try:
jira_url_for_client = scoped_url(self.jira_base_url, "jira")
except ValueError as exc:
raise ConnectorValidationError(str(exc)) from exc
else:
logger.warning("[Jira] Scoped token requested but Jira base URL does not appear to be an Atlassian Cloud domain; scoped token ignored.")
user_email = credentials.get("jira_user_email") or credentials.get("username")
api_token = credentials.get("jira_api_token") or credentials.get("token") or credentials.get("api_token")
password = credentials.get("jira_password") or credentials.get("password")
rest_api_version = credentials.get("rest_api_version")
if not rest_api_version:
rest_api_version = JIRA_CLOUD_API_VERSION if api_token else JIRA_SERVER_API_VERSION
options: dict[str, Any] = {"rest_api_version": rest_api_version}
try:
if user_email and api_token:
self.jira_client = JIRA(
server=jira_url_for_client,
basic_auth=(user_email, api_token),
options=options,
)
elif api_token:
self.jira_client = JIRA(
server=jira_url_for_client,
token_auth=api_token,
options=options,
)
elif user_email and password:
self.jira_client = JIRA(
server=jira_url_for_client,
basic_auth=(user_email, password),
options=options,
)
else:
raise ConnectorMissingCredentialError("Jira credentials must include either an API token or username/password.")
except Exception as exc: # pragma: no cover - jira lib raises many types
raise ConnectorMissingCredentialError(f"Jira: {exc}") from exc
self._sync_timezone_from_server()
return None
def validate_connector_settings(self) -> None:
"""Validate connectivity by fetching basic Jira info."""
if not self.jira_client:
raise ConnectorMissingCredentialError("Jira")
try:
if self.jql_query:
dummy_checkpoint = self.build_dummy_checkpoint()
checkpoint_callback = self._make_checkpoint_callback(dummy_checkpoint)
iterator = self._perform_jql_search(
jql=self.jql_query,
start=0,
max_results=1,
fields="key",
all_issue_ids=dummy_checkpoint.all_issue_ids,
checkpoint_callback=checkpoint_callback,
next_page_token=dummy_checkpoint.cursor,
ids_done=dummy_checkpoint.ids_done,
)
next(iter(iterator), None)
elif self.project_key:
self.jira_client.project(self.project_key)
else:
self.jira_client.projects()
except Exception as exc: # pragma: no cover - dependent on Jira responses
self._handle_validation_error(exc)
# -------------------------------------------------------------------------
# Checkpointed connector implementation
# -------------------------------------------------------------------------
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: JiraCheckpoint,
) -> Generator[Document | ConnectorFailure, None, JiraCheckpoint]:
"""Load Jira issues, emitting a Document per issue."""
try:
return (yield from self._load_with_retry(start, end, checkpoint))
except Exception as exc:
logger.exception(f"[Jira] Jira query ultimately failed: {exc}")
yield ConnectorFailure(
failure_message=f"Failed to query Jira: {exc}",
exception=exc,
)
return JiraCheckpoint(has_more=False, start_at=checkpoint.start_at)
def load_from_checkpoint_with_perm_sync(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: JiraCheckpoint,
) -> Generator[Document | ConnectorFailure, None, JiraCheckpoint]:
"""Permissions are not synced separately, so reuse the standard loader."""
return (yield from self.load_from_checkpoint(start=start, end=end, checkpoint=checkpoint))
def _load_with_retry(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: JiraCheckpoint,
) -> Generator[Document | ConnectorFailure, None, JiraCheckpoint]:
if not self.jira_client:
raise ConnectorMissingCredentialError("Jira")
attempt_start = start
retried_with_buffer = False
attempt = 0
while True:
attempt += 1
jql = self._build_jql(attempt_start, end)
logger.info(f"[Jira] Executing Jira JQL attempt {attempt} (buffered_retry={retried_with_buffer})[start and end parameters redacted]")
try:
return (yield from self._load_from_checkpoint_internal(jql, checkpoint, start_filter=start))
except Exception as exc:
if attempt_start is not None and not retried_with_buffer and is_atlassian_date_error(exc):
attempt_start = attempt_start - ONE_HOUR
retried_with_buffer = True
logger.info(f"[Jira] Atlassian date error detected; retrying with start={attempt_start}.")
continue
raise
def _handle_validation_error(self, exc: Exception) -> None:
status_code = getattr(exc, "status_code", None)
if status_code == 401:
raise InsufficientPermissionsError("Jira credential appears to be invalid or expired (HTTP 401).") from exc
if status_code == 403:
raise InsufficientPermissionsError("Jira token does not have permission to access the requested resources (HTTP 403).") from exc
if status_code == 404:
raise ConnectorValidationError("Jira resource not found (HTTP 404).") from exc
if status_code == 429:
raise ConnectorValidationError("Jira rate limit exceeded during validation (HTTP 429).") from exc
message = getattr(exc, "text", str(exc))
if not message:
raise UnexpectedValidationError("Unexpected Jira validation error.") from exc
raise ConnectorValidationError(f"Jira validation failed: {message}") from exc
def _load_from_checkpoint_internal(
self,
jql: str,
checkpoint: JiraCheckpoint,
start_filter: SecondsSinceUnixEpoch | None = None,
) -> Generator[Document | ConnectorFailure, None, JiraCheckpoint]:
assert self.jira_client, "load_credentials must be called before loading issues."
page_size = self._full_page_size()
new_checkpoint = copy.deepcopy(checkpoint)
starting_offset = new_checkpoint.start_at or 0
current_offset = starting_offset
checkpoint_callback = self._make_checkpoint_callback(new_checkpoint)
issue_iter = self._perform_jql_search(
jql=jql,
start=current_offset,
max_results=page_size,
fields=self._fields_param,
all_issue_ids=new_checkpoint.all_issue_ids,
checkpoint_callback=checkpoint_callback,
next_page_token=new_checkpoint.cursor,
ids_done=new_checkpoint.ids_done,
)
start_cutoff = float(start_filter) if start_filter is not None else None
for issue in issue_iter:
current_offset += 1
issue_key = getattr(issue, "key", "unknown")
if should_skip_issue(issue, self.labels_to_skip):
continue
issue_updated = parse_jira_datetime(issue.raw.get("fields", {}).get("updated"))
if start_cutoff is not None and issue_updated is not None and issue_updated.timestamp() <= start_cutoff:
# Jira JQL only supports minute precision, so we discard already-processed
# issues here based on the original second-level cutoff.
continue
try:
document = self._issue_to_document(issue)
except Exception as exc: # pragma: no cover - defensive
logger.exception(f"[Jira] Failed to convert Jira issue {issue_key}: {exc}")
yield ConnectorFailure(
failure_message=f"Failed to convert Jira issue {issue_key}: {exc}",
failed_document=DocumentFailure(
document_id=issue_key,
document_link=build_issue_url(self.jira_base_url, issue_key),
),
exception=exc,
)
continue
if document is not None:
yield document
if self.include_attachments:
for attachment_document in self._attachment_documents(issue):
if attachment_document is not None:
yield attachment_document
self._update_checkpoint_for_next_run(
checkpoint=new_checkpoint,
current_offset=current_offset,
starting_offset=starting_offset,
page_size=page_size,
)
new_checkpoint.start_at = current_offset
return new_checkpoint
def build_dummy_checkpoint(self) -> JiraCheckpoint:
"""Create an empty checkpoint used to kick off ingestion."""
return JiraCheckpoint(has_more=True, start_at=0)
def validate_checkpoint_json(self, checkpoint_json: str) -> JiraCheckpoint:
"""Validate a serialized checkpoint."""
return JiraCheckpoint.model_validate_json(checkpoint_json)
# -------------------------------------------------------------------------
# Slim connector implementation
# -------------------------------------------------------------------------
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: Any = None, # noqa: ARG002 - maintained for interface compatibility
) -> Generator[list[SlimDocument], None, None]:
"""Return lightweight references to Jira issues (used for permission syncing)."""
if not self.jira_client:
raise ConnectorMissingCredentialError("Jira")
start_ts = start if start is not None else 0
end_ts = end if end is not None else datetime.now(timezone.utc).timestamp()
jql = self._build_jql(start_ts, end_ts)
checkpoint = self.build_dummy_checkpoint()
checkpoint_callback = self._make_checkpoint_callback(checkpoint)
prev_offset = 0
current_offset = 0
slim_batch: list[SlimDocument] = []
while checkpoint.has_more:
for issue in self._perform_jql_search(
jql=jql,
start=current_offset,
max_results=_JIRA_SLIM_PAGE_SIZE,
fields=self._slim_fields,
all_issue_ids=checkpoint.all_issue_ids,
checkpoint_callback=checkpoint_callback,
next_page_token=checkpoint.cursor,
ids_done=checkpoint.ids_done,
):
current_offset += 1
if should_skip_issue(issue, self.labels_to_skip):
continue
doc_id = build_issue_url(self.jira_base_url, issue.key)
slim_batch.append(SlimDocument(id=doc_id))
if len(slim_batch) >= _JIRA_SLIM_PAGE_SIZE:
yield slim_batch
slim_batch = []
self._update_checkpoint_for_next_run(
checkpoint=checkpoint,
current_offset=current_offset,
starting_offset=prev_offset,
page_size=_JIRA_SLIM_PAGE_SIZE,
)
prev_offset = current_offset
if slim_batch:
yield slim_batch
# -------------------------------------------------------------------------
# Internal helpers
# -------------------------------------------------------------------------
def _build_jql(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> str:
clauses: list[str] = []
if self.jql_query:
clauses.append(f"({self.jql_query})")
elif self.project_key:
clauses.append(f'project = "{self.project_key}"')
else:
raise ConnectorValidationError("Either project_key or jql_query must be provided for Jira connector.")
if self.labels_to_skip:
labels = ", ".join(f'"{label}"' for label in self.labels_to_skip)
clauses.append(f"labels NOT IN ({labels})")
if start is not None:
clauses.append(f'updated >= "{self._format_jql_time(start)}"')
if end is not None:
clauses.append(f'updated <= "{self._format_jql_time(end)}"')
if not clauses:
raise ConnectorValidationError("Unable to build Jira JQL query.")
jql = " AND ".join(clauses)
if "order by" not in jql.lower():
jql = f"{jql} ORDER BY updated ASC"
return jql
def _format_jql_time(self, timestamp: SecondsSinceUnixEpoch) -> str:
dt_utc = datetime.fromtimestamp(float(timestamp), tz=timezone.utc)
dt_local = dt_utc.astimezone(self.timezone)
# Jira only accepts minute-precision timestamps in JQL, so we format accordingly
# and rely on a post-query second-level filter to avoid duplicates.
return dt_local.strftime("%Y-%m-%d %H:%M")
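    # Illustrative only, never called by the connector: a sketch of the JQL
    # produced for a project-scoped sync window, assuming a UTC timezone offset
    # and a hypothetical base URL.
    @staticmethod
    def _example_build_jql_sketch() -> str:
        connector = JiraConnector(
            jira_base_url="https://example.atlassian.net",  # hypothetical URL
            project_key="ENG",
            labels_to_skip=["no-index"],
            timezone_offset=0.0,
        )
        # Roughly: project = "ENG" AND labels NOT IN ("no-index")
        #   AND updated >= "2023-11-14 22:13" AND updated <= "2023-11-14 23:13"
        #   ORDER BY updated ASC
        return connector._build_jql(start=1700000000, end=1700003600)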
def _issue_to_document(self, issue: Issue) -> Document | None:
fields = issue.raw.get("fields", {})
summary = fields.get("summary") or ""
description_text = extract_body_text(fields.get("description"))
comments_text = (
format_comments(
fields.get("comment"),
blacklist=self.comment_email_blacklist,
)
if self.include_comments
else ""
)
attachments_text = format_attachments(fields.get("attachment"))
reporter_name, reporter_email = extract_user(fields.get("reporter"))
assignee_name, assignee_email = extract_user(fields.get("assignee"))
status = extract_named_value(fields.get("status"))
priority = extract_named_value(fields.get("priority"))
issue_type = extract_named_value(fields.get("issuetype"))
project = fields.get("project") or {}
issue_url = build_issue_url(self.jira_base_url, issue.key)
metadata_lines = [
f"key: {issue.key}",
f"url: {issue_url}",
f"summary: {summary}",
f"status: {status or 'Unknown'}",
f"priority: {priority or 'Unspecified'}",
f"issue_type: {issue_type or 'Unknown'}",
f"project: {project.get('name') or ''}",
f"project_key: {project.get('key') or self.project_key or ''}",
]
if reporter_name:
metadata_lines.append(f"reporter: {reporter_name}")
if reporter_email:
metadata_lines.append(f"reporter_email: {reporter_email}")
if assignee_name:
metadata_lines.append(f"assignee: {assignee_name}")
if assignee_email:
metadata_lines.append(f"assignee_email: {assignee_email}")
if fields.get("labels"):
metadata_lines.append(f"labels: {', '.join(fields.get('labels'))}")
created_dt = parse_jira_datetime(fields.get("created"))
updated_dt = parse_jira_datetime(fields.get("updated")) or created_dt or datetime.now(timezone.utc)
metadata_lines.append(f"created: {created_dt.isoformat() if created_dt else ''}")
metadata_lines.append(f"updated: {updated_dt.isoformat() if updated_dt else ''}")
sections: list[str] = [
"---",
"\n".join(filter(None, metadata_lines)),
"---",
"",
"## Description",
description_text or "No description provided.",
]
if comments_text:
sections.extend(["", "## Comments", comments_text])
if attachments_text:
sections.extend(["", "## Attachments", attachments_text])
blob_text = "\n".join(sections).strip() + "\n"
blob = blob_text.encode("utf-8")
if len(blob) > self.max_ticket_size:
logger.info(f"[Jira] Skipping {issue.key} because it exceeds the maximum size of {self.max_ticket_size} bytes.")
return None
semantic_identifier = f"{issue.key}: {summary}" if summary else issue.key
return Document(
id=issue_url,
source=DocumentSource.JIRA,
semantic_identifier=semantic_identifier,
extension=".md",
blob=blob,
doc_updated_at=updated_dt,
size_bytes=len(blob),
)
def _attachment_documents(self, issue: Issue) -> Iterable[Document]:
attachments = issue.raw.get("fields", {}).get("attachment") or []
for attachment in attachments:
try:
document = self._attachment_to_document(issue, attachment)
if document is not None:
yield document
except Exception as exc: # pragma: no cover - defensive
failed_id = attachment.get("id") or attachment.get("filename")
issue_key = getattr(issue, "key", "unknown")
logger.warning(f"[Jira] Failed to process attachment {failed_id} for issue {issue_key}: {exc}")
def _attachment_to_document(self, issue: Issue, attachment: dict[str, Any]) -> Document | None:
if not self.include_attachments:
return None
filename = attachment.get("filename")
content_url = attachment.get("content")
if not filename or not content_url:
return None
try:
attachment_size = int(attachment.get("size", 0))
except (TypeError, ValueError):
attachment_size = 0
if attachment_size and attachment_size > self.attachment_size_limit:
logger.info(f"[Jira] Skipping attachment {filename} on {issue.key} because reported size exceeds limit ({self.attachment_size_limit} bytes).")
return None
blob = self._download_attachment(content_url)
if blob is None:
return None
if len(blob) > self.attachment_size_limit:
logger.info(f"[Jira] Skipping attachment {filename} on {issue.key} because it exceeds the size limit ({self.attachment_size_limit} bytes).")
return None
attachment_time = parse_jira_datetime(attachment.get("created")) or parse_jira_datetime(attachment.get("updated"))
updated_dt = attachment_time or parse_jira_datetime(issue.raw.get("fields", {}).get("updated")) or datetime.now(timezone.utc)
extension = os.path.splitext(filename)[1] or ""
document_id = f"{issue.key}::attachment::{attachment.get('id') or filename}"
semantic_identifier = f"{issue.key} attachment: {filename}"
return Document(
id=document_id,
source=DocumentSource.JIRA,
semantic_identifier=semantic_identifier,
extension=extension,
blob=blob,
doc_updated_at=updated_dt,
size_bytes=len(blob),
)
def _download_attachment(self, url: str) -> bytes | None:
if not self.jira_client:
raise ConnectorMissingCredentialError("Jira")
response = self.jira_client._session.get(url)
response.raise_for_status()
return response.content
def _sync_timezone_from_server(self) -> None:
if self._timezone_overridden or not self.jira_client:
return
try:
server_info = self.jira_client.server_info()
except Exception as exc: # pragma: no cover - defensive
logger.info(f"[Jira] Unable to determine timezone from server info; continuing with offset {self.timezone_offset}. Error: {exc}")
return
detected_offset = self._extract_timezone_offset(server_info)
if detected_offset is None or detected_offset == self.timezone_offset:
return
self.timezone_offset = detected_offset
self.timezone = timezone(offset=timedelta(hours=detected_offset))
logger.info(f"[Jira] Timezone offset adjusted to {detected_offset} hours using Jira server info.")
def _extract_timezone_offset(self, server_info: dict[str, Any]) -> float | None:
server_time_raw = server_info.get("serverTime")
if isinstance(server_time_raw, str):
offset = self._parse_offset_from_datetime_string(server_time_raw)
if offset is not None:
return offset
tz_name = server_info.get("timeZone")
if isinstance(tz_name, str):
offset = self._offset_from_zone_name(tz_name)
if offset is not None:
return offset
return None
@staticmethod
def _parse_offset_from_datetime_string(value: str) -> float | None:
normalized = JiraConnector._normalize_datetime_string(value)
try:
dt = datetime.fromisoformat(normalized)
except ValueError:
return None
if dt.tzinfo is None:
return 0.0
offset = dt.tzinfo.utcoffset(dt)
if offset is None:
return None
return offset.total_seconds() / 3600.0
@staticmethod
def _normalize_datetime_string(value: str) -> str:
trimmed = (value or "").strip()
if trimmed.endswith("Z"):
return f"{trimmed[:-1]}+00:00"
match = _TZ_OFFSET_PATTERN.search(trimmed)
if match and match.group(3) != ":":
sign, hours, _, minutes = match.groups()
trimmed = f"{trimmed[: match.start()]}{sign}{hours}:{minutes}"
return trimmed
@staticmethod
def _offset_from_zone_name(name: str) -> float | None:
try:
tz = ZoneInfo(name)
except (ZoneInfoNotFoundError, ValueError):
return None
reference = datetime.now(tz)
offset = reference.utcoffset()
if offset is None:
return None
return offset.total_seconds() / 3600.0
def _is_cloud_client(self) -> bool:
if not self.jira_client:
return False
rest_version = str(self.jira_client._options.get("rest_api_version", "")).strip()
return rest_version == str(JIRA_CLOUD_API_VERSION)
def _full_page_size(self) -> int:
return max(1, min(self.batch_size, _JIRA_FULL_PAGE_SIZE))
def _perform_jql_search(
self,
*,
jql: str,
start: int,
max_results: int,
fields: str | None = None,
all_issue_ids: list[list[str]] | None = None,
checkpoint_callback: Callable[[Iterable[list[str]], str | None], None] | None = None,
next_page_token: str | None = None,
ids_done: bool = False,
) -> Iterable[Issue]:
assert self.jira_client, "Jira client not initialized."
is_cloud = self._is_cloud_client()
if is_cloud:
if all_issue_ids is None:
raise ValueError("all_issue_ids is required for Jira Cloud searches.")
yield from self._perform_jql_search_v3(
jql=jql,
max_results=max_results,
fields=fields,
all_issue_ids=all_issue_ids,
checkpoint_callback=checkpoint_callback,
next_page_token=next_page_token,
ids_done=ids_done,
)
else:
yield from self._perform_jql_search_v2(
jql=jql,
start=start,
max_results=max_results,
fields=fields,
)
def _perform_jql_search_v3(
self,
*,
jql: str,
max_results: int,
all_issue_ids: list[list[str]],
fields: str | None = None,
checkpoint_callback: Callable[[Iterable[list[str]], str | None], None] | None = None,
next_page_token: str | None = None,
ids_done: bool = False,
) -> Iterable[Issue]:
assert self.jira_client, "Jira client not initialized."
if not ids_done:
new_ids, page_token = self._enhanced_search_ids(jql, next_page_token)
if checkpoint_callback is not None and new_ids:
checkpoint_callback(
self._chunk_issue_ids(new_ids, max_results),
page_token,
)
elif checkpoint_callback is not None:
checkpoint_callback([], page_token)
if all_issue_ids:
issue_ids = all_issue_ids.pop()
if issue_ids:
yield from self._bulk_fetch_issues(issue_ids, fields)
def _perform_jql_search_v2(
self,
*,
jql: str,
start: int,
max_results: int,
fields: str | None = None,
) -> Iterable[Issue]:
assert self.jira_client, "Jira client not initialized."
issues = self.jira_client.search_issues(
jql_str=jql,
startAt=start,
maxResults=max_results,
fields=fields or self._fields_param,
expand="renderedFields",
)
for issue in issues:
yield issue
def _enhanced_search_ids(
self,
jql: str,
next_page_token: str | None,
) -> tuple[list[str], str | None]:
assert self.jira_client, "Jira client not initialized."
enhanced_search_path = self.jira_client._get_url("search/jql")
params: dict[str, str | int | None] = {
"jql": jql,
"maxResults": _MAX_RESULTS_FETCH_IDS,
"nextPageToken": next_page_token,
"fields": "id",
}
response = self.jira_client._session.get(enhanced_search_path, params=params)
response.raise_for_status()
data = response.json()
return [str(issue["id"]) for issue in data.get("issues", [])], data.get("nextPageToken")
def _bulk_fetch_issues(
self,
issue_ids: list[str],
fields: str | None,
) -> Iterable[Issue]:
assert self.jira_client, "Jira client not initialized."
if not issue_ids:
return []
bulk_fetch_path = self.jira_client._get_url("issue/bulkfetch")
payload: dict[str, Any] = {"issueIdsOrKeys": issue_ids}
payload["fields"] = fields.split(",") if fields else ["*all"]
response = self.jira_client._session.post(bulk_fetch_path, json=payload)
response.raise_for_status()
data = response.json()
return [Issue(self.jira_client._options, self.jira_client._session, raw=issue) for issue in data.get("issues", [])]
@staticmethod
def _chunk_issue_ids(issue_ids: list[str], chunk_size: int) -> Iterable[list[str]]:
if chunk_size <= 0:
chunk_size = _JIRA_FULL_PAGE_SIZE
for idx in range(0, len(issue_ids), chunk_size):
yield issue_ids[idx : idx + chunk_size]
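    # Illustrative only: _chunk_issue_ids preserves order and leaves a short
    # final chunk, e.g. [["1", "2"], ["3", "4"], ["5"]] for a chunk size of 2.
    @staticmethod
    def _example_chunk_issue_ids() -> list[list[str]]:
        return list(JiraConnector._chunk_issue_ids(["1", "2", "3", "4", "5"], 2))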
def _make_checkpoint_callback(self, checkpoint: JiraCheckpoint) -> Callable[[Iterable[list[str]], str | None], None]:
def checkpoint_callback(
issue_ids: Iterable[list[str]] | list[list[str]],
page_token: str | None,
) -> None:
for id_batch in issue_ids:
checkpoint.all_issue_ids.append(list(id_batch))
checkpoint.cursor = page_token
checkpoint.ids_done = page_token is None
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | true |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/jira/utils.py | common/data_source/jira/utils.py | """Helper utilities for the Jira connector."""
from __future__ import annotations
import os
from collections.abc import Collection
from datetime import datetime, timezone
from typing import Any, Iterable
from jira.resources import Issue
from common.data_source.utils import datetime_from_string
JIRA_SERVER_API_VERSION = os.environ.get("JIRA_SERVER_API_VERSION", "2")
JIRA_CLOUD_API_VERSION = os.environ.get("JIRA_CLOUD_API_VERSION", "3")
def build_issue_url(base_url: str, issue_key: str) -> str:
"""Return the canonical UI URL for a Jira issue."""
return f"{base_url.rstrip('/')}/browse/{issue_key}"
def parse_jira_datetime(value: Any) -> datetime | None:
"""Best-effort parse of Jira datetime values to aware UTC datetimes."""
if value is None:
return None
if isinstance(value, datetime):
return value.astimezone(timezone.utc) if value.tzinfo else value.replace(tzinfo=timezone.utc)
if isinstance(value, str):
return datetime_from_string(value)
return None
def extract_named_value(value: Any) -> str | None:
"""Extract a readable string out of Jira's typed objects."""
if value is None:
return None
if isinstance(value, str):
return value
if isinstance(value, dict):
return value.get("name") or value.get("value")
return getattr(value, "name", None)
def extract_user(value: Any) -> tuple[str | None, str | None]:
"""Return display name + email tuple for a Jira user blob."""
if value is None:
return None, None
if isinstance(value, dict):
return value.get("displayName"), value.get("emailAddress")
display = getattr(value, "displayName", None)
email = getattr(value, "emailAddress", None)
return display, email
def extract_text_from_adf(adf: Any) -> str:
"""Flatten Atlassian Document Format (ADF) structures to text."""
texts: list[str] = []
def _walk(node: Any) -> None:
if node is None:
return
if isinstance(node, dict):
node_type = node.get("type")
if node_type == "text":
texts.append(node.get("text", ""))
for child in node.get("content", []):
_walk(child)
elif isinstance(node, list):
for child in node:
_walk(child)
_walk(adf)
return "\n".join(part for part in texts if part)
def extract_body_text(value: Any) -> str:
"""Normalize Jira description/comments (raw/adf/str) into plain text."""
if value is None:
return ""
if isinstance(value, str):
return value.strip()
if isinstance(value, dict):
return extract_text_from_adf(value).strip()
return str(value).strip()
def format_comments(
comment_block: Any,
*,
blacklist: Collection[str],
) -> str:
"""Convert Jira comments into a markdown-ish bullet list."""
if not isinstance(comment_block, dict):
return ""
comments = comment_block.get("comments") or []
lines: list[str] = []
normalized_blacklist = {email.lower() for email in blacklist if email}
for comment in comments:
author = comment.get("author") or {}
author_email = (author.get("emailAddress") or "").lower()
if author_email and author_email in normalized_blacklist:
continue
author_name = author.get("displayName") or author.get("name") or author_email or "Unknown"
created = parse_jira_datetime(comment.get("created"))
created_str = created.isoformat() if created else "Unknown time"
body = extract_body_text(comment.get("body"))
if not body:
continue
lines.append(f"- {author_name} ({created_str}):\n{body}")
return "\n\n".join(lines)
def format_attachments(attachments: Any) -> str:
"""List Jira attachments as bullet points."""
if not isinstance(attachments, list):
return ""
attachment_lines: list[str] = []
for attachment in attachments:
filename = attachment.get("filename")
if not filename:
continue
size = attachment.get("size")
size_text = f" ({size} bytes)" if isinstance(size, int) else ""
content_url = attachment.get("content") or ""
url_suffix = f" -> {content_url}" if content_url else ""
attachment_lines.append(f"- {filename}{size_text}{url_suffix}")
return "\n".join(attachment_lines)
def should_skip_issue(issue: Issue, labels_to_skip: set[str]) -> bool:
"""Return True if the issue contains any label from the skip list."""
if not labels_to_skip:
return False
fields = getattr(issue, "raw", {}).get("fields", {})
labels: Iterable[str] = fields.get("labels") or []
for label in labels:
if (label or "").lower() in labels_to_skip:
return True
return False
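# Illustrative only: label matching is case-insensitive, so a duck-typed issue
# carrying the label "No-Index" is skipped when "no-index" is configured. The
# stand-in class below exists purely for this sketch.
def _example_should_skip_issue() -> bool:
    class _FakeIssue:  # stand-in for jira.resources.Issue in this sketch
        raw = {"fields": {"labels": ["No-Index"]}}
    return should_skip_issue(_FakeIssue(), {"no-index"})  # True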
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/jira/__init__.py | common/data_source/jira/__init__.py | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false | |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/google_drive/doc_conversion.py | common/data_source/google_drive/doc_conversion.py | import io
import logging
import mimetypes
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, cast
from urllib.parse import urlparse, urlunparse
from googleapiclient.errors import HttpError  # type: ignore
from googleapiclient.http import MediaIoBaseDownload # type: ignore
from pydantic import BaseModel
from common.data_source.config import DocumentSource, FileOrigin
from common.data_source.google_drive.constant import DRIVE_FOLDER_TYPE, DRIVE_SHORTCUT_TYPE
from common.data_source.google_drive.model import GDriveMimeType, GoogleDriveFileType
from common.data_source.google_drive.section_extraction import HEADING_DELIMITER
from common.data_source.google_util.resource import GoogleDriveService, get_drive_service
from common.data_source.models import ConnectorFailure, Document, DocumentFailure, ImageSection, SlimDocument, TextSection
from common.data_source.utils import get_file_ext
# Image types that should be excluded from processing
EXCLUDED_IMAGE_TYPES = [
"image/bmp",
"image/tiff",
"image/gif",
"image/svg+xml",
"image/avif",
]
GOOGLE_MIME_TYPES_TO_EXPORT = {
GDriveMimeType.DOC.value: "text/plain",
GDriveMimeType.SPREADSHEET.value: "text/csv",
GDriveMimeType.PPT.value: "text/plain",
}
GOOGLE_NATIVE_EXPORT_TARGETS: dict[str, tuple[str, str]] = {
GDriveMimeType.DOC.value: ("application/vnd.openxmlformats-officedocument.wordprocessingml.document", ".docx"),
GDriveMimeType.SPREADSHEET.value: ("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", ".xlsx"),
GDriveMimeType.PPT.value: ("application/vnd.openxmlformats-officedocument.presentationml.presentation", ".pptx"),
}
GOOGLE_NATIVE_EXPORT_FALLBACK: tuple[str, str] = ("application/pdf", ".pdf")
ACCEPTED_PLAIN_TEXT_FILE_EXTENSIONS = [
".txt",
".md",
".mdx",
".conf",
".log",
".json",
".csv",
".tsv",
".xml",
".yml",
".yaml",
".sql",
]
ACCEPTED_DOCUMENT_FILE_EXTENSIONS = [
".pdf",
".docx",
".pptx",
".xlsx",
".eml",
".epub",
".html",
]
ACCEPTED_IMAGE_FILE_EXTENSIONS = [
".png",
".jpg",
".jpeg",
".webp",
]
ALL_ACCEPTED_FILE_EXTENSIONS = ACCEPTED_PLAIN_TEXT_FILE_EXTENSIONS + ACCEPTED_DOCUMENT_FILE_EXTENSIONS + ACCEPTED_IMAGE_FILE_EXTENSIONS
MAX_RETRIEVER_EMAILS = 20
CHUNK_SIZE_BUFFER = 64 # extra bytes past the limit to read
# This is not a standard valid Unicode char, it is used by the docs advanced API to
# represent smart chips (elements like dates and doc links).
SMART_CHIP_CHAR = "\ue907"
WEB_VIEW_LINK_KEY = "webViewLink"
# Fallback templates for generating web links when Drive omits webViewLink.
_FALLBACK_WEB_VIEW_LINK_TEMPLATES = {
GDriveMimeType.DOC.value: "https://docs.google.com/document/d/{}/view",
GDriveMimeType.SPREADSHEET.value: "https://docs.google.com/spreadsheets/d/{}/view",
GDriveMimeType.PPT.value: "https://docs.google.com/presentation/d/{}/view",
}
class PermissionSyncContext(BaseModel):
"""
This is the information that is needed to sync permissions for a document.
"""
primary_admin_email: str
google_domain: str
def onyx_document_id_from_drive_file(file: GoogleDriveFileType) -> str:
link = file.get(WEB_VIEW_LINK_KEY)
if not link:
file_id = file.get("id")
if not file_id:
raise KeyError(f"Google Drive file missing both '{WEB_VIEW_LINK_KEY}' and 'id' fields.")
mime_type = file.get("mimeType", "")
template = _FALLBACK_WEB_VIEW_LINK_TEMPLATES.get(mime_type)
if template is None:
link = f"https://drive.google.com/file/d/{file_id}/view"
else:
link = template.format(file_id)
logging.debug(
"Missing webViewLink for Google Drive file with id %s. Falling back to constructed link %s",
file_id,
link,
)
parsed_url = urlparse(link)
parsed_url = parsed_url._replace(query="") # remove query parameters
spl_path = parsed_url.path.split("/")
if spl_path and (spl_path[-1] in ["edit", "view", "preview"]):
spl_path.pop()
parsed_url = parsed_url._replace(path="/".join(spl_path))
# Remove query parameters and reconstruct URL
return urlunparse(parsed_url)
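# Illustrative only, never invoked: canonicalization strips query parameters and
# a trailing edit/view/preview segment, so both hypothetical links below map to
# the same document id.
def _example_onyx_document_id() -> None:
    a = onyx_document_id_from_drive_file(
        {"webViewLink": "https://docs.google.com/document/d/abc123/edit?usp=sharing"}
    )
    b = onyx_document_id_from_drive_file(
        {"webViewLink": "https://docs.google.com/document/d/abc123/view"}
    )
    assert a == b == "https://docs.google.com/document/d/abc123"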
def _find_nth(haystack: str, needle: str, n: int, start: int = 0) -> int:
start = haystack.find(needle, start)
while start >= 0 and n > 1:
start = haystack.find(needle, start + len(needle))
n -= 1
return start
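# Illustrative only: _find_nth returns the start index of the n-th occurrence,
# e.g. the 2nd "abc" in "abcabcabc" starts at index 3.
def _example_find_nth() -> int:
    return _find_nth("abcabcabc", "abc", 2)  # -> 3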
def align_basic_advanced(basic_sections: list[TextSection | ImageSection], adv_sections: list[TextSection]) -> list[TextSection | ImageSection]:
"""Align the basic sections with the advanced sections.
In particular, the basic sections contain all content of the file,
including smart chips like dates and doc links. The advanced sections
are separated by section headers and contain header-based links that
improve user experience when they click on the source in the UI.
There are edge cases in text matching (i.e. the heading is a smart chip or
there is a smart chip in the doc with text containing the actual heading text)
that make the matching imperfect; this is hence done on a best-effort basis.
"""
if len(adv_sections) <= 1:
return basic_sections # no benefit from aligning
basic_full_text = "".join([section.text for section in basic_sections if isinstance(section, TextSection)])
new_sections: list[TextSection | ImageSection] = []
heading_start = 0
for adv_ind in range(1, len(adv_sections)):
heading = adv_sections[adv_ind].text.split(HEADING_DELIMITER)[0]
# retrieve the longest part of the heading that is not a smart chip
heading_key = max(heading.split(SMART_CHIP_CHAR), key=len).strip()
if heading_key == "":
logging.warning(f"Cannot match heading: {heading}, its link will come from the following section")
continue
heading_offset = heading.find(heading_key)
# count occurrences of heading str in previous section
heading_count = adv_sections[adv_ind - 1].text.count(heading_key)
prev_start = heading_start
heading_start = _find_nth(basic_full_text, heading_key, heading_count, start=prev_start) - heading_offset
if heading_start < 0:
logging.warning(f"Heading key {heading_key} from heading {heading} not found in basic text")
heading_start = prev_start
continue
new_sections.append(
TextSection(
link=adv_sections[adv_ind - 1].link,
text=basic_full_text[prev_start:heading_start],
)
)
# handle last section
new_sections.append(TextSection(link=adv_sections[-1].link, text=basic_full_text[heading_start:]))
return new_sections
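# Illustrative only, never invoked: with two advanced sections, the basic text
# is re-split at the recovered heading boundary and each piece inherits the
# link of the matching header-based section. All links here are hypothetical.
def _example_align_basic_advanced() -> list[TextSection | ImageSection]:
    basic: list[TextSection | ImageSection] = [
        TextSection(link="doc", text="Intro text. Setup steps here.")
    ]
    adv = [
        TextSection(link="doc#intro", text=f"Intro{HEADING_DELIMITER}Intro text. "),
        TextSection(link="doc#setup", text=f"Setup{HEADING_DELIMITER}Setup steps here."),
    ]
    # Returns two sections: ("doc#intro", "Intro text. ") and
    # ("doc#setup", "Setup steps here.").
    return align_basic_advanced(basic, adv)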
def is_valid_image_type(mime_type: str) -> bool:
"""
Check if mime_type is a valid image type.
Args:
mime_type: The MIME type to check
Returns:
True if the MIME type is a valid image type, False otherwise
"""
return bool(mime_type) and mime_type.startswith("image/") and mime_type not in EXCLUDED_IMAGE_TYPES
def is_gdrive_image_mime_type(mime_type: str) -> bool:
"""
Return True if the mime_type is a common image type in GDrive.
(e.g. 'image/png', 'image/jpeg')
"""
return is_valid_image_type(mime_type)
def _get_extension_from_file(file: GoogleDriveFileType, mime_type: str, fallback: str = ".bin") -> str:
file_name = file.get("name") or ""
if file_name:
suffix = Path(file_name).suffix
if suffix:
return suffix
file_extension = file.get("fileExtension")
if file_extension:
return f".{file_extension.lstrip('.')}"
guessed = mimetypes.guess_extension(mime_type or "")
if guessed:
return guessed
return fallback
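# Illustrative only: extension resolution prefers the file name suffix, then the
# Drive "fileExtension" field, then a mimetypes guess, then the fallback.
def _example_get_extension() -> None:
    assert _get_extension_from_file({"name": "report.PDF"}, "application/pdf") == ".PDF"
    assert _get_extension_from_file({"name": "report", "fileExtension": "pdf"}, "") == ".pdf"
    assert _get_extension_from_file({}, "text/plain") == ".txt"
    assert _get_extension_from_file({}, "") == ".bin"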
def _download_file_blob(
service: GoogleDriveService,
file: GoogleDriveFileType,
size_threshold: int,
allow_images: bool,
) -> tuple[bytes, str] | None:
mime_type = file.get("mimeType", "")
file_id = file.get("id")
if not file_id:
logging.warning("Encountered Google Drive file without id.")
return None
if is_gdrive_image_mime_type(mime_type) and not allow_images:
logging.debug(f"Skipping image {file.get('name')} because allow_images is False.")
return None
blob: bytes = b""
extension = ".bin"
try:
if mime_type in GOOGLE_NATIVE_EXPORT_TARGETS:
export_mime, extension = GOOGLE_NATIVE_EXPORT_TARGETS[mime_type]
request = service.files().export_media(fileId=file_id, mimeType=export_mime)
blob = _download_request(request, file_id, size_threshold)
elif mime_type.startswith("application/vnd.google-apps"):
export_mime, extension = GOOGLE_NATIVE_EXPORT_FALLBACK
request = service.files().export_media(fileId=file_id, mimeType=export_mime)
blob = _download_request(request, file_id, size_threshold)
else:
extension = _get_extension_from_file(file, mime_type)
blob = download_request(service, file_id, size_threshold)
except HttpError:
raise
if not blob:
return None
if not extension:
extension = _get_extension_from_file(file, mime_type)
return blob, extension
def download_request(service: GoogleDriveService, file_id: str, size_threshold: int) -> bytes:
"""
Download the file from Google Drive.
"""
# For other file types, download the file
# Use the correct API call for downloading files
request = service.files().get_media(fileId=file_id)
return _download_request(request, file_id, size_threshold)
def _download_request(request: Any, file_id: str, size_threshold: int) -> bytes:
response_bytes = io.BytesIO()
downloader = MediaIoBaseDownload(response_bytes, request, chunksize=size_threshold + CHUNK_SIZE_BUFFER)
done = False
while not done:
download_progress, done = downloader.next_chunk()
if download_progress.resumable_progress > size_threshold:
logging.warning(f"File {file_id} exceeds size threshold of {size_threshold}. Skipping2.")
return bytes()
response = response_bytes.getvalue()
if not response:
logging.warning(f"Failed to download {file_id}")
return bytes()
return response
def _download_and_extract_sections_basic(
file: dict[str, str],
service: GoogleDriveService,
allow_images: bool,
size_threshold: int,
) -> list[TextSection | ImageSection]:
"""Extract text and images from a Google Drive file."""
file_id = file["id"]
file_name = file["name"]
mime_type = file["mimeType"]
link = file.get(WEB_VIEW_LINK_KEY, "")
# For non-Google files, download the file
# Use the correct API call for downloading files
    # lazy evaluation to only download the file if necessary
    def response_call() -> bytes:
        return download_request(service, file_id, size_threshold)
    # Placeholder: image persistence is not wired up in this build. It is defined
    # at function scope so that both the image branch and the PDF branch below can
    # reach it; raising keeps the existing "log and skip" error handling intact.
    def store_image_and_create_section(**kwargs: Any) -> tuple[ImageSection, str | None]:
        raise NotImplementedError("image storage is not supported in this build")
    if is_gdrive_image_mime_type(mime_type):
        # Skip images if not explicitly enabled
        if not allow_images:
            return []
        # Store images for later processing
        sections: list[TextSection | ImageSection] = []
try:
section, embedded_id = store_image_and_create_section(
image_data=response_call(),
file_id=file_id,
display_name=file_name,
media_type=mime_type,
file_origin=FileOrigin.CONNECTOR,
link=link,
)
sections.append(section)
except Exception as e:
logging.error(f"Failed to process image {file_name}: {e}")
return sections
# For Google Docs, Sheets, and Slides, export as plain text
if mime_type in GOOGLE_MIME_TYPES_TO_EXPORT:
export_mime_type = GOOGLE_MIME_TYPES_TO_EXPORT[mime_type]
# Use the correct API call for exporting files
request = service.files().export_media(fileId=file_id, mimeType=export_mime_type)
response = _download_request(request, file_id, size_threshold)
if not response:
logging.warning(f"Failed to export {file_name} as {export_mime_type}")
return []
text = response.decode("utf-8")
return [TextSection(link=link, text=text)]
# Process based on mime type
if mime_type == "text/plain":
try:
text = response_call().decode("utf-8")
return [TextSection(link=link, text=text)]
except UnicodeDecodeError as e:
logging.warning(f"Failed to extract text from {file_name}: {e}")
return []
elif mime_type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
        def docx_to_text_and_images(*args, **kwargs):
            # Placeholder stub: a real implementation would parse the .docx and
            # return (text, images). An empty result keeps the unpack below valid.
            return "", []
text, _ = docx_to_text_and_images(io.BytesIO(response_call()))
return [TextSection(link=link, text=text)]
elif mime_type == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":
        def xlsx_to_text(*args, **kwargs):
            # Placeholder stub: a real implementation would extract sheet text.
            return ""
text = xlsx_to_text(io.BytesIO(response_call()), file_name=file_name)
return [TextSection(link=link, text=text)] if text else []
elif mime_type == "application/vnd.openxmlformats-officedocument.presentationml.presentation":
        def pptx_to_text(*args, **kwargs):
            # Placeholder stub: a real implementation would extract slide text.
            return ""
text = pptx_to_text(io.BytesIO(response_call()), file_name=file_name)
return [TextSection(link=link, text=text)] if text else []
elif mime_type == "application/pdf":
        def read_pdf_file(*args, **kwargs):
            # Placeholder stub: a real implementation would return
            # (text, metadata, embedded images). An empty result keeps the
            # three-way unpack below valid.
            return "", {}, []
text, _pdf_meta, images = read_pdf_file(io.BytesIO(response_call()))
pdf_sections: list[TextSection | ImageSection] = [TextSection(link=link, text=text)]
# Process embedded images in the PDF
try:
for idx, (img_data, img_name) in enumerate(images):
section, embedded_id = store_image_and_create_section(
image_data=img_data,
file_id=f"{file_id}_img_{idx}",
display_name=img_name or f"{file_name} - image {idx}",
file_origin=FileOrigin.CONNECTOR,
)
pdf_sections.append(section)
except Exception as e:
logging.error(f"Failed to process PDF images in {file_name}: {e}")
return pdf_sections
# Final attempt at extracting text
file_ext = get_file_ext(file.get("name", ""))
if file_ext not in ALL_ACCEPTED_FILE_EXTENSIONS:
logging.warning(f"Skipping file {file.get('name')} due to extension.")
return []
try:
def extract_file_text(*args, **kwargs):
return "extract_file_text"
text = extract_file_text(io.BytesIO(response_call()), file_name)
return [TextSection(link=link, text=text)]
except Exception as e:
logging.warning(f"Failed to extract text from {file_name}: {e}")
return []
def _convert_drive_item_to_document(
creds: Any,
allow_images: bool,
size_threshold: int,
retriever_email: str,
file: GoogleDriveFileType,
# if not specified, we will not sync permissions
# will also be a no-op if EE is not enabled
permission_sync_context: PermissionSyncContext | None,
) -> Document | ConnectorFailure | None:
"""
Main entry point for converting a Google Drive file => Document object.
"""
def _get_drive_service() -> GoogleDriveService:
return get_drive_service(creds, user_email=retriever_email)
doc_id = "unknown"
link = file.get(WEB_VIEW_LINK_KEY)
try:
if file.get("mimeType") in [DRIVE_SHORTCUT_TYPE, DRIVE_FOLDER_TYPE]:
logging.info("Skipping shortcut/folder.")
return None
size_str = file.get("size")
if size_str:
try:
size_int = int(size_str)
except ValueError:
logging.warning(f"Parsing string to int failed: size_str={size_str}")
else:
if size_int > size_threshold:
logging.warning(f"{file.get('name')} exceeds size threshold of {size_threshold}. Skipping.")
return None
blob_and_ext = _download_file_blob(
service=_get_drive_service(),
file=file,
size_threshold=size_threshold,
allow_images=allow_images,
)
if blob_and_ext is None:
logging.info(f"Skipping file {file.get('name')} due to incompatible type or download failure.")
return None
blob, extension = blob_and_ext
if not blob:
logging.warning(f"Failed to download {file.get('name')}. Skipping.")
return None
doc_id = onyx_document_id_from_drive_file(file)
modified_time = file.get("modifiedTime")
try:
doc_updated_at = datetime.fromisoformat(modified_time.replace("Z", "+00:00")) if modified_time else datetime.now(timezone.utc)
except ValueError:
logging.warning(f"Failed to parse modifiedTime for {file.get('name')}, defaulting to current time.")
doc_updated_at = datetime.now(timezone.utc)
return Document(
id=doc_id,
source=DocumentSource.GOOGLE_DRIVE,
semantic_identifier=file.get("name", ""),
blob=blob,
extension=extension,
size_bytes=len(blob),
doc_updated_at=doc_updated_at,
)
except Exception as e:
doc_id = "unknown"
try:
doc_id = onyx_document_id_from_drive_file(file)
except Exception as e2:
logging.warning(f"Error getting document id from file: {e2}")
file_name = file.get("name", doc_id)
error_str = f"Error converting file '{file_name}' to Document as {retriever_email}: {e}"
if isinstance(e, HttpError) and e.status_code == 403:
logging.warning(f"Uncommon permissions error while downloading file. User {retriever_email} was able to see file {file_name} but cannot download it.")
logging.warning(error_str)
return ConnectorFailure(
failed_document=DocumentFailure(
document_id=doc_id,
document_link=link,
),
failed_entity=None,
failure_message=error_str,
exception=e,
)
def convert_drive_item_to_document(
creds: Any,
allow_images: bool,
size_threshold: int,
# if not specified, we will not sync permissions
# will also be a no-op if EE is not enabled
permission_sync_context: PermissionSyncContext | None,
retriever_emails: list[str],
file: GoogleDriveFileType,
) -> Document | ConnectorFailure | None:
"""
Attempt to convert a drive item to a document with each retriever email
in order. returns upon a successful retrieval or a non-403 error.
We used to always get the user email from the file owners when available,
but this was causing issues with shared folders where the owner was not included in the service account
now we use the email of the account that successfully listed the file. There are cases where a
user that can list a file cannot download it, so we retry with file owners and admin email.
"""
first_error = None
doc_or_failure = None
retriever_emails = retriever_emails[:MAX_RETRIEVER_EMAILS]
# use seen instead of list(set()) to avoid re-ordering the retriever emails
seen = set()
for retriever_email in retriever_emails:
if retriever_email in seen:
continue
seen.add(retriever_email)
doc_or_failure = _convert_drive_item_to_document(
creds,
allow_images,
size_threshold,
retriever_email,
file,
permission_sync_context,
)
# There are a variety of permissions-based errors that occasionally occur
# when retrieving files. Often when these occur, there is another user
# that can successfully retrieve the file, so we try the next user.
        if (
            doc_or_failure is None
            or isinstance(doc_or_failure, Document)
            or not (isinstance(doc_or_failure.exception, HttpError) and doc_or_failure.exception.status_code in [401, 403, 404])
        ):
return doc_or_failure
if first_error is None:
first_error = doc_or_failure
else:
first_error.failure_message += f"\n\n{doc_or_failure.failure_message}"
if first_error and isinstance(first_error.exception, HttpError) and first_error.exception.status_code == 403:
# This SHOULD happen very rarely, and we don't want to break the indexing process when
# a high volume of 403s occurs early. We leave a verbose log to help investigate.
logging.error(
f"Skipping file id: {file.get('id')} name: {file.get('name')} due to 403 error.Attempted to retrieve with {retriever_emails},got the following errors: {first_error.failure_message}"
)
return None
return first_error
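# Illustrative usage sketch (the emails and size threshold are hypothetical):
# the retry order mirrors the docstring above, namely the lister first, then
# file owners, then the admin email.
def _example_convert_with_fallback(creds: Any, file: GoogleDriveFileType) -> None:
    result = convert_drive_item_to_document(
        creds=creds,
        allow_images=False,
        size_threshold=50 * 1024 * 1024,
        permission_sync_context=None,
        retriever_emails=["lister@example.com", "owner@example.com", "admin@example.com"],
        file=file,
    )
    if isinstance(result, ConnectorFailure):
        logging.warning(f"All retrievers failed: {result.failure_message}")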
def build_slim_document(
creds: Any,
file: GoogleDriveFileType,
# if not specified, we will not sync permissions
# will also be a no-op if EE is not enabled
permission_sync_context: PermissionSyncContext | None,
) -> SlimDocument | None:
if file.get("mimeType") in [DRIVE_FOLDER_TYPE, DRIVE_SHORTCUT_TYPE]:
return None
    owner_email = cast(str | None, (file.get("owners") or [{}])[0].get("emailAddress"))
def _get_external_access_for_raw_gdrive_file(*args, **kwargs):
return None
external_access = (
_get_external_access_for_raw_gdrive_file(
file=file,
company_domain=permission_sync_context.google_domain,
retriever_drive_service=(
get_drive_service(
creds,
user_email=owner_email,
)
if owner_email
else None
),
admin_drive_service=get_drive_service(
creds,
user_email=permission_sync_context.primary_admin_email,
),
)
if permission_sync_context
else None
)
return SlimDocument(
id=onyx_document_id_from_drive_file(file),
external_access=external_access,
)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/google_drive/connector.py | common/data_source/google_drive/connector.py | """Google Drive connector"""
import copy
import logging
import os
import sys
import threading
from collections.abc import Callable, Generator, Iterator
from enum import Enum
from functools import partial
from typing import Any, Protocol, cast
from urllib.parse import urlparse
from google.auth.exceptions import RefreshError  # type: ignore
from google.oauth2.credentials import Credentials as OAuthCredentials  # type: ignore
from google.oauth2.service_account import Credentials as ServiceAccountCredentials  # type: ignore
from googleapiclient.errors import HttpError  # type: ignore
from typing_extensions import override
from common.data_source.config import GOOGLE_DRIVE_CONNECTOR_SIZE_THRESHOLD, INDEX_BATCH_SIZE, SLIM_BATCH_SIZE, DocumentSource
from common.data_source.exceptions import ConnectorMissingCredentialError, ConnectorValidationError, CredentialExpiredError, InsufficientPermissionsError
from common.data_source.google_drive.doc_conversion import PermissionSyncContext, build_slim_document, convert_drive_item_to_document, onyx_document_id_from_drive_file
from common.data_source.google_drive.file_retrieval import (
DriveFileFieldType,
crawl_folders_for_files,
get_all_files_for_oauth,
get_all_files_in_my_drive_and_shared,
get_files_in_shared_drive,
get_root_folder_id,
)
from common.data_source.google_drive.model import DriveRetrievalStage, GoogleDriveCheckpoint, GoogleDriveFileType, RetrievedDriveFile, StageCompletion
from common.data_source.google_util.auth import get_google_creds
from common.data_source.google_util.constant import DB_CREDENTIALS_PRIMARY_ADMIN_KEY, MISSING_SCOPES_ERROR_STR, USER_FIELDS
from common.data_source.google_util.resource import GoogleDriveService, get_admin_service, get_drive_service
from common.data_source.google_util.util import GoogleFields, execute_paginated_retrieval, get_file_owners
from common.data_source.google_util.util_threadpool_concurrency import ThreadSafeDict
from common.data_source.interfaces import (
CheckpointedConnectorWithPermSync,
IndexingHeartbeatInterface,
SlimConnectorWithPermSync,
)
from common.data_source.models import CheckpointOutput, ConnectorFailure, Document, EntityFailure, GenerateSlimDocumentOutput, SecondsSinceUnixEpoch
from common.data_source.utils import datetime_from_string, parallel_yield, run_functions_tuples_in_parallel
MAX_DRIVE_WORKERS = int(os.environ.get("MAX_DRIVE_WORKERS", 4))
SHARED_DRIVE_PAGES_PER_CHECKPOINT = 2
MY_DRIVE_PAGES_PER_CHECKPOINT = 2
OAUTH_PAGES_PER_CHECKPOINT = 2
FOLDERS_PER_CHECKPOINT = 1
def _extract_str_list_from_comma_str(string: str | None) -> list[str]:
if not string:
return []
return [s.strip() for s in string.split(",") if s.strip()]
def _extract_ids_from_urls(urls: list[str]) -> list[str]:
return [urlparse(url).path.strip("/").split("/")[-1] for url in urls]
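# Illustrative sketch (hypothetical URL): the drive/folder id is taken as the
# last path segment of each configured URL.
def _example_url_id_extraction() -> None:
    urls = _extract_str_list_from_comma_str("https://drive.google.com/drive/folders/abc123, ")
    print(_extract_ids_from_urls(urls))  # ["abc123"]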
def _clean_requested_drive_ids(
requested_drive_ids: set[str],
requested_folder_ids: set[str],
all_drive_ids_available: set[str],
) -> tuple[list[str], list[str]]:
invalid_requested_drive_ids = requested_drive_ids - all_drive_ids_available
filtered_folder_ids = requested_folder_ids - all_drive_ids_available
if invalid_requested_drive_ids:
logging.warning(f"Some shared drive IDs were not found. IDs: {invalid_requested_drive_ids}")
logging.warning("Checking for folder access instead...")
filtered_folder_ids.update(invalid_requested_drive_ids)
valid_requested_drive_ids = requested_drive_ids - invalid_requested_drive_ids
return sorted(valid_requested_drive_ids), sorted(filtered_folder_ids)
def add_retrieval_info(
drive_files: Iterator[GoogleDriveFileType | str],
user_email: str,
completion_stage: DriveRetrievalStage,
parent_id: str | None = None,
) -> Iterator[RetrievedDriveFile | str]:
for file in drive_files:
if isinstance(file, str):
yield file
continue
yield RetrievedDriveFile(
drive_file=file,
user_email=user_email,
parent_id=parent_id,
completion_stage=completion_stage,
)
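# Illustrative sketch: raw Drive files get wrapped with retrieval metadata,
# while page-token strings pass through untouched so checkpoint resumption
# still sees them.
def _example_add_retrieval_info() -> None:
    items: Iterator[GoogleDriveFileType | str] = iter([{"id": "f1"}, "next-page-token"])
    for item in add_retrieval_info(items, "user@example.com", DriveRetrievalStage.MY_DRIVE_FILES):
        print(type(item).__name__)  # RetrievedDriveFile, then str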
class CredentialedRetrievalMethod(Protocol):
def __call__(
self,
field_type: DriveFileFieldType,
checkpoint: GoogleDriveCheckpoint,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[RetrievedDriveFile]: ...
class DriveIdStatus(str, Enum):
AVAILABLE = "available"
IN_PROGRESS = "in_progress"
FINISHED = "finished"
class GoogleDriveConnector(SlimConnectorWithPermSync, CheckpointedConnectorWithPermSync):
def __init__(
self,
include_shared_drives: bool = False,
include_my_drives: bool = False,
include_files_shared_with_me: bool = False,
shared_drive_urls: str | None = None,
my_drive_emails: str | None = None,
shared_folder_urls: str | None = None,
specific_user_emails: str | None = None,
batch_size: int = INDEX_BATCH_SIZE,
) -> None:
if not any(
(
include_shared_drives,
include_my_drives,
include_files_shared_with_me,
shared_folder_urls,
my_drive_emails,
shared_drive_urls,
)
):
raise ConnectorValidationError(
"Nothing to index. Please specify at least one of the following: include_shared_drives, include_my_drives, include_files_shared_with_me, shared_folder_urls, or my_drive_emails"
)
specific_requests_made = False
if bool(shared_drive_urls) or bool(my_drive_emails) or bool(shared_folder_urls):
specific_requests_made = True
self.specific_requests_made = specific_requests_made
# NOTE: potentially modified in load_credentials if using service account
self.include_files_shared_with_me = False if specific_requests_made else include_files_shared_with_me
self.include_my_drives = False if specific_requests_made else include_my_drives
self.include_shared_drives = False if specific_requests_made else include_shared_drives
shared_drive_url_list = _extract_str_list_from_comma_str(shared_drive_urls)
self._requested_shared_drive_ids = set(_extract_ids_from_urls(shared_drive_url_list))
self._requested_my_drive_emails = set(_extract_str_list_from_comma_str(my_drive_emails))
shared_folder_url_list = _extract_str_list_from_comma_str(shared_folder_urls)
self._requested_folder_ids = set(_extract_ids_from_urls(shared_folder_url_list))
self._specific_user_emails = _extract_str_list_from_comma_str(specific_user_emails)
self._primary_admin_email: str | None = None
self._creds: OAuthCredentials | ServiceAccountCredentials | None = None
self._creds_dict: dict[str, Any] | None = None
# ids of folders and shared drives that have been traversed
self._retrieved_folder_and_drive_ids: set[str] = set()
self.allow_images = False
self.size_threshold = GOOGLE_DRIVE_CONNECTOR_SIZE_THRESHOLD
self.logger = logging.getLogger(self.__class__.__name__)
def set_allow_images(self, value: bool) -> None:
self.allow_images = value
@property
def primary_admin_email(self) -> str:
if self._primary_admin_email is None:
raise RuntimeError("Primary admin email missing, should not call this property before calling load_credentials")
return self._primary_admin_email
@property
def google_domain(self) -> str:
if self._primary_admin_email is None:
raise RuntimeError("Primary admin email missing, should not call this property before calling load_credentials")
return self._primary_admin_email.split("@")[-1]
@property
def creds(self) -> OAuthCredentials | ServiceAccountCredentials:
if self._creds is None:
raise RuntimeError("Creds missing, should not call this property before calling load_credentials")
return self._creds
# TODO: ensure returned new_creds_dict is actually persisted when this is called?
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
try:
self._primary_admin_email = credentials[DB_CREDENTIALS_PRIMARY_ADMIN_KEY]
except KeyError:
raise ValueError("Credentials json missing primary admin key")
self._creds, new_creds_dict = get_google_creds(
credentials=credentials,
source=DocumentSource.GOOGLE_DRIVE,
)
# Service account connectors don't have a specific setting determining whether
# to include "shared with me" for each user, so we default to true unless the connector
# is in specific folders/drives mode. Note that shared files are only picked up during
# the My Drive stage, so this does nothing if the connector is set to only index shared drives.
if isinstance(self._creds, ServiceAccountCredentials) and not self.specific_requests_made:
self.include_files_shared_with_me = True
self._creds_dict = new_creds_dict
return new_creds_dict
def _update_traversed_parent_ids(self, folder_id: str) -> None:
self._retrieved_folder_and_drive_ids.add(folder_id)
def _get_all_user_emails(self) -> list[str]:
if self._specific_user_emails:
return self._specific_user_emails
# Start with primary admin email
user_emails = [self.primary_admin_email]
# Only fetch additional users if using service account
if isinstance(self.creds, OAuthCredentials):
return user_emails
admin_service = get_admin_service(
creds=self.creds,
user_email=self.primary_admin_email,
)
# Get admins first since they're more likely to have access to most files
for is_admin in [True, False]:
query = "isAdmin=true" if is_admin else "isAdmin=false"
for user in execute_paginated_retrieval(
retrieval_function=admin_service.users().list,
list_key="users",
fields=USER_FIELDS,
domain=self.google_domain,
query=query,
):
if email := user.get("primaryEmail"):
if email not in user_emails:
user_emails.append(email)
return user_emails
def get_all_drive_ids(self) -> set[str]:
return self._get_all_drives_for_user(self.primary_admin_email)
def _get_all_drives_for_user(self, user_email: str) -> set[str]:
drive_service = get_drive_service(self.creds, user_email)
is_service_account = isinstance(self.creds, ServiceAccountCredentials)
self.logger.info(f"Getting all drives for user {user_email} with service account: {is_service_account}")
all_drive_ids: set[str] = set()
for drive in execute_paginated_retrieval(
retrieval_function=drive_service.drives().list,
list_key="drives",
useDomainAdminAccess=is_service_account,
fields="drives(id),nextPageToken",
):
all_drive_ids.add(drive["id"])
if not all_drive_ids:
self.logger.warning("No drives found even though indexing shared drives was requested.")
return all_drive_ids
def make_drive_id_getter(self, drive_ids: list[str], checkpoint: GoogleDriveCheckpoint) -> Callable[[str], str | None]:
status_lock = threading.Lock()
in_progress_drive_ids = {
completion.current_folder_or_drive_id: user_email
for user_email, completion in checkpoint.completion_map.items()
if completion.stage == DriveRetrievalStage.SHARED_DRIVE_FILES and completion.current_folder_or_drive_id is not None
}
drive_id_status: dict[str, DriveIdStatus] = {}
for drive_id in drive_ids:
if drive_id in self._retrieved_folder_and_drive_ids:
drive_id_status[drive_id] = DriveIdStatus.FINISHED
elif drive_id in in_progress_drive_ids:
drive_id_status[drive_id] = DriveIdStatus.IN_PROGRESS
else:
drive_id_status[drive_id] = DriveIdStatus.AVAILABLE
def get_available_drive_id(thread_id: str) -> str | None:
completion = checkpoint.completion_map[thread_id]
with status_lock:
future_work = None
for drive_id, status in drive_id_status.items():
if drive_id in self._retrieved_folder_and_drive_ids:
drive_id_status[drive_id] = DriveIdStatus.FINISHED
continue
if drive_id in completion.processed_drive_ids:
continue
if status == DriveIdStatus.AVAILABLE:
# add to processed drive ids so if this user fails to retrieve once
# they won't try again on the next checkpoint run
completion.processed_drive_ids.add(drive_id)
return drive_id
elif status == DriveIdStatus.IN_PROGRESS:
self.logger.debug(f"Drive id in progress: {drive_id}")
future_work = drive_id
if future_work:
# in this case, all drive ids are either finished or in progress.
# This thread will pick up one of the in progress ones in case it fails.
# This is a much simpler approach than waiting for a failure picking it up,
# at the cost of some repeated work until all shared drives are retrieved.
# we avoid apocalyptic cases like all threads focusing on one huge drive
# because the drive id is added to _retrieved_folder_and_drive_ids after any thread
# manages to retrieve any file from it (unfortunately, this is also the reason we currently
# sometimes fail to retrieve restricted access folders/files)
completion.processed_drive_ids.add(future_work)
return future_work
return None # no work available, return None
return get_available_drive_id
def _impersonate_user_for_retrieval(
self,
user_email: str,
field_type: DriveFileFieldType,
checkpoint: GoogleDriveCheckpoint,
get_new_drive_id: Callable[[str], str | None],
sorted_filtered_folder_ids: list[str],
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[RetrievedDriveFile]:
self.logger.info(f"Impersonating user {user_email}")
curr_stage = checkpoint.completion_map[user_email]
resuming = True
if curr_stage.stage == DriveRetrievalStage.START:
self.logger.info(f"Setting stage to {DriveRetrievalStage.MY_DRIVE_FILES.value}")
curr_stage.stage = DriveRetrievalStage.MY_DRIVE_FILES
resuming = False
drive_service = get_drive_service(self.creds, user_email)
# validate that the user has access to the drive APIs by performing a simple
# request and checking for a 401
try:
self.logger.debug(f"Getting root folder id for user {user_email}")
get_root_folder_id(drive_service)
except HttpError as e:
if e.status_code == 401:
# fail gracefully, let the other impersonations continue
# one user without access shouldn't block the entire connector
self.logger.warning(f"User '{user_email}' does not have access to the drive APIs.")
# mark this user as done so we don't try to retrieve anything for them
# again
curr_stage.stage = DriveRetrievalStage.DONE
return
raise
except RefreshError as e:
self.logger.warning(f"User '{user_email}' could not refresh their token. Error: {e}")
# mark this user as done so we don't try to retrieve anything for them
# again
yield RetrievedDriveFile(
completion_stage=DriveRetrievalStage.DONE,
drive_file={},
user_email=user_email,
error=e,
)
curr_stage.stage = DriveRetrievalStage.DONE
return
# if we are including my drives, try to get the current user's my
# drive if any of the following are true:
# - include_my_drives is true
# - the current user's email is in the requested emails
if curr_stage.stage == DriveRetrievalStage.MY_DRIVE_FILES:
if self.include_my_drives or user_email in self._requested_my_drive_emails:
self.logger.info(
f"Getting all files in my drive as '{user_email}. Resuming: {resuming}. Stage completed until: {curr_stage.completed_until}. Next page token: {curr_stage.next_page_token}"
)
for file_or_token in add_retrieval_info(
get_all_files_in_my_drive_and_shared(
service=drive_service,
update_traversed_ids_func=self._update_traversed_parent_ids,
field_type=field_type,
include_shared_with_me=self.include_files_shared_with_me,
max_num_pages=MY_DRIVE_PAGES_PER_CHECKPOINT,
start=curr_stage.completed_until if resuming else start,
end=end,
cache_folders=not bool(curr_stage.completed_until),
page_token=curr_stage.next_page_token,
),
user_email,
DriveRetrievalStage.MY_DRIVE_FILES,
):
if isinstance(file_or_token, str):
self.logger.debug(f"Done with max num pages for user {user_email}")
checkpoint.completion_map[user_email].next_page_token = file_or_token
return # done with the max num pages, return checkpoint
yield file_or_token
checkpoint.completion_map[user_email].next_page_token = None
curr_stage.stage = DriveRetrievalStage.SHARED_DRIVE_FILES
curr_stage.current_folder_or_drive_id = None
return # resume from next stage on the next run
if curr_stage.stage == DriveRetrievalStage.SHARED_DRIVE_FILES:
def _yield_from_drive(drive_id: str, drive_start: SecondsSinceUnixEpoch | None) -> Iterator[RetrievedDriveFile | str]:
yield from add_retrieval_info(
get_files_in_shared_drive(
service=drive_service,
drive_id=drive_id,
field_type=field_type,
max_num_pages=SHARED_DRIVE_PAGES_PER_CHECKPOINT,
update_traversed_ids_func=self._update_traversed_parent_ids,
cache_folders=not bool(drive_start), # only cache folders for 0 or None
start=drive_start,
end=end,
page_token=curr_stage.next_page_token,
),
user_email,
DriveRetrievalStage.SHARED_DRIVE_FILES,
parent_id=drive_id,
)
# resume from a checkpoint
if resuming and (drive_id := curr_stage.current_folder_or_drive_id):
resume_start = curr_stage.completed_until
for file_or_token in _yield_from_drive(drive_id, resume_start):
if isinstance(file_or_token, str):
checkpoint.completion_map[user_email].next_page_token = file_or_token
return # done with the max num pages, return checkpoint
yield file_or_token
drive_id = get_new_drive_id(user_email)
if drive_id:
self.logger.info(f"Getting files in shared drive '{drive_id}' as '{user_email}. Resuming: {resuming}")
curr_stage.completed_until = 0
curr_stage.current_folder_or_drive_id = drive_id
for file_or_token in _yield_from_drive(drive_id, start):
if isinstance(file_or_token, str):
checkpoint.completion_map[user_email].next_page_token = file_or_token
return # done with the max num pages, return checkpoint
yield file_or_token
curr_stage.current_folder_or_drive_id = None
return # get a new drive id on the next run
checkpoint.completion_map[user_email].next_page_token = None
curr_stage.stage = DriveRetrievalStage.FOLDER_FILES
curr_stage.current_folder_or_drive_id = None
return # resume from next stage on the next run
# In the folder files section of service account retrieval we take extra care
# to not retrieve duplicate docs. In particular, we only add a folder to
# retrieved_folder_and_drive_ids when all users are finished retrieving files
# from that folder, and maintain a set of all file ids that have been retrieved
# for each folder. This might get rather large; in practice we assume that the
# specific folders users choose to index don't have too many files.
if curr_stage.stage == DriveRetrievalStage.FOLDER_FILES:
def _yield_from_folder_crawl(folder_id: str, folder_start: SecondsSinceUnixEpoch | None) -> Iterator[RetrievedDriveFile]:
for retrieved_file in crawl_folders_for_files(
service=drive_service,
parent_id=folder_id,
field_type=field_type,
user_email=user_email,
traversed_parent_ids=self._retrieved_folder_and_drive_ids,
update_traversed_ids_func=self._update_traversed_parent_ids,
start=folder_start,
end=end,
):
yield retrieved_file
# resume from a checkpoint
last_processed_folder = None
if resuming:
folder_id = curr_stage.current_folder_or_drive_id
if folder_id is None:
self.logger.warning(f"folder id not set in checkpoint for user {user_email}. This happens occasionally when the connector is interrupted and resumed.")
else:
resume_start = curr_stage.completed_until
yield from _yield_from_folder_crawl(folder_id, resume_start)
last_processed_folder = folder_id
skipping_seen_folders = last_processed_folder is not None
# NOTE: this assumes a small number of folders to crawl. If someone
# really wants to specify a large number of folders, we should use
# binary search to find the first unseen folder.
num_completed_folders = 0
for folder_id in sorted_filtered_folder_ids:
if skipping_seen_folders:
skipping_seen_folders = folder_id != last_processed_folder
continue
if folder_id in self._retrieved_folder_and_drive_ids:
continue
curr_stage.completed_until = 0
curr_stage.current_folder_or_drive_id = folder_id
if num_completed_folders >= FOLDERS_PER_CHECKPOINT:
return # resume from this folder on the next run
self.logger.info(f"Getting files in folder '{folder_id}' as '{user_email}'")
yield from _yield_from_folder_crawl(folder_id, start)
num_completed_folders += 1
curr_stage.stage = DriveRetrievalStage.DONE
def _manage_service_account_retrieval(
self,
field_type: DriveFileFieldType,
checkpoint: GoogleDriveCheckpoint,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[RetrievedDriveFile]:
"""
The current implementation of the service account retrieval does some
initial setup work using the primary admin email, then runs MAX_DRIVE_WORKERS
concurrent threads, each of which impersonates a different user and retrieves
files for that user. Technically, the actual work each thread does is "yield the
next file retrieved by the user", at which point it returns to the thread pool;
see parallel_yield for more details.
"""
if checkpoint.completion_stage == DriveRetrievalStage.START:
checkpoint.completion_stage = DriveRetrievalStage.USER_EMAILS
if checkpoint.completion_stage == DriveRetrievalStage.USER_EMAILS:
all_org_emails: list[str] = self._get_all_user_emails()
checkpoint.user_emails = all_org_emails
checkpoint.completion_stage = DriveRetrievalStage.DRIVE_IDS
else:
if checkpoint.user_emails is None:
raise ValueError("user emails not set")
all_org_emails = checkpoint.user_emails
sorted_drive_ids, sorted_folder_ids = self._determine_retrieval_ids(checkpoint, DriveRetrievalStage.MY_DRIVE_FILES)
# Setup initial completion map on first connector run
for email in all_org_emails:
# don't overwrite existing completion map on resuming runs
if email in checkpoint.completion_map:
continue
checkpoint.completion_map[email] = StageCompletion(
stage=DriveRetrievalStage.START,
completed_until=0,
processed_drive_ids=set(),
)
# we've found all users and drives, now time to actually start
# fetching stuff
self.logger.info(f"Found {len(all_org_emails)} users to impersonate")
self.logger.debug(f"Users: {all_org_emails}")
self.logger.info(f"Found {len(sorted_drive_ids)} drives to retrieve")
self.logger.debug(f"Drives: {sorted_drive_ids}")
self.logger.info(f"Found {len(sorted_folder_ids)} folders to retrieve")
self.logger.debug(f"Folders: {sorted_folder_ids}")
drive_id_getter = self.make_drive_id_getter(sorted_drive_ids, checkpoint)
# only process emails that we haven't already completed retrieval for
non_completed_org_emails = [user_email for user_email, stage_completion in checkpoint.completion_map.items() if stage_completion.stage != DriveRetrievalStage.DONE]
self.logger.debug(f"Non-completed users remaining: {len(non_completed_org_emails)}")
# don't process too many emails before returning a checkpoint. This is
# to resolve the case where there are a ton of emails that don't have access
# to the drive APIs. Without this, we could loop through these emails for
# more than 3 hours, causing a timeout and stalling progress.
email_batch_takes_us_to_completion = True
MAX_EMAILS_TO_PROCESS_BEFORE_CHECKPOINTING = MAX_DRIVE_WORKERS
if len(non_completed_org_emails) > MAX_EMAILS_TO_PROCESS_BEFORE_CHECKPOINTING:
non_completed_org_emails = non_completed_org_emails[:MAX_EMAILS_TO_PROCESS_BEFORE_CHECKPOINTING]
email_batch_takes_us_to_completion = False
user_retrieval_gens = [
self._impersonate_user_for_retrieval(
email,
field_type,
checkpoint,
drive_id_getter,
sorted_folder_ids,
start,
end,
)
for email in non_completed_org_emails
]
yield from parallel_yield(user_retrieval_gens, max_workers=MAX_DRIVE_WORKERS)
# if there are more emails to process, don't mark as complete
if not email_batch_takes_us_to_completion:
return
remaining_folders = (set(sorted_drive_ids) | set(sorted_folder_ids)) - self._retrieved_folder_and_drive_ids
if remaining_folders:
self.logger.warning(f"Some folders/drives were not retrieved. IDs: {remaining_folders}")
if any(checkpoint.completion_map[user_email].stage != DriveRetrievalStage.DONE for user_email in all_org_emails):
self.logger.info("some users did not complete retrieval, returning checkpoint for another run")
return
checkpoint.completion_stage = DriveRetrievalStage.DONE
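    # Illustrative sketch of the multiplexing used above, with toy generators in
    # place of the per-user retrieval generators. parallel_yield is the helper
    # imported at the top of this module; it is assumed to accept any iterables.
    @staticmethod
    def _example_parallel_yield() -> None:
        gens = [(x for x in ["a1", "a2"]), (x for x in ["b1"])]
        for item in parallel_yield(gens, max_workers=2):
            print(item)  # interleaving order depends on thread scheduling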
def _determine_retrieval_ids(
self,
checkpoint: GoogleDriveCheckpoint,
next_stage: DriveRetrievalStage,
) -> tuple[list[str], list[str]]:
all_drive_ids = self.get_all_drive_ids()
sorted_drive_ids: list[str] = []
sorted_folder_ids: list[str] = []
if checkpoint.completion_stage == DriveRetrievalStage.DRIVE_IDS:
if self._requested_shared_drive_ids or self._requested_folder_ids:
(
sorted_drive_ids,
sorted_folder_ids,
) = _clean_requested_drive_ids(
requested_drive_ids=self._requested_shared_drive_ids,
requested_folder_ids=self._requested_folder_ids,
all_drive_ids_available=all_drive_ids,
)
elif self.include_shared_drives:
sorted_drive_ids = sorted(all_drive_ids)
checkpoint.drive_ids_to_retrieve = sorted_drive_ids
checkpoint.folder_ids_to_retrieve = sorted_folder_ids
checkpoint.completion_stage = next_stage
else:
if checkpoint.drive_ids_to_retrieve is None:
raise ValueError("drive ids to retrieve not set in checkpoint")
if checkpoint.folder_ids_to_retrieve is None:
raise ValueError("folder ids to retrieve not set in checkpoint")
# When loading from a checkpoint, load the previously cached drive and folder ids
sorted_drive_ids = checkpoint.drive_ids_to_retrieve
sorted_folder_ids = checkpoint.folder_ids_to_retrieve
return sorted_drive_ids, sorted_folder_ids
def _oauth_retrieval_drives(
self,
field_type: DriveFileFieldType,
drive_service: GoogleDriveService,
drive_ids_to_retrieve: list[str],
checkpoint: GoogleDriveCheckpoint,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[RetrievedDriveFile | str]:
def _yield_from_drive(drive_id: str, drive_start: SecondsSinceUnixEpoch | None) -> Iterator[RetrievedDriveFile | str]:
yield from add_retrieval_info(
get_files_in_shared_drive(
service=drive_service,
drive_id=drive_id,
field_type=field_type,
max_num_pages=SHARED_DRIVE_PAGES_PER_CHECKPOINT,
cache_folders=not bool(drive_start), # only cache folders for 0 or None
update_traversed_ids_func=self._update_traversed_parent_ids,
start=drive_start,
end=end,
page_token=checkpoint.completion_map[self.primary_admin_email].next_page_token,
),
self.primary_admin_email,
DriveRetrievalStage.SHARED_DRIVE_FILES,
parent_id=drive_id,
)
# If we are resuming from a checkpoint, we need to finish retrieving the files from the last drive we retrieved
if checkpoint.completion_map[self.primary_admin_email].stage == DriveRetrievalStage.SHARED_DRIVE_FILES:
drive_id = checkpoint.completion_map[self.primary_admin_email].current_folder_or_drive_id
if drive_id is None:
raise ValueError("drive id not set in checkpoint")
resume_start = checkpoint.completion_map[self.primary_admin_email].completed_until
for file_or_token in _yield_from_drive(drive_id, resume_start):
if isinstance(file_or_token, str):
checkpoint.completion_map[self.primary_admin_email].next_page_token = file_or_token
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | true |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/google_drive/model.py | common/data_source/google_drive/model.py | from enum import Enum
from typing import Any
from pydantic import BaseModel, ConfigDict, field_serializer, field_validator
from common.data_source.google_util.util_threadpool_concurrency import ThreadSafeDict
from common.data_source.models import ConnectorCheckpoint, SecondsSinceUnixEpoch
GoogleDriveFileType = dict[str, Any]
class GDriveMimeType(str, Enum):
DOC = "application/vnd.google-apps.document"
SPREADSHEET = "application/vnd.google-apps.spreadsheet"
SPREADSHEET_OPEN_FORMAT = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
SPREADSHEET_MS_EXCEL = "application/vnd.ms-excel"
PDF = "application/pdf"
WORD_DOC = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
PPT = "application/vnd.google-apps.presentation"
POWERPOINT = "application/vnd.openxmlformats-officedocument.presentationml.presentation"
PLAIN_TEXT = "text/plain"
MARKDOWN = "text/markdown"
# These correspond to the major stages of retrieval for Google Drive.
# The stages for the oauth flow are:
# get_all_files_for_oauth(),
# get_all_drive_ids(),
# get_files_in_shared_drive(),
# crawl_folders_for_files()
#
# The stages for the service account flow are roughly:
# get_all_user_emails(),
# get_all_drive_ids(),
# get_files_in_shared_drive(),
# Then for each user:
# get_files_in_my_drive()
# get_files_in_shared_drive()
# crawl_folders_for_files()
class DriveRetrievalStage(str, Enum):
START = "start"
DONE = "done"
# OAuth specific stages
OAUTH_FILES = "oauth_files"
# Service account specific stages
USER_EMAILS = "user_emails"
MY_DRIVE_FILES = "my_drive_files"
# Used for both oauth and service account flows
DRIVE_IDS = "drive_ids"
SHARED_DRIVE_FILES = "shared_drive_files"
FOLDER_FILES = "folder_files"
class StageCompletion(BaseModel):
"""
Describes the point in the retrieval+indexing process that the
connector is at. completed_until is the timestamp of the latest
file that has been retrieved or error that has been yielded.
Optional fields are used for retrieval stages that need more information
for resuming than just the timestamp of the latest file.
"""
stage: DriveRetrievalStage
completed_until: SecondsSinceUnixEpoch
current_folder_or_drive_id: str | None = None
next_page_token: str | None = None
# only used for shared drives
processed_drive_ids: set[str] = set()
def update(
self,
stage: DriveRetrievalStage,
completed_until: SecondsSinceUnixEpoch,
current_folder_or_drive_id: str | None = None,
) -> None:
self.stage = stage
self.completed_until = completed_until
self.current_folder_or_drive_id = current_folder_or_drive_id
class GoogleDriveCheckpoint(ConnectorCheckpoint):
# Checkpoint version of _retrieved_ids
retrieved_folder_and_drive_ids: set[str]
# Describes the point in the retrieval+indexing process that the
# checkpoint is at. when this is set to a given stage, the connector
# has finished yielding all values from the previous stage.
completion_stage: DriveRetrievalStage
# The latest timestamp of a file that has been retrieved per user email.
# StageCompletion is used to track the completion of each stage, but the
# timestamp part is not used for folder crawling.
completion_map: ThreadSafeDict[str, StageCompletion]
# all file ids that have been retrieved
all_retrieved_file_ids: set[str] = set()
# cached version of the drive and folder ids to retrieve
drive_ids_to_retrieve: list[str] | None = None
folder_ids_to_retrieve: list[str] | None = None
# cached user emails
user_emails: list[str] | None = None
@field_serializer("completion_map")
def serialize_completion_map(self, completion_map: ThreadSafeDict[str, StageCompletion], _info: Any) -> dict[str, StageCompletion]:
return completion_map._dict
@field_validator("completion_map", mode="before")
def validate_completion_map(cls, v: Any) -> ThreadSafeDict[str, StageCompletion]:
        assert isinstance(v, (dict, ThreadSafeDict))
return ThreadSafeDict({k: StageCompletion.model_validate(val) for k, val in v.items()})
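# Illustrative round-trip sketch (minimal field set; `has_more` is assumed to be
# inherited from ConnectorCheckpoint): completion_map serializes to a plain dict
# and is validated back into a ThreadSafeDict on load.
def _example_checkpoint_roundtrip() -> None:
    cp = GoogleDriveCheckpoint(
        has_more=True,
        retrieved_folder_and_drive_ids=set(),
        completion_stage=DriveRetrievalStage.START,
        completion_map={"user@example.com": StageCompletion(stage=DriveRetrievalStage.START, completed_until=0)},
    )
    restored = GoogleDriveCheckpoint.model_validate(cp.model_dump())
    print(type(restored.completion_map).__name__)  # ThreadSafeDict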
class RetrievedDriveFile(BaseModel):
"""
Describes a file that has been retrieved from Google Drive.
user_email is the email of the user that the file was retrieved
by impersonating. If an error worthy of being reported is encountered,
error should be set and later propagated as a ConnectorFailure.
"""
# The stage at which this file was retrieved
completion_stage: DriveRetrievalStage
# The file that was retrieved
drive_file: GoogleDriveFileType
# The email of the user that the file was retrieved by impersonating
user_email: str
# The id of the parent folder or drive of the file
parent_id: str | None = None
# Any unexpected error that occurred while retrieving the file.
# In particular, this is not used for 403/404 errors, which are expected
# in the context of impersonating all the users to try to retrieve all
# files from all their Drives and Folders.
error: Exception | None = None
model_config = ConfigDict(arbitrary_types_allowed=True)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/google_drive/section_extraction.py | common/data_source/google_drive/section_extraction.py | from typing import Any
from pydantic import BaseModel
from common.data_source.google_util.resource import GoogleDocsService
from common.data_source.models import TextSection
HEADING_DELIMITER = "\n"
class CurrentHeading(BaseModel):
id: str | None
text: str
def get_document_sections(
docs_service: GoogleDocsService,
doc_id: str,
) -> list[TextSection]:
"""Extracts sections from a Google Doc, including their headings and content"""
# Fetch the document structure
http_request = docs_service.documents().get(documentId=doc_id)
    # Google has poor support for tabs in the Docs API; see
    # https://cloud.google.com/python/docs/reference/cloudtasks/latest/google.cloud.tasks_v2.types.HttpRequest
    # https://developers.google.com/workspace/docs/api/how-tos/tabs
    # https://developers.google.com/workspace/docs/api/reference/rest/v1/documents/get
    # Appending the query parameter directly is a hack to use the param
    # mentioned in the REST API docs.
    # TODO: check if it can be specified e.g. in documents().get()
http_request.uri += "&includeTabsContent=true"
doc = http_request.execute()
# Get the content
    tabs = doc.get("tabs", [])
sections: list[TextSection] = []
for tab in tabs:
sections.extend(get_tab_sections(tab, doc_id))
return sections
def _is_heading(paragraph: dict[str, Any]) -> bool:
"""Checks if a paragraph (a block of text in a drive document) is a heading"""
if not ("paragraphStyle" in paragraph and "namedStyleType" in paragraph["paragraphStyle"]):
return False
style = paragraph["paragraphStyle"]["namedStyleType"]
is_heading = style.startswith("HEADING_")
is_title = style.startswith("TITLE")
return is_heading or is_title
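# Illustrative sketch: named styles beginning with HEADING_ or TITLE are treated
# as headings; everything else is body text.
def _example_is_heading() -> None:
    print(_is_heading({"paragraphStyle": {"namedStyleType": "HEADING_1"}}))  # True
    print(_is_heading({"paragraphStyle": {"namedStyleType": "NORMAL_TEXT"}}))  # False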
def _add_finished_section(
sections: list[TextSection],
doc_id: str,
tab_id: str,
current_heading: CurrentHeading,
current_section: list[str],
) -> None:
"""Adds a finished section to the list of sections if the section has content.
Returns the list of sections to use going forward, which may be the old list
if a new section was not added.
"""
if not (current_section or current_heading.text):
return
# If we were building a previous section, add it to sections list
# this is unlikely to ever matter, but helps if the doc contains weird headings
header_text = current_heading.text.replace(HEADING_DELIMITER, "")
section_text = f"{header_text}{HEADING_DELIMITER}" + "\n".join(current_section)
sections.append(
TextSection(
text=section_text.strip(),
link=_build_gdoc_section_link(doc_id, tab_id, current_heading.id),
)
)
def _build_gdoc_section_link(doc_id: str, tab_id: str, heading_id: str | None) -> str:
"""Builds a Google Doc link that jumps to a specific heading"""
# NOTE: doesn't support docs with multiple tabs atm, if we need that ask
# @Chris
heading_str = f"#heading={heading_id}" if heading_id else ""
return f"https://docs.google.com/document/d/{doc_id}/edit?tab={tab_id}{heading_str}"
def _extract_id_from_heading(paragraph: dict[str, Any]) -> str:
"""Extracts the id from a heading paragraph element"""
return paragraph["paragraphStyle"]["headingId"]
def _extract_text_from_paragraph(paragraph: dict[str, Any]) -> str:
"""Extracts the text content from a paragraph element"""
text_elements = []
for element in paragraph.get("elements", []):
if "textRun" in element:
text_elements.append(element["textRun"].get("content", ""))
# Handle links
if "textStyle" in element and "link" in element["textStyle"]:
text_elements.append(f"({element['textStyle']['link'].get('url', '')})")
if "person" in element:
name = element["person"].get("personProperties", {}).get("name", "")
email = element["person"].get("personProperties", {}).get("email", "")
person_str = "<Person|"
if name:
person_str += f"name: {name}, "
if email:
person_str += f"email: {email}"
person_str += ">"
text_elements.append(person_str)
if "richLink" in element:
props = element["richLink"].get("richLinkProperties", {})
title = props.get("title", "")
uri = props.get("uri", "")
link_str = f"[{title}]({uri})"
text_elements.append(link_str)
return "".join(text_elements)
def _extract_text_from_table(table: dict[str, Any]) -> str:
"""
Extracts the text content from a table element.
"""
row_strs = []
for row in table.get("tableRows", []):
cells = row.get("tableCells", [])
cell_strs = []
for cell in cells:
            child_elements = cell.get("content", [])
cell_str = []
for child_elem in child_elements:
if "paragraph" not in child_elem:
continue
cell_str.append(_extract_text_from_paragraph(child_elem["paragraph"]))
cell_strs.append("".join(cell_str))
row_strs.append(", ".join(cell_strs))
return "\n".join(row_strs)
def get_tab_sections(tab: dict[str, Any], doc_id: str) -> list[TextSection]:
tab_id = tab["tabProperties"]["tabId"]
content = tab.get("documentTab", {}).get("body", {}).get("content", [])
sections: list[TextSection] = []
current_section: list[str] = []
current_heading = CurrentHeading(id=None, text="")
for element in content:
if "paragraph" in element:
paragraph = element["paragraph"]
# If this is not a heading, add content to current section
if not _is_heading(paragraph):
text = _extract_text_from_paragraph(paragraph)
if text.strip():
current_section.append(text)
continue
_add_finished_section(sections, doc_id, tab_id, current_heading, current_section)
current_section = []
# Start new heading
heading_id = _extract_id_from_heading(paragraph)
heading_text = _extract_text_from_paragraph(paragraph)
current_heading = CurrentHeading(
id=heading_id,
text=heading_text,
)
elif "table" in element:
text = _extract_text_from_table(element["table"])
if text.strip():
current_section.append(text)
# Don't forget to add the last section
_add_finished_section(sections, doc_id, tab_id, current_heading, current_section)
return sections
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/google_drive/__init__.py | common/data_source/google_drive/__init__.py | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false | |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/google_drive/file_retrieval.py | common/data_source/google_drive/file_retrieval.py | import logging
from collections.abc import Callable, Iterator
from datetime import datetime, timezone
from enum import Enum
from googleapiclient.discovery import Resource # type: ignore
from googleapiclient.errors import HttpError # type: ignore
from common.data_source.google_drive.constant import DRIVE_FOLDER_TYPE, DRIVE_SHORTCUT_TYPE
from common.data_source.google_drive.model import DriveRetrievalStage, GoogleDriveFileType, RetrievedDriveFile
from common.data_source.google_util.resource import GoogleDriveService
from common.data_source.google_util.util import ORDER_BY_KEY, PAGE_TOKEN_KEY, GoogleFields, execute_paginated_retrieval, execute_paginated_retrieval_with_max_pages
from common.data_source.models import SecondsSinceUnixEpoch
PERMISSION_FULL_DESCRIPTION = "permissions(id, emailAddress, type, domain, permissionDetails)"
FILE_FIELDS = "nextPageToken, files(mimeType, id, name, modifiedTime, webViewLink, shortcutDetails, owners(emailAddress), size)"
FILE_FIELDS_WITH_PERMISSIONS = f"nextPageToken, files(mimeType, id, name, {PERMISSION_FULL_DESCRIPTION}, permissionIds, modifiedTime, webViewLink, shortcutDetails, owners(emailAddress), size)"
SLIM_FILE_FIELDS = f"nextPageToken, files(mimeType, driveId, id, name, {PERMISSION_FULL_DESCRIPTION}, permissionIds, webViewLink, owners(emailAddress), modifiedTime)"
FOLDER_FIELDS = "nextPageToken, files(id, name, permissions, modifiedTime, webViewLink, shortcutDetails)"
class DriveFileFieldType(Enum):
"""Enum to specify which fields to retrieve from Google Drive files"""
SLIM = "slim" # Minimal fields for basic file info
STANDARD = "standard" # Standard fields including content metadata
WITH_PERMISSIONS = "with_permissions" # Full fields including permissions
def generate_time_range_filter(
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> str:
time_range_filter = ""
if start is not None:
time_start = datetime.fromtimestamp(start, tz=timezone.utc).isoformat()
time_range_filter += f" and {GoogleFields.MODIFIED_TIME.value} > '{time_start}'"
if end is not None:
time_stop = datetime.fromtimestamp(end, tz=timezone.utc).isoformat()
time_range_filter += f" and {GoogleFields.MODIFIED_TIME.value} <= '{time_stop}'"
return time_range_filter
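# Illustrative sketch: the generated clause is appended to a Drive "q" query.
# For start=0 and end=86400 the output looks like
# " and modifiedTime > '1970-01-01T00:00:00+00:00' and modifiedTime <= '1970-01-02T00:00:00+00:00'".
def _example_time_range_filter() -> None:
    print(generate_time_range_filter(start=0, end=86400))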
def _get_folders_in_parent(
service: Resource,
parent_id: str | None = None,
) -> Iterator[GoogleDriveFileType]:
# Follow shortcuts to folders
query = f"(mimeType = '{DRIVE_FOLDER_TYPE}' or mimeType = '{DRIVE_SHORTCUT_TYPE}')"
query += " and trashed = false"
if parent_id:
query += f" and '{parent_id}' in parents"
for file in execute_paginated_retrieval(
retrieval_function=service.files().list,
list_key="files",
continue_on_404_or_403=True,
corpora="allDrives",
supportsAllDrives=True,
includeItemsFromAllDrives=True,
fields=FOLDER_FIELDS,
q=query,
):
yield file
def _get_fields_for_file_type(field_type: DriveFileFieldType) -> str:
"""Get the appropriate fields string based on the field type enum"""
if field_type == DriveFileFieldType.SLIM:
return SLIM_FILE_FIELDS
elif field_type == DriveFileFieldType.WITH_PERMISSIONS:
return FILE_FIELDS_WITH_PERMISSIONS
else: # DriveFileFieldType.STANDARD
return FILE_FIELDS
def _get_files_in_parent(
service: Resource,
parent_id: str,
field_type: DriveFileFieldType,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[GoogleDriveFileType]:
query = f"mimeType != '{DRIVE_FOLDER_TYPE}' and '{parent_id}' in parents"
query += " and trashed = false"
query += generate_time_range_filter(start, end)
kwargs = {ORDER_BY_KEY: GoogleFields.MODIFIED_TIME.value}
for file in execute_paginated_retrieval(
retrieval_function=service.files().list,
list_key="files",
continue_on_404_or_403=True,
corpora="allDrives",
supportsAllDrives=True,
includeItemsFromAllDrives=True,
fields=_get_fields_for_file_type(field_type),
q=query,
**kwargs,
):
yield file
def crawl_folders_for_files(
service: Resource,
parent_id: str,
field_type: DriveFileFieldType,
user_email: str,
traversed_parent_ids: set[str],
update_traversed_ids_func: Callable[[str], None],
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[RetrievedDriveFile]:
"""
    This function can start crawling from any folder, at the cost of being
    slower than drive-wide listing because it issues one query per folder.
    """
logging.info("Entered crawl_folders_for_files with parent_id: " + parent_id)
if parent_id not in traversed_parent_ids:
logging.info("Parent id not in traversed parent ids, getting files")
found_files = False
file = {}
try:
for file in _get_files_in_parent(
service=service,
parent_id=parent_id,
field_type=field_type,
start=start,
end=end,
):
logging.info(f"Found file: {file['name']}, user email: {user_email}")
found_files = True
yield RetrievedDriveFile(
drive_file=file,
user_email=user_email,
parent_id=parent_id,
completion_stage=DriveRetrievalStage.FOLDER_FILES,
)
# Only mark a folder as done if it was fully traversed without errors
# This usually indicates that the owner of the folder was impersonated.
# In cases where this never happens, most likely the folder owner is
# not part of the Google Workspace in question (or for oauth, the authenticated
# user doesn't own the folder)
if found_files:
update_traversed_ids_func(parent_id)
except Exception as e:
if isinstance(e, HttpError) and e.status_code == 403:
# don't yield an error here because this is expected behavior
# when a user doesn't have access to a folder
logging.debug(f"Error getting files in parent {parent_id}: {e}")
else:
logging.error(f"Error getting files in parent {parent_id}: {e}")
yield RetrievedDriveFile(
drive_file=file,
user_email=user_email,
parent_id=parent_id,
completion_stage=DriveRetrievalStage.FOLDER_FILES,
error=e,
)
else:
logging.info(f"Skipping subfolder files since already traversed: {parent_id}")
for subfolder in _get_folders_in_parent(
service=service,
parent_id=parent_id,
):
logging.info("Fetching all files in subfolder: " + subfolder["name"])
yield from crawl_folders_for_files(
service=service,
parent_id=subfolder["id"],
field_type=field_type,
user_email=user_email,
traversed_parent_ids=traversed_parent_ids,
update_traversed_ids_func=update_traversed_ids_func,
start=start,
end=end,
)
def get_files_in_shared_drive(
service: Resource,
drive_id: str,
field_type: DriveFileFieldType,
max_num_pages: int,
update_traversed_ids_func: Callable[[str], None] = lambda _: None,
cache_folders: bool = True,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
page_token: str | None = None,
) -> Iterator[GoogleDriveFileType | str]:
kwargs = {ORDER_BY_KEY: GoogleFields.MODIFIED_TIME.value}
if page_token:
logging.info(f"Using page token: {page_token}")
kwargs[PAGE_TOKEN_KEY] = page_token
if cache_folders:
# If we know we are going to folder crawl later, we can cache the folders here
# Get all folders being queried and add them to the traversed set
folder_query = f"mimeType = '{DRIVE_FOLDER_TYPE}'"
folder_query += " and trashed = false"
for folder in execute_paginated_retrieval(
retrieval_function=service.files().list,
list_key="files",
continue_on_404_or_403=True,
corpora="drive",
driveId=drive_id,
supportsAllDrives=True,
includeItemsFromAllDrives=True,
fields="nextPageToken, files(id)",
q=folder_query,
):
update_traversed_ids_func(folder["id"])
# Get all files in the shared drive
file_query = f"mimeType != '{DRIVE_FOLDER_TYPE}'"
file_query += " and trashed = false"
file_query += generate_time_range_filter(start, end)
for file in execute_paginated_retrieval_with_max_pages(
retrieval_function=service.files().list,
max_num_pages=max_num_pages,
list_key="files",
continue_on_404_or_403=True,
corpora="drive",
driveId=drive_id,
supportsAllDrives=True,
includeItemsFromAllDrives=True,
fields=_get_fields_for_file_type(field_type),
q=file_query,
**kwargs,
):
# If we found any files, mark this drive as traversed. When a user has access to a drive,
# they have access to all the files in the drive. Also, not a huge deal if we re-traverse
# empty drives.
# NOTE: ^^ the above is not actually true due to folder restrictions:
# https://support.google.com/a/users/answer/12380484?hl=en
# So we may have to change this logic for people who use folder restrictions.
update_traversed_ids_func(drive_id)
yield file
def get_all_files_in_my_drive_and_shared(
service: GoogleDriveService,
update_traversed_ids_func: Callable,
field_type: DriveFileFieldType,
include_shared_with_me: bool,
max_num_pages: int,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
cache_folders: bool = True,
page_token: str | None = None,
) -> Iterator[GoogleDriveFileType | str]:
kwargs = {ORDER_BY_KEY: GoogleFields.MODIFIED_TIME.value}
if page_token:
logging.info(f"Using page token: {page_token}")
kwargs[PAGE_TOKEN_KEY] = page_token
if cache_folders:
# If we know we are going to folder crawl later, we can cache the folders here
# Get all folders being queried and add them to the traversed set
folder_query = f"mimeType = '{DRIVE_FOLDER_TYPE}'"
folder_query += " and trashed = false"
if not include_shared_with_me:
folder_query += " and 'me' in owners"
found_folders = False
for folder in execute_paginated_retrieval(
retrieval_function=service.files().list,
list_key="files",
corpora="user",
fields=_get_fields_for_file_type(field_type),
q=folder_query,
):
            update_traversed_ids_func(folder[GoogleFields.ID.value])
found_folders = True
if found_folders:
update_traversed_ids_func(get_root_folder_id(service))
# Then get the files
file_query = f"mimeType != '{DRIVE_FOLDER_TYPE}'"
file_query += " and trashed = false"
if not include_shared_with_me:
file_query += " and 'me' in owners"
file_query += generate_time_range_filter(start, end)
yield from execute_paginated_retrieval_with_max_pages(
retrieval_function=service.files().list,
max_num_pages=max_num_pages,
list_key="files",
continue_on_404_or_403=False,
corpora="user",
fields=_get_fields_for_file_type(field_type),
q=file_query,
**kwargs,
)
def get_all_files_for_oauth(
service: GoogleDriveService,
include_files_shared_with_me: bool,
include_my_drives: bool,
# One of the above 2 should be true
include_shared_drives: bool,
field_type: DriveFileFieldType,
max_num_pages: int,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
page_token: str | None = None,
) -> Iterator[GoogleDriveFileType | str]:
kwargs = {ORDER_BY_KEY: GoogleFields.MODIFIED_TIME.value}
if page_token:
logging.info(f"Using page token: {page_token}")
kwargs[PAGE_TOKEN_KEY] = page_token
should_get_all = include_shared_drives and include_my_drives and include_files_shared_with_me
corpora = "allDrives" if should_get_all else "user"
file_query = f"mimeType != '{DRIVE_FOLDER_TYPE}'"
file_query += " and trashed = false"
file_query += generate_time_range_filter(start, end)
if not should_get_all:
if include_files_shared_with_me and not include_my_drives:
file_query += " and not 'me' in owners"
if not include_files_shared_with_me and include_my_drives:
file_query += " and 'me' in owners"
yield from execute_paginated_retrieval_with_max_pages(
max_num_pages=max_num_pages,
retrieval_function=service.files().list,
list_key="files",
continue_on_404_or_403=False,
corpora=corpora,
includeItemsFromAllDrives=should_get_all,
supportsAllDrives=should_get_all,
fields=_get_fields_for_file_type(field_type),
q=file_query,
**kwargs,
)
# Just in case we need to get the root folder id
def get_root_folder_id(service: Resource) -> str:
# we don't paginate here because there is only one root folder per user
# https://developers.google.com/drive/api/guides/v2-to-v3-reference
return service.files().get(fileId="root", fields=GoogleFields.ID.value).execute()[GoogleFields.ID.value]
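# --- Hedged usage sketch (not part of the module) ----------------------------
# The helpers above all compose Drive v3 `files.list` query strings from the
# same ingredients: a mimeType clause, `trashed = false`, optional ownership
# clauses, and a time-range filter. The standalone function below reproduces
# that composition so the final query shape is visible without a live Drive
# service; the modifiedTime bound is a made-up stand-in for what
# generate_time_range_filter appends.
def _example_drive_file_query(include_shared_with_me: bool = False) -> str:
    query = f"mimeType != '{DRIVE_FOLDER_TYPE}'"
    query += " and trashed = false"
    if not include_shared_with_me:
        query += " and 'me' in owners"
    # Hypothetical window; the real code appends generate_time_range_filter(start, end).
    query += " and modifiedTime >= '2024-01-01T00:00:00'"
    return query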
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/google_drive/constant.py | common/data_source/google_drive/constant.py | UNSUPPORTED_FILE_TYPE_CONTENT = "" # keep empty for now
DRIVE_FOLDER_TYPE = "application/vnd.google-apps.folder"
DRIVE_SHORTCUT_TYPE = "application/vnd.google-apps.shortcut"
DRIVE_FILE_TYPE = "application/vnd.google-apps.file"
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/bitbucket/connector.py | common/data_source/bitbucket/connector.py | from __future__ import annotations
import copy
from collections.abc import Callable
from collections.abc import Iterator
from datetime import datetime
from datetime import timezone
from typing import Any
from typing import TYPE_CHECKING
from typing_extensions import override
from common.data_source.config import INDEX_BATCH_SIZE
from common.data_source.config import DocumentSource
from common.data_source.config import REQUEST_TIMEOUT_SECONDS
from common.data_source.exceptions import (
ConnectorMissingCredentialError,
CredentialExpiredError,
InsufficientPermissionsError,
UnexpectedValidationError,
)
from common.data_source.interfaces import CheckpointedConnector
from common.data_source.interfaces import CheckpointOutput
from common.data_source.interfaces import IndexingHeartbeatInterface
from common.data_source.interfaces import SecondsSinceUnixEpoch
from common.data_source.interfaces import SlimConnectorWithPermSync
from common.data_source.models import ConnectorCheckpoint
from common.data_source.models import ConnectorFailure
from common.data_source.models import DocumentFailure
from common.data_source.models import SlimDocument
from common.data_source.bitbucket.utils import (
build_auth_client,
list_repositories,
map_pr_to_document,
paginate,
PR_LIST_RESPONSE_FIELDS,
SLIM_PR_LIST_RESPONSE_FIELDS,
)
if TYPE_CHECKING:
import httpx
class BitbucketConnectorCheckpoint(ConnectorCheckpoint):
"""Checkpoint state for resumable Bitbucket PR indexing.
Fields:
repos_queue: Materialized list of repository slugs to process.
current_repo_index: Index of the repository currently being processed.
next_url: Bitbucket "next" URL for continuing pagination within the current repo.
"""
repos_queue: list[str] = []
current_repo_index: int = 0
next_url: str | None = None
class BitbucketConnector(
CheckpointedConnector[BitbucketConnectorCheckpoint],
SlimConnectorWithPermSync,
):
"""Connector for indexing Bitbucket Cloud pull requests.
Args:
workspace: Bitbucket workspace ID.
repositories: Comma-separated list of repository slugs to index.
projects: Comma-separated list of project keys to index all repositories within.
batch_size: Max number of documents to yield per batch.
"""
def __init__(
self,
workspace: str,
repositories: str | None = None,
projects: str | None = None,
batch_size: int = INDEX_BATCH_SIZE,
) -> None:
self.workspace = workspace
self._repositories = (
[s.strip() for s in repositories.split(",") if s.strip()]
if repositories
else None
)
self._projects: list[str] | None = (
[s.strip() for s in projects.split(",") if s.strip()] if projects else None
)
self.batch_size = batch_size
self.email: str | None = None
self.api_token: str | None = None
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
"""Load API token-based credentials.
Expects a dict with keys: `bitbucket_email`, `bitbucket_api_token`.
"""
self.email = credentials.get("bitbucket_email")
self.api_token = credentials.get("bitbucket_api_token")
if not self.email or not self.api_token:
raise ConnectorMissingCredentialError("Bitbucket")
return None
def _client(self) -> httpx.Client:
"""Build an authenticated HTTP client or raise if credentials missing."""
if not self.email or not self.api_token:
raise ConnectorMissingCredentialError("Bitbucket")
return build_auth_client(self.email, self.api_token)
def _iter_pull_requests_for_repo(
self,
client: httpx.Client,
repo_slug: str,
params: dict[str, Any] | None = None,
start_url: str | None = None,
on_page: Callable[[str | None], None] | None = None,
) -> Iterator[dict[str, Any]]:
base = f"https://api.bitbucket.org/2.0/repositories/{self.workspace}/{repo_slug}/pullrequests"
yield from paginate(
client,
base,
params,
start_url=start_url,
on_page=on_page,
)
def _build_params(
self,
fields: str = PR_LIST_RESPONSE_FIELDS,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> dict[str, Any]:
"""Build Bitbucket fetch params.
Always include OPEN, MERGED, and DECLINED PRs. If both ``start`` and
``end`` are provided, apply a single updated_on time window.
"""
def _iso(ts: SecondsSinceUnixEpoch) -> str:
return datetime.fromtimestamp(ts, tz=timezone.utc).isoformat()
def _tc_epoch(
lower_epoch: SecondsSinceUnixEpoch | None,
upper_epoch: SecondsSinceUnixEpoch | None,
) -> str | None:
if lower_epoch is not None and upper_epoch is not None:
lower_iso = _iso(lower_epoch)
upper_iso = _iso(upper_epoch)
return f'(updated_on > "{lower_iso}" AND updated_on <= "{upper_iso}")'
return None
params: dict[str, Any] = {"fields": fields, "pagelen": 50}
time_clause = _tc_epoch(start, end)
q = '(state = "OPEN" OR state = "MERGED" OR state = "DECLINED")'
if time_clause:
q = f"{q} AND {time_clause}"
params["q"] = q
return params
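    # Illustrative `q` clause produced by _build_params above for a time window
    # (the ISO strings below are made up; real values depend on start/end):
    #   (state = "OPEN" OR state = "MERGED" OR state = "DECLINED")
    #   AND (updated_on > "2024-01-01T00:00:00+00:00" AND updated_on <= "2024-02-01T00:00:00+00:00")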
def _iter_target_repositories(self, client: httpx.Client) -> Iterator[str]:
"""Yield repository slugs based on configuration.
Priority:
- repositories list
- projects list (list repos by project key)
- workspace (all repos)
"""
if self._repositories:
for slug in self._repositories:
yield slug
return
if self._projects:
for project_key in self._projects:
for repo in list_repositories(client, self.workspace, project_key):
slug_val = repo.get("slug")
if isinstance(slug_val, str) and slug_val:
yield slug_val
return
for repo in list_repositories(client, self.workspace, None):
slug_val = repo.get("slug")
if isinstance(slug_val, str) and slug_val:
yield slug_val
@override
def load_from_checkpoint(
self,
start: SecondsSinceUnixEpoch,
end: SecondsSinceUnixEpoch,
checkpoint: BitbucketConnectorCheckpoint,
) -> CheckpointOutput[BitbucketConnectorCheckpoint]:
"""Resumable PR ingestion across repos and pages within a time window.
Yields Documents (or ConnectorFailure for per-PR mapping failures) and returns
an updated checkpoint that records repo position and next page URL.
"""
new_checkpoint = copy.deepcopy(checkpoint)
with self._client() as client:
# Materialize target repositories once
if not new_checkpoint.repos_queue:
                # Deduplicate and sort so ordering is deterministic across runs
repos_list = list(self._iter_target_repositories(client))
new_checkpoint.repos_queue = sorted(set(repos_list))
new_checkpoint.current_repo_index = 0
new_checkpoint.next_url = None
repos = new_checkpoint.repos_queue
if not repos or new_checkpoint.current_repo_index >= len(repos):
new_checkpoint.has_more = False
return new_checkpoint
repo_slug = repos[new_checkpoint.current_repo_index]
first_page_params = self._build_params(
fields=PR_LIST_RESPONSE_FIELDS,
start=start,
end=end,
)
def _on_page(next_url: str | None) -> None:
new_checkpoint.next_url = next_url
for pr in self._iter_pull_requests_for_repo(
client,
repo_slug,
params=first_page_params,
start_url=new_checkpoint.next_url,
on_page=_on_page,
):
try:
document = map_pr_to_document(pr, self.workspace, repo_slug)
yield document
except Exception as e:
pr_id = pr.get("id")
pr_link = (
f"https://bitbucket.org/{self.workspace}/{repo_slug}/pull-requests/{pr_id}"
if pr_id is not None
else None
)
yield ConnectorFailure(
failed_document=DocumentFailure(
document_id=(
f"{DocumentSource.BITBUCKET.value}:{self.workspace}:{repo_slug}:pr:{pr_id}"
if pr_id is not None
else f"{DocumentSource.BITBUCKET.value}:{self.workspace}:{repo_slug}:pr:unknown"
),
document_link=pr_link,
),
failure_message=f"Failed to process Bitbucket PR: {e}",
exception=e,
)
# Advance to next repository (if any) and set has_more accordingly
new_checkpoint.current_repo_index += 1
new_checkpoint.next_url = None
new_checkpoint.has_more = new_checkpoint.current_repo_index < len(repos)
return new_checkpoint
@override
def build_dummy_checkpoint(self) -> BitbucketConnectorCheckpoint:
"""Create an initial checkpoint with work remaining."""
return BitbucketConnectorCheckpoint(has_more=True)
@override
def validate_checkpoint_json(
self, checkpoint_json: str
) -> BitbucketConnectorCheckpoint:
"""Validate and deserialize a checkpoint instance from JSON."""
return BitbucketConnectorCheckpoint.model_validate_json(checkpoint_json)
def retrieve_all_slim_docs_perm_sync(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
callback: IndexingHeartbeatInterface | None = None,
) -> Iterator[list[SlimDocument]]:
"""Return only document IDs for all existing pull requests."""
batch: list[SlimDocument] = []
params = self._build_params(
fields=SLIM_PR_LIST_RESPONSE_FIELDS,
start=start,
end=end,
)
with self._client() as client:
for slug in self._iter_target_repositories(client):
for pr in self._iter_pull_requests_for_repo(
client, slug, params=params
):
pr_id = pr["id"]
doc_id = f"{DocumentSource.BITBUCKET.value}:{self.workspace}:{slug}:pr:{pr_id}"
batch.append(SlimDocument(id=doc_id))
if len(batch) >= self.batch_size:
yield batch
batch = []
if callback:
if callback.should_stop():
# Note: this is not actually used for permission sync yet, just pruning
raise RuntimeError(
"bitbucket_pr_sync: Stop signal detected"
)
callback.progress("bitbucket_pr_sync", len(batch))
if batch:
yield batch
def validate_connector_settings(self) -> None:
"""Validate Bitbucket credentials and workspace access by probing a lightweight endpoint.
Raises:
CredentialExpiredError: on HTTP 401
InsufficientPermissionsError: on HTTP 403
UnexpectedValidationError: on any other failure
"""
try:
with self._client() as client:
url = f"https://api.bitbucket.org/2.0/repositories/{self.workspace}"
resp = client.get(
url,
params={"pagelen": 1, "fields": "pagelen"},
timeout=REQUEST_TIMEOUT_SECONDS,
)
if resp.status_code == 401:
raise CredentialExpiredError(
"Invalid or expired Bitbucket credentials (HTTP 401)."
)
if resp.status_code == 403:
raise InsufficientPermissionsError(
"Insufficient permissions to access Bitbucket workspace (HTTP 403)."
)
if resp.status_code < 200 or resp.status_code >= 300:
raise UnexpectedValidationError(
f"Unexpected Bitbucket error (status={resp.status_code})."
)
except Exception as e:
# Network or other unexpected errors
if isinstance(
e,
(
CredentialExpiredError,
InsufficientPermissionsError,
UnexpectedValidationError,
ConnectorMissingCredentialError,
),
):
raise
raise UnexpectedValidationError(
f"Unexpected error while validating Bitbucket settings: {e}"
)
if __name__ == "__main__":
bitbucket = BitbucketConnector(
workspace="<YOUR_WORKSPACE>"
)
bitbucket.load_credentials({
"bitbucket_email": "<YOUR_EMAIL>",
"bitbucket_api_token": "<YOUR_API_TOKEN>",
})
bitbucket.validate_connector_settings()
print("Credentials validated successfully.")
start_time = datetime.fromtimestamp(0, tz=timezone.utc)
end_time = datetime.now(timezone.utc)
for doc_batch in bitbucket.retrieve_all_slim_docs_perm_sync(
start=start_time.timestamp(),
end=end_time.timestamp(),
):
for doc in doc_batch:
print(doc)
bitbucket_checkpoint = bitbucket.build_dummy_checkpoint()
while bitbucket_checkpoint.has_more:
gen = bitbucket.load_from_checkpoint(
start=start_time.timestamp(),
end=end_time.timestamp(),
checkpoint=bitbucket_checkpoint,
)
while True:
try:
doc = next(gen)
print(doc)
except StopIteration as e:
bitbucket_checkpoint = e.value
break
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/bitbucket/utils.py | common/data_source/bitbucket/utils.py | from __future__ import annotations
import time
from collections.abc import Callable
from collections.abc import Iterator
from datetime import datetime
from datetime import timezone
from typing import Any
import httpx
from common.data_source.config import REQUEST_TIMEOUT_SECONDS, DocumentSource
from common.data_source.cross_connector_utils.rate_limit_wrapper import (
rate_limit_builder,
)
from common.data_source.utils import sanitize_filename
from common.data_source.models import BasicExpertInfo, Document
from common.data_source.cross_connector_utils.retry_wrapper import retry_builder
# Fields requested from Bitbucket PR list endpoint to ensure rich PR data
PR_LIST_RESPONSE_FIELDS: str = ",".join(
[
"next",
"page",
"pagelen",
"values.author",
"values.close_source_branch",
"values.closed_by",
"values.comment_count",
"values.created_on",
"values.description",
"values.destination",
"values.draft",
"values.id",
"values.links",
"values.merge_commit",
"values.participants",
"values.reason",
"values.rendered",
"values.reviewers",
"values.source",
"values.state",
"values.summary",
"values.task_count",
"values.title",
"values.type",
"values.updated_on",
]
)
# Minimal fields for slim retrieval (IDs only)
SLIM_PR_LIST_RESPONSE_FIELDS: str = ",".join(
[
"next",
"page",
"pagelen",
"values.id",
]
)
# Minimal fields for repository list calls
REPO_LIST_RESPONSE_FIELDS: str = ",".join(
[
"next",
"page",
"pagelen",
"values.slug",
"values.full_name",
"values.project.key",
]
)
class BitbucketRetriableError(Exception):
"""Raised for retriable Bitbucket conditions (429, 5xx)."""
class BitbucketNonRetriableError(Exception):
"""Raised for non-retriable Bitbucket client errors (4xx except 429)."""
@retry_builder(
tries=6,
delay=1,
backoff=2,
max_delay=30,
exceptions=(BitbucketRetriableError, httpx.RequestError),
)
@rate_limit_builder(max_calls=60, period=60)
def bitbucket_get(
client: httpx.Client, url: str, params: dict[str, Any] | None = None
) -> httpx.Response:
"""Perform a GET against Bitbucket with retry and rate limiting.
Retries on 429 and 5xx responses, and on transport errors. Honors
`Retry-After` header for 429 when present by sleeping before retrying.
"""
try:
response = client.get(url, params=params, timeout=REQUEST_TIMEOUT_SECONDS)
except httpx.RequestError:
# Allow retry_builder to handle retries of transport errors
raise
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
status = e.response.status_code if e.response is not None else None
if status == 429:
retry_after = e.response.headers.get("Retry-After") if e.response else None
if retry_after is not None:
try:
time.sleep(int(retry_after))
except (TypeError, ValueError):
pass
raise BitbucketRetriableError("Bitbucket rate limit exceeded (429)") from e
if status is not None and 500 <= status < 600:
raise BitbucketRetriableError(f"Bitbucket server error: {status}") from e
if status is not None and 400 <= status < 500:
raise BitbucketNonRetriableError(f"Bitbucket client error: {status}") from e
# Unknown status, propagate
raise
return response
def build_auth_client(email: str, api_token: str) -> httpx.Client:
"""Create an authenticated httpx client for Bitbucket Cloud API."""
return httpx.Client(auth=(email, api_token), http2=True)
def paginate(
client: httpx.Client,
url: str,
params: dict[str, Any] | None = None,
start_url: str | None = None,
on_page: Callable[[str | None], None] | None = None,
) -> Iterator[dict[str, Any]]:
"""Iterate over paginated Bitbucket API responses yielding individual values.
Args:
client: Authenticated HTTP client.
url: Base collection URL (first page when start_url is None).
params: Query params for the first page.
start_url: If provided, start from this absolute URL (ignores params).
on_page: Optional callback invoked after each page with the next page URL.
"""
next_url = start_url or url
# If resuming from a next URL, do not pass params again
query = params.copy() if params else None
query = None if start_url else query
while next_url:
resp = bitbucket_get(client, next_url, params=query)
data = resp.json()
values = data.get("values", [])
for item in values:
yield item
next_url = data.get("next")
if on_page is not None:
on_page(next_url)
# only include params on first call, next_url will contain all necessary params
query = None
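# --- Hedged usage sketch ------------------------------------------------------
# Offline illustration of paginate() walking Bitbucket's {"values", "next"}
# pagination envelope, using httpx.MockTransport so no network is involved.
# The URLs and payloads below are made up for this sketch.
def _example_paginate() -> list[int]:
    pages = {
        "https://example.invalid/prs": {
            "values": [{"id": 1}],
            "next": "https://example.invalid/prs?page=2",
        },
        "https://example.invalid/prs?page=2": {"values": [{"id": 2}], "next": None},
    }

    def handler(request: httpx.Request) -> httpx.Response:
        return httpx.Response(200, json=pages[str(request.url)])

    with httpx.Client(transport=httpx.MockTransport(handler)) as client:
        return [pr["id"] for pr in paginate(client, "https://example.invalid/prs")]

# _example_paginate() -> [1, 2]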
def list_repositories(
client: httpx.Client, workspace: str, project_key: str | None = None
) -> Iterator[dict[str, Any]]:
"""List repositories in a workspace, optionally filtered by project key."""
base_url = f"https://api.bitbucket.org/2.0/repositories/{workspace}"
params: dict[str, Any] = {
"fields": REPO_LIST_RESPONSE_FIELDS,
"pagelen": 100,
# Ensure deterministic ordering
"sort": "full_name",
}
if project_key:
params["q"] = f'project.key="{project_key}"'
yield from paginate(client, base_url, params)
def map_pr_to_document(pr: dict[str, Any], workspace: str, repo_slug: str) -> Document:
"""Map a Bitbucket pull request JSON to Onyx Document."""
pr_id = pr["id"]
title = pr.get("title") or f"PR {pr_id}"
description = pr.get("description") or ""
state = pr.get("state")
draft = pr.get("draft", False)
author = pr.get("author", {})
reviewers = pr.get("reviewers", [])
participants = pr.get("participants", [])
link = pr.get("links", {}).get("html", {}).get("href") or (
f"https://bitbucket.org/{workspace}/{repo_slug}/pull-requests/{pr_id}"
)
created_on = pr.get("created_on")
updated_on = pr.get("updated_on")
updated_dt = (
datetime.fromisoformat(updated_on.replace("Z", "+00:00")).astimezone(
timezone.utc
)
if isinstance(updated_on, str)
else None
)
source_branch = pr.get("source", {}).get("branch", {}).get("name", "")
destination_branch = pr.get("destination", {}).get("branch", {}).get("name", "")
approved_by = [
_get_user_name(p.get("user", {})) for p in participants if p.get("approved")
]
primary_owner = None
if author:
primary_owner = BasicExpertInfo(
display_name=_get_user_name(author),
)
# secondary_owners = [
# BasicExpertInfo(display_name=_get_user_name(r)) for r in reviewers
# ] or None
reviewer_names = [_get_user_name(r) for r in reviewers]
# Create a concise summary of key PR info
created_date = created_on.split("T")[0] if created_on else "N/A"
updated_date = updated_on.split("T")[0] if updated_on else "N/A"
content_text = (
"Pull Request Information:\n"
f"- Pull Request ID: {pr_id}\n"
f"- Title: {title}\n"
f"- State: {state or 'N/A'} {'(Draft)' if draft else ''}\n"
)
if state == "DECLINED":
content_text += f"- Reason: {pr.get('reason', 'N/A')}\n"
content_text += (
f"- Author: {_get_user_name(author) if author else 'N/A'}\n"
f"- Reviewers: {', '.join(reviewer_names) if reviewer_names else 'N/A'}\n"
f"- Branch: {source_branch} -> {destination_branch}\n"
f"- Created: {created_date}\n"
f"- Updated: {updated_date}"
)
if description:
content_text += f"\n\nDescription:\n{description}"
metadata: dict[str, str | list[str]] = {
"object_type": "PullRequest",
"workspace": workspace,
"repository": repo_slug,
"pr_key": f"{workspace}/{repo_slug}#{pr_id}",
"id": str(pr_id),
"title": title,
"state": state or "",
"draft": str(bool(draft)),
"link": link,
"author": _get_user_name(author) if author else "",
"reviewers": reviewer_names,
"approved_by": approved_by,
"comment_count": str(pr.get("comment_count", "")),
"task_count": str(pr.get("task_count", "")),
"created_on": created_on or "",
"updated_on": updated_on or "",
"source_branch": source_branch,
"destination_branch": destination_branch,
"closed_by": (
_get_user_name(pr.get("closed_by", {})) if pr.get("closed_by") else ""
),
"close_source_branch": str(bool(pr.get("close_source_branch", False))),
}
name = sanitize_filename(title, "md")
return Document(
id=f"{DocumentSource.BITBUCKET.value}:{workspace}:{repo_slug}:pr:{pr_id}",
blob=content_text.encode("utf-8"),
source=DocumentSource.BITBUCKET,
extension=".md",
semantic_identifier=f"#{pr_id}: {name}",
size_bytes=len(content_text.encode("utf-8")),
doc_updated_at=updated_dt,
primary_owners=[primary_owner] if primary_owner else None,
# secondary_owners=secondary_owners,
metadata=metadata,
)
def _get_user_name(user: dict[str, Any]) -> str:
return user.get("display_name") or user.get("nickname") or "unknown" | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/bitbucket/__init__.py | common/data_source/bitbucket/__init__.py | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false | |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/google_util/util.py | common/data_source/google_util/util.py | import json
import logging
import os
import re
import socket
from collections.abc import Callable, Iterator
from enum import Enum
from typing import Any
import unicodedata
from googleapiclient.errors import HttpError  # type: ignore
from common.data_source.config import DocumentSource
from common.data_source.google_drive.model import GoogleDriveFileType
from common.data_source.google_util.oauth_flow import ensure_oauth_token_dict
# See https://developers.google.com/drive/api/reference/rest/v3/files/list for more
class GoogleFields(str, Enum):
ID = "id"
CREATED_TIME = "createdTime"
MODIFIED_TIME = "modifiedTime"
NAME = "name"
SIZE = "size"
PARENTS = "parents"
NEXT_PAGE_TOKEN_KEY = "nextPageToken"
PAGE_TOKEN_KEY = "pageToken"
ORDER_BY_KEY = "orderBy"
def get_file_owners(file: GoogleDriveFileType, primary_admin_email: str) -> list[str]:
"""
    Get owner emails of a file that share the primary admin's email domain, if the
    owners attribute is present.
"""
return [email for owner in file.get("owners", []) if (email := owner.get("emailAddress")) and email.split("@")[-1] == primary_admin_email.split("@")[-1]]
# Included for typing purposes; callers of this variant never receive page-token
# strings. Use execute_paginated_retrieval_with_max_pages instead if you want
# the early stop that yields the next page token after max_num_pages.
def execute_paginated_retrieval(
retrieval_function: Callable,
list_key: str | None = None,
continue_on_404_or_403: bool = False,
**kwargs: Any,
) -> Iterator[GoogleDriveFileType]:
for item in _execute_paginated_retrieval(
retrieval_function,
list_key,
continue_on_404_or_403,
**kwargs,
):
if not isinstance(item, str):
yield item
def execute_paginated_retrieval_with_max_pages(
retrieval_function: Callable,
max_num_pages: int,
list_key: str | None = None,
continue_on_404_or_403: bool = False,
**kwargs: Any,
) -> Iterator[GoogleDriveFileType | str]:
yield from _execute_paginated_retrieval(
retrieval_function,
list_key,
continue_on_404_or_403,
max_num_pages=max_num_pages,
**kwargs,
)
def _execute_paginated_retrieval(
retrieval_function: Callable,
list_key: str | None = None,
continue_on_404_or_403: bool = False,
max_num_pages: int | None = None,
**kwargs: Any,
) -> Iterator[GoogleDriveFileType | str]:
"""Execute a paginated retrieval from Google Drive API
Args:
retrieval_function: The specific list function to call (e.g., service.files().list)
list_key: If specified, each object returned by the retrieval function
will be accessed at the specified key and yielded from.
continue_on_404_or_403: If True, the retrieval will continue even if the request returns a 404 or 403 error.
        max_num_pages: If specified, the retrieval stops after that many pages and
            yields the next page token string so the caller can resume later.
**kwargs: Arguments to pass to the list function
"""
if "fields" not in kwargs or "nextPageToken" not in kwargs["fields"]:
raise ValueError("fields must contain nextPageToken for execute_paginated_retrieval")
next_page_token = kwargs.get(PAGE_TOKEN_KEY, "")
num_pages = 0
while next_page_token is not None:
if max_num_pages is not None and num_pages >= max_num_pages:
yield next_page_token
return
num_pages += 1
request_kwargs = kwargs.copy()
if next_page_token:
request_kwargs[PAGE_TOKEN_KEY] = next_page_token
results = _execute_single_retrieval(
retrieval_function,
continue_on_404_or_403,
**request_kwargs,
)
next_page_token = results.get(NEXT_PAGE_TOKEN_KEY)
if list_key:
for item in results.get(list_key, []):
yield item
else:
yield results
def _execute_single_retrieval(
retrieval_function: Callable,
continue_on_404_or_403: bool = False,
**request_kwargs: Any,
) -> GoogleDriveFileType:
"""Execute a single retrieval from Google Drive API"""
try:
results = retrieval_function(**request_kwargs).execute()
except HttpError as e:
        if e.resp.status >= 500:
            # Transient server error; retry the request once with the same arguments.
            results = retrieval_function(**request_kwargs).execute()
elif e.resp.status == 400:
if "pageToken" in request_kwargs and "Invalid Value" in str(e) and "pageToken" in str(e):
logging.warning(f"Invalid page token: {request_kwargs['pageToken']}, retrying from start of request")
request_kwargs.pop("pageToken")
return _execute_single_retrieval(
retrieval_function,
continue_on_404_or_403,
**request_kwargs,
)
logging.error(f"Error executing request: {e}")
raise e
elif e.resp.status == 404 or e.resp.status == 403:
if continue_on_404_or_403:
logging.debug(f"Error executing request: {e}")
results = {}
else:
raise e
        elif e.resp.status == 429:
            # Rate limited; retry the request once with the same arguments.
            results = retrieval_function(**request_kwargs).execute()
else:
logging.exception("Error executing request:")
raise e
except (TimeoutError, socket.timeout) as error:
logging.warning(
"Timed out executing Google API request; retrying with backoff. Details: %s",
error,
)
        results = retrieval_function(**request_kwargs).execute()
return results
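# --- Hedged usage sketch ------------------------------------------------------
# Offline illustration of the max-pages early-stop contract: after
# max_num_pages pages, the generator yields the *next page token string* so a
# caller can persist it and resume later. The fake request objects below stand
# in for googleapiclient's service.files().list and are purely illustrative.
class _FakePageRequest:
    def __init__(self, payload: dict):
        self._payload = payload

    def execute(self) -> dict:
        return self._payload

def _fake_files_list(**kwargs: Any) -> _FakePageRequest:
    if not kwargs.get(PAGE_TOKEN_KEY):
        return _FakePageRequest({"files": [{"id": "a"}], NEXT_PAGE_TOKEN_KEY: "tok-2"})
    return _FakePageRequest({"files": [{"id": "b"}]})  # no nextPageToken -> last page

# list(execute_paginated_retrieval_with_max_pages(
#     _fake_files_list, max_num_pages=1, list_key="files",
#     fields="nextPageToken, files(id)",
# )) -> [{'id': 'a'}, 'tok-2']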
def get_credentials_from_env(email: str, oauth: bool = False, source="drive") -> dict:
try:
if oauth:
raw_credential_string = os.environ["GOOGLE_OAUTH_CREDENTIALS_JSON_STR"]
else:
raw_credential_string = os.environ["GOOGLE_SERVICE_ACCOUNT_JSON_STR"]
except KeyError:
raise ValueError("Missing Google Drive credentials in environment variables")
try:
credential_dict = json.loads(raw_credential_string)
except json.JSONDecodeError:
raise ValueError("Invalid JSON in Google Drive credentials")
    if oauth:
        # Only OAuth credential dicts need the interactive token flow; service
        # account keys are used as-is.
        oauth_source = DocumentSource.GOOGLE_DRIVE if source == "drive" else DocumentSource.GMAIL
        credential_dict = ensure_oauth_token_dict(credential_dict, oauth_source)
refried_credential_string = json.dumps(credential_dict)
DB_CREDENTIALS_DICT_TOKEN_KEY = "google_tokens"
DB_CREDENTIALS_DICT_SERVICE_ACCOUNT_KEY = "google_service_account_key"
DB_CREDENTIALS_PRIMARY_ADMIN_KEY = "google_primary_admin"
DB_CREDENTIALS_AUTHENTICATION_METHOD = "authentication_method"
cred_key = DB_CREDENTIALS_DICT_TOKEN_KEY if oauth else DB_CREDENTIALS_DICT_SERVICE_ACCOUNT_KEY
return {
cred_key: refried_credential_string,
DB_CREDENTIALS_PRIMARY_ADMIN_KEY: email,
DB_CREDENTIALS_AUTHENTICATION_METHOD: "uploaded",
}
def clean_string(text: str | None) -> str | None:
"""
Clean a string to make it safe for insertion into MySQL (utf8mb4).
- Normalize Unicode
- Remove control characters / zero-width characters
- Optionally remove high-plane emoji and symbols
"""
if text is None:
return None
# 0. Ensure the value is a string
text = str(text)
# 1. Normalize Unicode (NFC)
text = unicodedata.normalize("NFC", text)
# 2. Remove ASCII control characters (except tab, newline, carriage return)
text = re.sub(r"[\x00-\x08\x0b\x0c\x0e-\x1f\x7f]", "", text)
# 3. Remove zero-width characters / BOM
text = re.sub(r"[\u200b-\u200d\uFEFF]", "", text)
# 4. Remove high Unicode characters (emoji, special symbols)
text = re.sub(r"[\U00010000-\U0010FFFF]", "", text)
# 5. Final fallback: strip any invalid UTF-8 sequences
try:
text.encode("utf-8")
except UnicodeEncodeError:
text = text.encode("utf-8", errors="ignore").decode("utf-8")
return text | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
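# Quick illustration (inputs chosen for this sketch):
#   clean_string("a\u200bb\x00c\U0001F600") -> "abc"   (zero-width, control, emoji removed)
#   clean_string(None)                       -> None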
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/google_util/util_threadpool_concurrency.py | common/data_source/google_util/util_threadpool_concurrency.py | import collections.abc
import copy
import threading
from collections.abc import Callable, Iterator, MutableMapping
from typing import Any, TypeVar, overload
from pydantic import GetCoreSchemaHandler
from pydantic_core import core_schema
R = TypeVar("R")
KT = TypeVar("KT") # Key type
VT = TypeVar("VT") # Value type
_T = TypeVar("_T") # Default type
class ThreadSafeDict(MutableMapping[KT, VT]):
"""
A thread-safe dictionary implementation that uses a lock to ensure thread safety.
Implements the MutableMapping interface to provide a complete dictionary-like interface.
Example usage:
# Create a thread-safe dictionary
safe_dict: ThreadSafeDict[str, int] = ThreadSafeDict()
# Basic operations (atomic)
safe_dict["key"] = 1
value = safe_dict["key"]
del safe_dict["key"]
# Bulk operations (atomic)
safe_dict.update({"key1": 1, "key2": 2})
"""
def __init__(self, input_dict: dict[KT, VT] | None = None) -> None:
self._dict: dict[KT, VT] = input_dict or {}
self.lock = threading.Lock()
def __getitem__(self, key: KT) -> VT:
with self.lock:
return self._dict[key]
def __setitem__(self, key: KT, value: VT) -> None:
with self.lock:
self._dict[key] = value
def __delitem__(self, key: KT) -> None:
with self.lock:
del self._dict[key]
def __iter__(self) -> Iterator[KT]:
# Return a snapshot of keys to avoid potential modification during iteration
with self.lock:
return iter(list(self._dict.keys()))
def __len__(self) -> int:
with self.lock:
return len(self._dict)
@classmethod
def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
return core_schema.no_info_after_validator_function(cls.validate, handler(dict[KT, VT]))
@classmethod
def validate(cls, v: Any) -> "ThreadSafeDict[KT, VT]":
if isinstance(v, dict):
return ThreadSafeDict(v)
return v
def __deepcopy__(self, memo: Any) -> "ThreadSafeDict[KT, VT]":
return ThreadSafeDict(copy.deepcopy(self._dict))
def clear(self) -> None:
"""Remove all items from the dictionary atomically."""
with self.lock:
self._dict.clear()
def copy(self) -> dict[KT, VT]:
"""Return a shallow copy of the dictionary atomically."""
with self.lock:
return self._dict.copy()
@overload
def get(self, key: KT) -> VT | None: ...
@overload
def get(self, key: KT, default: VT | _T) -> VT | _T: ...
def get(self, key: KT, default: Any = None) -> Any:
"""Get a value with a default, atomically."""
with self.lock:
return self._dict.get(key, default)
    _POP_SENTINEL = object()

    def pop(self, key: KT, default: Any = _POP_SENTINEL) -> Any:
        """Remove and return a value with optional default, atomically.

        Matches dict semantics: pop(key) raises KeyError when the key is
        missing, while an explicit default (including None) is returned instead.
        """
        with self.lock:
            if default is ThreadSafeDict._POP_SENTINEL:
                return self._dict.pop(key)
            return self._dict.pop(key, default)
def setdefault(self, key: KT, default: VT) -> VT:
"""Set a default value if key is missing, atomically."""
with self.lock:
return self._dict.setdefault(key, default)
def update(self, *args: Any, **kwargs: VT) -> None:
"""Update the dictionary atomically from another mapping or from kwargs."""
with self.lock:
self._dict.update(*args, **kwargs)
def items(self) -> collections.abc.ItemsView[KT, VT]:
"""Return a view of (key, value) pairs atomically."""
with self.lock:
return collections.abc.ItemsView(self)
def keys(self) -> collections.abc.KeysView[KT]:
"""Return a view of keys atomically."""
with self.lock:
return collections.abc.KeysView(self)
def values(self) -> collections.abc.ValuesView[VT]:
"""Return a view of values atomically."""
with self.lock:
return collections.abc.ValuesView(self)
@overload
def atomic_get_set(self, key: KT, value_callback: Callable[[VT], VT], default: VT) -> tuple[VT, VT]: ...
@overload
def atomic_get_set(self, key: KT, value_callback: Callable[[VT | _T], VT], default: VT | _T) -> tuple[VT | _T, VT]: ...
def atomic_get_set(self, key: KT, value_callback: Callable[[Any], VT], default: Any = None) -> tuple[Any, VT]:
"""Replace a value from the dict with a function applied to the previous value, atomically.
Returns:
A tuple of the previous value and the new value.
"""
with self.lock:
val = self._dict.get(key, default)
new_val = value_callback(val)
self._dict[key] = new_val
return val, new_val
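# --- Hedged usage sketch ------------------------------------------------------
# atomic_get_set applies the callback while holding the dict's lock, so it works
# as a race-free read-modify-write (e.g. a shared counter). Names are made up.
def _example_threadsafe_counter() -> tuple[int, int]:
    counts: ThreadSafeDict[str, int] = ThreadSafeDict()
    prev, new = counts.atomic_get_set("hits", lambda v: v + 1, default=0)
    return prev, new  # (0, 1): previous value was the default, new value is 1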
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/google_util/resource.py | common/data_source/google_util/resource.py | import logging
from collections.abc import Callable
from typing import Any
from google.auth.exceptions import RefreshError # type: ignore
from google.oauth2.credentials import Credentials as OAuthCredentials  # type: ignore
from google.oauth2.service_account import Credentials as ServiceAccountCredentials  # type: ignore
from googleapiclient.discovery import (
Resource, # type: ignore
build, # type: ignore
)
class GoogleDriveService(Resource):
pass
class GoogleDocsService(Resource):
pass
class AdminService(Resource):
pass
class GmailService(Resource):
pass
class RefreshableDriveObject:
"""
Running Google Drive service retrieval functions
involves accessing methods of the service object (i.e. files().list())
which can raise a RefreshError if the access token is expired.
This class is a wrapper that propagates the ability to refresh the access token
and retry the final retrieval function until execute() is called.
"""
def __init__(
self,
call_stack: Callable[[ServiceAccountCredentials | OAuthCredentials], Any],
creds: ServiceAccountCredentials | OAuthCredentials,
creds_getter: Callable[..., ServiceAccountCredentials | OAuthCredentials],
):
self.call_stack = call_stack
self.creds = creds
self.creds_getter = creds_getter
def __getattr__(self, name: str) -> Any:
if name == "execute":
return self.make_refreshable_execute()
return RefreshableDriveObject(
lambda creds: getattr(self.call_stack(creds), name),
self.creds,
self.creds_getter,
)
def __call__(self, *args: Any, **kwargs: Any) -> Any:
return RefreshableDriveObject(
lambda creds: self.call_stack(creds)(*args, **kwargs),
self.creds,
self.creds_getter,
)
def make_refreshable_execute(self) -> Callable:
def execute(*args: Any, **kwargs: Any) -> Any:
try:
return self.call_stack(self.creds).execute(*args, **kwargs)
except RefreshError as e:
logging.warning(f"RefreshError, going to attempt a creds refresh and retry: {e}")
# Refresh the access token
self.creds = self.creds_getter()
return self.call_stack(self.creds).execute(*args, **kwargs)
return execute
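# --- Hedged usage sketch ------------------------------------------------------
# Illustration of the retry-on-RefreshError behavior with stand-in objects (no
# real Google credentials involved): the first execute() raises RefreshError,
# so the wrapper fetches fresh creds via creds_getter and retries once.
class _FlakyRequest:
    def __init__(self) -> None:
        self.calls = 0

    def execute(self) -> str:
        self.calls += 1
        if self.calls == 1:
            raise RefreshError("token expired")
        return "ok"

def _example_refresh_retry() -> str:
    flaky = _FlakyRequest()
    wrapper = RefreshableDriveObject(
        call_stack=lambda creds: flaky,
        creds="old-creds",  # type: ignore[arg-type]  # stand-in for real credentials
        creds_getter=lambda: "new-creds",  # type: ignore[arg-type,return-value]
    )
    return wrapper.execute()  # -> "ok" after one refresh-and-retry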
def _get_google_service(
service_name: str,
service_version: str,
creds: ServiceAccountCredentials | OAuthCredentials,
user_email: str | None = None,
) -> GoogleDriveService | GoogleDocsService | AdminService | GmailService:
service: Resource
if isinstance(creds, ServiceAccountCredentials):
# NOTE: https://developers.google.com/identity/protocols/oauth2/service-account#error-codes
creds = creds.with_subject(user_email)
service = build(service_name, service_version, credentials=creds)
    elif isinstance(creds, OAuthCredentials):
        service = build(service_name, service_version, credentials=creds)
    else:
        raise TypeError(f"Unsupported Google credentials type: {type(creds).__name__}")
    return service
def get_google_docs_service(
creds: ServiceAccountCredentials | OAuthCredentials,
user_email: str | None = None,
) -> GoogleDocsService:
return _get_google_service("docs", "v1", creds, user_email)
def get_drive_service(
creds: ServiceAccountCredentials | OAuthCredentials,
user_email: str | None = None,
) -> GoogleDriveService:
return _get_google_service("drive", "v3", creds, user_email)
def get_admin_service(
creds: ServiceAccountCredentials | OAuthCredentials,
user_email: str | None = None,
) -> AdminService:
return _get_google_service("admin", "directory_v1", creds, user_email)
def get_gmail_service(
creds: ServiceAccountCredentials | OAuthCredentials,
user_email: str | None = None,
) -> GmailService:
return _get_google_service("gmail", "v1", creds, user_email)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/google_util/oauth_flow.py | common/data_source/google_util/oauth_flow.py | import json
import os
import threading
from typing import Any, Callable
from common.data_source.config import DocumentSource
from common.data_source.google_util.constant import GOOGLE_SCOPES
def _get_requested_scopes(source: DocumentSource) -> list[str]:
"""Return the scopes to request, honoring an optional override env var."""
override = os.environ.get("GOOGLE_OAUTH_SCOPE_OVERRIDE", "")
if override.strip():
scopes = [scope.strip() for scope in override.split(",") if scope.strip()]
if scopes:
return scopes
return GOOGLE_SCOPES[source]
def _get_oauth_timeout_secs() -> int:
raw_timeout = os.environ.get("GOOGLE_OAUTH_FLOW_TIMEOUT_SECS", "300").strip()
try:
timeout = int(raw_timeout)
except ValueError:
timeout = 300
return timeout
def _run_with_timeout(func: Callable[[], Any], timeout_secs: int, timeout_message: str) -> Any:
if timeout_secs <= 0:
return func()
result: dict[str, Any] = {}
error: dict[str, BaseException] = {}
def _target() -> None:
try:
result["value"] = func()
except BaseException as exc: # pragma: no cover
error["error"] = exc
thread = threading.Thread(target=_target, daemon=True)
thread.start()
thread.join(timeout_secs)
if thread.is_alive():
raise TimeoutError(timeout_message)
if "error" in error:
raise error["error"]
return result.get("value")
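# Quick illustration (values chosen for this sketch):
#   _run_with_timeout(lambda: 42, timeout_secs=5, timeout_message="too slow") -> 42
#   A callable that outlives timeout_secs raises TimeoutError("too slow") instead.
#   timeout_secs <= 0 disables the watchdog and runs the callable inline.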
def _run_local_server_flow(client_config: dict[str, Any], source: DocumentSource) -> dict[str, Any]:
"""Launch the standard Google OAuth local-server flow to mint user tokens."""
from google_auth_oauthlib.flow import InstalledAppFlow # type: ignore
scopes = _get_requested_scopes(source)
flow = InstalledAppFlow.from_client_config(
client_config,
scopes=scopes,
)
open_browser = os.environ.get("GOOGLE_OAUTH_OPEN_BROWSER", "true").lower() != "false"
preferred_port = os.environ.get("GOOGLE_OAUTH_LOCAL_SERVER_PORT")
port = int(preferred_port) if preferred_port else 0
timeout_secs = _get_oauth_timeout_secs()
timeout_message = f"Google OAuth verification timed out after {timeout_secs} seconds. Close any pending consent windows and rerun the connector configuration to try again."
print("Launching Google OAuth flow. A browser window should open shortly.")
print("If it does not, copy the URL shown in the console into your browser manually.")
if timeout_secs > 0:
print(f"You have {timeout_secs} seconds to finish granting access before the request times out.")
try:
creds = _run_with_timeout(
lambda: flow.run_local_server(port=port, open_browser=open_browser, prompt="consent"),
timeout_secs,
timeout_message,
)
except OSError as exc:
allow_console = os.environ.get("GOOGLE_OAUTH_ALLOW_CONSOLE_FALLBACK", "true").lower() != "false"
if not allow_console:
raise
print(f"Local server flow failed ({exc}). Falling back to console-based auth.")
creds = _run_with_timeout(flow.run_console, timeout_secs, timeout_message)
except Warning as warning:
warning_msg = str(warning)
if "Scope has changed" in warning_msg:
instructions = [
"Google rejected one or more of the requested OAuth scopes.",
"Fix options:",
" 1. In Google Cloud Console, open APIs & Services > OAuth consent screen and add the missing scopes (Drive metadata + Admin Directory read scopes), then re-run the flow.",
" 2. Set GOOGLE_OAUTH_SCOPE_OVERRIDE to a comma-separated list of scopes you are allowed to request.",
]
raise RuntimeError("\n".join(instructions)) from warning
raise
token_dict: dict[str, Any] = json.loads(creds.to_json())
print("\nGoogle OAuth flow completed successfully.")
print("Copy the JSON blob below into GOOGLE_DRIVE_OAUTH_CREDENTIALS_JSON_STR to reuse these tokens without re-authenticating:\n")
print(json.dumps(token_dict, indent=2))
print()
return token_dict
def ensure_oauth_token_dict(credentials: dict[str, Any], source: DocumentSource) -> dict[str, Any]:
"""Return a dict that contains OAuth tokens, running the flow if only a client config is provided."""
if "refresh_token" in credentials and "token" in credentials:
return credentials
client_config: dict[str, Any] | None = None
if "installed" in credentials:
client_config = {"installed": credentials["installed"]}
elif "web" in credentials:
client_config = {"web": credentials["web"]}
if client_config is None:
raise ValueError("Provided Google OAuth credentials are missing both tokens and a client configuration.")
return _run_local_server_flow(client_config, source)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/google_util/__init__.py | common/data_source/google_util/__init__.py | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false | |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/google_util/constant.py | common/data_source/google_util/constant.py | from enum import Enum
from common.data_source.config import DocumentSource
SLIM_BATCH_SIZE = 500
# NOTE: do not need https://www.googleapis.com/auth/documents.readonly
# this is counted under `/auth/drive.readonly`
GOOGLE_SCOPES = {
DocumentSource.GOOGLE_DRIVE: [
"https://www.googleapis.com/auth/drive.readonly",
"https://www.googleapis.com/auth/drive.metadata.readonly",
"https://www.googleapis.com/auth/admin.directory.group.readonly",
"https://www.googleapis.com/auth/admin.directory.user.readonly",
],
DocumentSource.GMAIL: [
"https://www.googleapis.com/auth/gmail.readonly",
"https://www.googleapis.com/auth/admin.directory.user.readonly",
"https://www.googleapis.com/auth/admin.directory.group.readonly",
],
}
# This is the Oauth token
DB_CREDENTIALS_DICT_TOKEN_KEY = "google_tokens"
# This is the service account key
DB_CREDENTIALS_DICT_SERVICE_ACCOUNT_KEY = "google_service_account_key"
# The email saved for both auth types
DB_CREDENTIALS_PRIMARY_ADMIN_KEY = "google_primary_admin"
# https://developers.google.com/workspace/guides/create-credentials
# Internally defined authentication method type.
# The value must be one of "oauth_interactive" or "uploaded"
# Used to disambiguate whether credentials have already been created via
# certain methods and what actions we allow users to take
DB_CREDENTIALS_AUTHENTICATION_METHOD = "authentication_method"
class GoogleOAuthAuthenticationMethod(str, Enum):
OAUTH_INTERACTIVE = "oauth_interactive"
UPLOADED = "uploaded"
USER_FIELDS = "nextPageToken, users(primaryEmail)"
# Error message substrings
MISSING_SCOPES_ERROR_STR = "client not authorized for any of the scopes requested"
SCOPE_INSTRUCTIONS = ""
WEB_OAUTH_POPUP_TEMPLATE = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>{title}</title>
<style>
body {{
font-family: Arial, sans-serif;
background: #f8fafc;
color: #0f172a;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
min-height: 100vh;
margin: 0;
}}
.card {{
background: white;
padding: 32px;
border-radius: 12px;
box-shadow: 0 8px 30px rgba(15, 23, 42, 0.1);
max-width: 420px;
text-align: center;
}}
h1 {{
font-size: 1.5rem;
margin-bottom: 12px;
}}
p {{
font-size: 0.95rem;
line-height: 1.5;
}}
</style>
</head>
<body>
<div class="card">
<h1>{heading}</h1>
<p>{message}</p>
<p>You can close this window.</p>
</div>
<script>
(function(){{
if (window.opener) {{
window.opener.postMessage({payload_json}, "*");
}}
{auto_close}
}})();
</script>
</body>
</html>
"""
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/common/data_source/google_util/auth.py | common/data_source/google_util/auth.py | import json
import logging
from typing import Any
from google.auth.transport.requests import Request # type: ignore
from google.oauth2.credentials import Credentials as OAuthCredentials  # type: ignore
from google.oauth2.service_account import Credentials as ServiceAccountCredentials  # type: ignore
from common.data_source.config import OAUTH_GOOGLE_DRIVE_CLIENT_ID, OAUTH_GOOGLE_DRIVE_CLIENT_SECRET, DocumentSource
from common.data_source.google_util.constant import (
DB_CREDENTIALS_AUTHENTICATION_METHOD,
DB_CREDENTIALS_DICT_SERVICE_ACCOUNT_KEY,
DB_CREDENTIALS_DICT_TOKEN_KEY,
DB_CREDENTIALS_PRIMARY_ADMIN_KEY,
GOOGLE_SCOPES,
GoogleOAuthAuthenticationMethod,
)
from common.data_source.google_util.oauth_flow import ensure_oauth_token_dict
def sanitize_oauth_credentials(oauth_creds: OAuthCredentials) -> str:
"""we really don't want to be persisting the client id and secret anywhere but the
environment.
Returns a string of serialized json.
"""
# strip the client id and secret
oauth_creds_json_str = oauth_creds.to_json()
oauth_creds_sanitized_json: dict[str, Any] = json.loads(oauth_creds_json_str)
oauth_creds_sanitized_json.pop("client_id", None)
oauth_creds_sanitized_json.pop("client_secret", None)
oauth_creds_sanitized_json_str = json.dumps(oauth_creds_sanitized_json)
return oauth_creds_sanitized_json_str
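# Shape illustration (field values made up): given credentials serialized as
#   {"token": "...", "refresh_token": "...", "client_id": "...", "client_secret": "...", "expiry": "..."}
# the sanitized string keeps the tokens and expiry but drops client_id and
# client_secret, which get_google_creds re-injects from the environment for
# interactive-OAuth credentials.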
def get_google_creds(
credentials: dict[str, str],
source: DocumentSource,
) -> tuple[ServiceAccountCredentials | OAuthCredentials, dict[str, str] | None]:
"""Checks for two different types of credentials.
(1) A credential which holds a token acquired via a user going through
the Google OAuth flow.
(2) A credential which holds a service account key JSON file, which
can then be used to impersonate any user in the workspace.
Return a tuple where:
The first element is the requested credentials
The second element is a new credentials dict that the caller should write back
to the db. This happens if token rotation occurs while loading credentials.
"""
oauth_creds = None
service_creds = None
new_creds_dict = None
if DB_CREDENTIALS_DICT_TOKEN_KEY in credentials:
# OAUTH
authentication_method: str = credentials.get(
DB_CREDENTIALS_AUTHENTICATION_METHOD,
GoogleOAuthAuthenticationMethod.UPLOADED,
)
credentials_dict_str = credentials[DB_CREDENTIALS_DICT_TOKEN_KEY]
credentials_dict = json.loads(credentials_dict_str)
regenerated_from_client_secret = False
if "client_id" not in credentials_dict or "client_secret" not in credentials_dict or "refresh_token" not in credentials_dict:
try:
credentials_dict = ensure_oauth_token_dict(credentials_dict, source)
except Exception as exc:
raise PermissionError(
"Google Drive OAuth credentials are incomplete. Please finish the OAuth flow to generate access tokens."
) from exc
credentials_dict_str = json.dumps(credentials_dict)
regenerated_from_client_secret = True
# only send what get_google_oauth_creds needs
authorized_user_info = {}
# oauth_interactive is sanitized and needs credentials from the environment
if authentication_method == GoogleOAuthAuthenticationMethod.OAUTH_INTERACTIVE:
authorized_user_info["client_id"] = OAUTH_GOOGLE_DRIVE_CLIENT_ID
authorized_user_info["client_secret"] = OAUTH_GOOGLE_DRIVE_CLIENT_SECRET
else:
authorized_user_info["client_id"] = credentials_dict["client_id"]
authorized_user_info["client_secret"] = credentials_dict["client_secret"]
authorized_user_info["refresh_token"] = credentials_dict["refresh_token"]
authorized_user_info["token"] = credentials_dict["token"]
authorized_user_info["expiry"] = credentials_dict["expiry"]
token_json_str = json.dumps(authorized_user_info)
oauth_creds = get_google_oauth_creds(token_json_str=token_json_str, source=source)
# tell caller to update token stored in DB if the refresh token changed
if oauth_creds:
should_persist = regenerated_from_client_secret or oauth_creds.refresh_token != authorized_user_info["refresh_token"]
if should_persist:
# if oauth_interactive, sanitize the credentials so they don't get stored in the db
if authentication_method == GoogleOAuthAuthenticationMethod.OAUTH_INTERACTIVE:
oauth_creds_json_str = sanitize_oauth_credentials(oauth_creds)
else:
oauth_creds_json_str = oauth_creds.to_json()
new_creds_dict = {
DB_CREDENTIALS_DICT_TOKEN_KEY: oauth_creds_json_str,
DB_CREDENTIALS_PRIMARY_ADMIN_KEY: credentials[DB_CREDENTIALS_PRIMARY_ADMIN_KEY],
DB_CREDENTIALS_AUTHENTICATION_METHOD: authentication_method,
}
elif DB_CREDENTIALS_DICT_SERVICE_ACCOUNT_KEY in credentials:
# SERVICE ACCOUNT
service_account_key_json_str = credentials[DB_CREDENTIALS_DICT_SERVICE_ACCOUNT_KEY]
service_account_key = json.loads(service_account_key_json_str)
service_creds = ServiceAccountCredentials.from_service_account_info(service_account_key, scopes=GOOGLE_SCOPES[source])
        if not service_creds.valid or service_creds.expired:
service_creds.refresh(Request())
if not service_creds.valid:
raise PermissionError(f"Unable to access {source} - service account credentials are invalid.")
creds: ServiceAccountCredentials | OAuthCredentials | None = oauth_creds or service_creds
if creds is None:
raise PermissionError(f"Unable to access {source} - unknown credential structure.")
return creds, new_creds_dict
def get_google_oauth_creds(token_json_str: str, source: DocumentSource) -> OAuthCredentials | None:
"""creds_json only needs to contain client_id, client_secret and refresh_token to
refresh the creds.
expiry and token are optional ... however, if passing in expiry, token
should also be passed in or else we may not return any creds.
(probably a sign we should refactor the function)
"""
creds_json = json.loads(token_json_str)
creds = OAuthCredentials.from_authorized_user_info(
info=creds_json,
scopes=GOOGLE_SCOPES[source],
)
if creds.valid:
return creds
if creds.expired and creds.refresh_token:
try:
creds.refresh(Request())
if creds.valid:
logging.info("Refreshed Google Drive tokens.")
return creds
except Exception:
logging.exception("Failed to refresh google drive access token")
return None
return None
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/example/sdk/dataset_example.py | example/sdk/dataset_example.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The example is about CRUD operations (Create, Read, Update, Delete) on a dataset.
"""
from ragflow_sdk import RAGFlow
import sys
HOST_ADDRESS = "http://127.0.0.1"
API_KEY = "ragflow-IzZmY1MGVhYTBhMjExZWZiYTdjMDI0Mm"
try:
# create a ragflow instance
ragflow_instance = RAGFlow(api_key=API_KEY, base_url=HOST_ADDRESS)
    # create a dataset instance
dataset_instance = ragflow_instance.create_dataset(name="dataset_instance")
# update the dataset instance
updated_message = {"name":"updated_dataset"}
updated_dataset = dataset_instance.update(updated_message)
# get the dataset (list datasets)
print(dataset_instance)
print(updated_dataset)
# delete the dataset (delete datasets)
to_be_deleted_datasets = [dataset_instance.id]
ragflow_instance.delete_datasets(ids=to_be_deleted_datasets)
print("test done")
sys.exit(0)
except Exception as e:
print(str(e))
sys.exit(-1)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/search.py | graphrag/search.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import json
import logging
from collections import defaultdict
from copy import deepcopy
import json_repair
import pandas as pd
from common.misc_utils import get_uuid
from graphrag.query_analyze_prompt import PROMPTS
from graphrag.utils import get_entity_type2samples, get_llm_cache, set_llm_cache, get_relation
from common.token_utils import num_tokens_from_string
from rag.nlp.search import Dealer, index_name
from common.float_utils import get_float
from common import settings
from common.doc_store.doc_store_base import OrderByExpr
class KGSearch(Dealer):
async def _chat(self, llm_bdl, system, history, gen_conf):
response = get_llm_cache(llm_bdl.llm_name, system, history, gen_conf)
if response:
return response
response = await llm_bdl.async_chat(system, history, gen_conf)
if response.find("**ERROR**") >= 0:
raise Exception(response)
set_llm_cache(llm_bdl.llm_name, system, response, history, gen_conf)
return response
async def query_rewrite(self, llm, question, idxnms, kb_ids):
ty2ents = get_entity_type2samples(idxnms, kb_ids)
hint_prompt = PROMPTS["minirag_query2kwd"].format(query=question,
TYPE_POOL=json.dumps(ty2ents, ensure_ascii=False, indent=2))
result = await self._chat(llm, hint_prompt, [{"role": "user", "content": "Output:"}], {})
try:
keywords_data = json_repair.loads(result)
type_keywords = keywords_data.get("answer_type_keywords", [])
entities_from_query = keywords_data.get("entities_from_query", [])[:5]
return type_keywords, entities_from_query
except json_repair.JSONDecodeError:
try:
result = result.replace(hint_prompt[:-1], '').replace('user', '').replace('model', '').strip()
result = '{' + result.split('{')[1].split('}')[0] + '}'
keywords_data = json_repair.loads(result)
type_keywords = keywords_data.get("answer_type_keywords", [])
entities_from_query = keywords_data.get("entities_from_query", [])[:5]
return type_keywords, entities_from_query
# Handle parsing error
except Exception as e:
logging.exception(f"JSON parsing error: {result} -> {e}")
raise e
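    # Expected shape of the rewrite result parsed above (illustrative values):
    #   {
    #     "answer_type_keywords": ["ORGANIZATION", "PERSON"],
    #     "entities_from_query": ["ragflow", "knowledge graph"]
    #   }
    # query_rewrite returns (answer_type_keywords, first five entities_from_query).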
def _ent_info_from_(self, es_res, sim_thr=0.3):
res = {}
flds = ["content_with_weight", "_score", "entity_kwd", "rank_flt", "n_hop_with_weight"]
es_res = self.dataStore.get_fields(es_res, flds)
for _, ent in es_res.items():
for f in flds:
if f in ent and ent[f] is None:
del ent[f]
if get_float(ent.get("_score", 0)) < sim_thr:
continue
if isinstance(ent["entity_kwd"], list):
ent["entity_kwd"] = ent["entity_kwd"][0]
res[ent["entity_kwd"]] = {
"sim": get_float(ent.get("_score", 0)),
"pagerank": get_float(ent.get("rank_flt", 0)),
"n_hop_ents": json.loads(ent.get("n_hop_with_weight", "[]")),
"description": ent.get("content_with_weight", "{}")
}
return res
def _relation_info_from_(self, es_res, sim_thr=0.3):
res = {}
es_res = self.dataStore.get_fields(es_res, ["content_with_weight", "_score", "from_entity_kwd", "to_entity_kwd",
"weight_int"])
for _, ent in es_res.items():
if get_float(ent["_score"]) < sim_thr:
continue
f, t = sorted([ent["from_entity_kwd"], ent["to_entity_kwd"]])
if isinstance(f, list):
f = f[0]
if isinstance(t, list):
t = t[0]
res[(f, t)] = {
"sim": get_float(ent["_score"]),
"pagerank": get_float(ent.get("weight_int", 0)),
"description": ent["content_with_weight"]
}
return res
def get_relevant_ents_by_keywords(self, keywords, filters, idxnms, kb_ids, emb_mdl, sim_thr=0.3, N=56):
if not keywords:
return {}
filters = deepcopy(filters)
filters["knowledge_graph_kwd"] = "entity"
matchDense = self.get_vector(", ".join(keywords), emb_mdl, 1024, sim_thr)
es_res = self.dataStore.search(["content_with_weight", "entity_kwd", "rank_flt"], [], filters, [matchDense],
OrderByExpr(), 0, N,
idxnms, kb_ids)
return self._ent_info_from_(es_res, sim_thr)
def get_relevant_relations_by_txt(self, txt, filters, idxnms, kb_ids, emb_mdl, sim_thr=0.3, N=56):
if not txt:
return {}
filters = deepcopy(filters)
filters["knowledge_graph_kwd"] = "relation"
matchDense = self.get_vector(txt, emb_mdl, 1024, sim_thr)
es_res = self.dataStore.search(
["content_with_weight", "_score", "from_entity_kwd", "to_entity_kwd", "weight_int"],
[], filters, [matchDense], OrderByExpr(), 0, N, idxnms, kb_ids)
return self._relation_info_from_(es_res, sim_thr)
def get_relevant_ents_by_types(self, types, filters, idxnms, kb_ids, N=56):
if not types:
return {}
filters = deepcopy(filters)
filters["knowledge_graph_kwd"] = "entity"
filters["entity_type_kwd"] = types
ordr = OrderByExpr()
ordr.desc("rank_flt")
es_res = self.dataStore.search(["entity_kwd", "rank_flt"], [], filters, [], ordr, 0, N,
idxnms, kb_ids)
return self._ent_info_from_(es_res, 0)
async def retrieval(self, question: str,
tenant_ids: str | list[str],
kb_ids: list[str],
emb_mdl,
llm,
max_token: int = 8196,
ent_topn: int = 6,
rel_topn: int = 6,
comm_topn: int = 1,
ent_sim_threshold: float = 0.3,
rel_sim_threshold: float = 0.3,
**kwargs
):
qst = question
filters = self.get_filters({"kb_ids": kb_ids})
if isinstance(tenant_ids, str):
tenant_ids = tenant_ids.split(",")
idxnms = [index_name(tid) for tid in tenant_ids]
ty_kwds = []
try:
ty_kwds, ents = await self.query_rewrite(llm, qst, [index_name(tid) for tid in tenant_ids], kb_ids)
logging.info(f"Q: {qst}, Types: {ty_kwds}, Entities: {ents}")
except Exception as e:
logging.exception(e)
ents = [qst]
ents_from_query = self.get_relevant_ents_by_keywords(ents, filters, idxnms, kb_ids, emb_mdl, ent_sim_threshold)
ents_from_types = self.get_relevant_ents_by_types(ty_kwds, filters, idxnms, kb_ids, 10000)
rels_from_txt = self.get_relevant_relations_by_txt(qst, filters, idxnms, kb_ids, emb_mdl, rel_sim_threshold)
nhop_pathes = defaultdict(dict)
for _, ent in ents_from_query.items():
nhops = ent.get("n_hop_ents", [])
if not isinstance(nhops, list):
logging.warning(f"Abnormal n_hop_ents: {nhops}")
continue
for nbr in nhops:
path = nbr["path"]
wts = nbr["weights"]
for i in range(len(path) - 1):
f, t = path[i], path[i + 1]
if (f, t) in nhop_pathes:
nhop_pathes[(f, t)]["sim"] += ent["sim"] / (2 + i)
else:
nhop_pathes[(f, t)]["sim"] = ent["sim"] / (2 + i)
nhop_pathes[(f, t)]["pagerank"] = wts[i]
logging.info("Retrieved entities: {}".format(list(ents_from_query.keys())))
logging.info("Retrieved relations: {}".format(list(rels_from_txt.keys())))
logging.info("Retrieved entities from types({}): {}".format(ty_kwds, list(ents_from_types.keys())))
logging.info("Retrieved N-hops: {}".format(list(nhop_pathes.keys())))
# P(E|Q) => P(E) * P(Q|E) => pagerank * sim
for ent in ents_from_types.keys():
if ent not in ents_from_query:
continue
ents_from_query[ent]["sim"] *= 2
for (f, t) in rels_from_txt.keys():
pair = tuple(sorted([f, t]))
s = 0
if pair in nhop_pathes:
s += nhop_pathes[pair]["sim"]
del nhop_pathes[pair]
if f in ents_from_types:
s += 1
if t in ents_from_types:
s += 1
rels_from_txt[(f, t)]["sim"] *= s + 1
# This is for the relations from n-hop but not by query search
for (f, t) in nhop_pathes.keys():
s = 0
if f in ents_from_types:
s += 1
if t in ents_from_types:
s += 1
rels_from_txt[(f, t)] = {
"sim": nhop_pathes[(f, t)]["sim"] * (s + 1),
"pagerank": nhop_pathes[(f, t)]["pagerank"]
}
ents_from_query = sorted(ents_from_query.items(), key=lambda x: x[1]["sim"] * x[1]["pagerank"], reverse=True)[:ent_topn]
rels_from_txt = sorted(rels_from_txt.items(), key=lambda x: x[1]["sim"] * x[1]["pagerank"], reverse=True)[:rel_topn]
ents = []
relas = []
for n, ent in ents_from_query:
ents.append({
"Entity": n,
"Score": "%.2f" % (ent["sim"] * ent["pagerank"]),
"Description": json.loads(ent["description"]).get("description", "") if ent["description"] else ""
})
max_token -= num_tokens_from_string(str(ents[-1]))
if max_token <= 0:
ents = ents[:-1]
break
for (f, t), rel in rels_from_txt:
if not rel.get("description"):
for tid in tenant_ids:
rela = get_relation(tid, kb_ids, f, t)
if rela:
break
else:
continue
rel["description"] = rela["description"]
desc = rel["description"]
try:
desc = json.loads(desc).get("description", "")
except Exception:
pass
relas.append({
"From Entity": f,
"To Entity": t,
"Score": "%.2f" % (rel["sim"] * rel["pagerank"]),
"Description": desc
})
max_token -= num_tokens_from_string(str(relas[-1]))
if max_token <= 0:
relas = relas[:-1]
break
if ents:
ents = "\n---- Entities ----\n{}".format(pd.DataFrame(ents).to_csv())
else:
ents = ""
if relas:
relas = "\n---- Relations ----\n{}".format(pd.DataFrame(relas).to_csv())
else:
relas = ""
return {
"chunk_id": get_uuid(),
"content_ltks": "",
"content_with_weight": ents + relas + self._community_retrieval_([n for n, _ in ents_from_query], filters, kb_ids, idxnms,
comm_topn, max_token),
"doc_id": "",
"docnm_kwd": "Related content in Knowledge Graph",
"kb_id": kb_ids,
"important_kwd": [],
"image_id": "",
"similarity": 1.,
"vector_similarity": 1.,
"term_similarity": 0,
"vector": [],
"positions": [],
}
def _community_retrieval_(self, entities, condition, kb_ids, idxnms, topn, max_token):
## Community retrieval
fields = ["docnm_kwd", "content_with_weight"]
odr = OrderByExpr()
odr.desc("weight_flt")
fltr = deepcopy(condition)
fltr["knowledge_graph_kwd"] = "community_report"
fltr["entities_kwd"] = entities
comm_res = self.dataStore.search(fields, [], fltr, [],
OrderByExpr(), 0, topn, idxnms, kb_ids)
comm_res_fields = self.dataStore.get_fields(comm_res, fields)
txts = []
for ii, (_, row) in enumerate(comm_res_fields.items()):
obj = json.loads(row["content_with_weight"])
txts.append("# {}. {}\n## Content\n{}\n## Evidences\n{}\n".format(
ii + 1, row["docnm_kwd"], obj["report"], obj["evidences"]))
max_token -= num_tokens_from_string(str(txts[-1]))
if max_token <= 0:
txts = txts[:-1]
break
if not txts:
return ""
return "\n---- Community Report ----\n" + "\n".join(txts)
if __name__ == "__main__":
import argparse
from common.constants import LLMType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api.db.services.user_service import TenantService
from rag.nlp import search
settings.init_settings()
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--tenant_id', default=False, help="Tenant ID", action='store', required=True)
parser.add_argument('-d', '--kb_id', default=False, help="Knowledge base ID", action='store', required=True)
parser.add_argument('-q', '--question', default=False, help="Question", action='store', required=True)
args = parser.parse_args()
kb_id = args.kb_id
_, tenant = TenantService.get_by_id(args.tenant_id)
llm_bdl = LLMBundle(args.tenant_id, LLMType.CHAT, tenant.llm_id)
_, kb = KnowledgebaseService.get_by_id(kb_id)
embed_bdl = LLMBundle(args.tenant_id, LLMType.EMBEDDING, kb.embd_id)
kg = KGSearch(settings.docStoreConn)
print(asyncio.run(kg.retrieval(args.question, kb.tenant_id, [kb_id], embed_bdl, llm_bdl)))  # retrieval() takes the question string and tenant id(s); index names are derived internally
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/query_analyze_prompt.py | graphrag/query_analyze_prompt.py | # Licensed under the MIT License
"""
Reference:
- [LightRag](https://github.com/HKUDS/LightRAG)
- [MiniRAG](https://github.com/HKUDS/MiniRAG)
"""
PROMPTS = {}
PROMPTS["minirag_query2kwd"] = """---Role---
You are a helpful assistant tasked with identifying both answer-type and low-level keywords in the user's query.
---Goal---
Given the query, list both answer-type and low-level keywords.
answer_type_keywords focus on the type of answer the given query calls for, while low-level keywords focus on specific entities, details, or concrete terms.
The answer_type_keywords must be selected from Answer type pool.
This pool is in the form of a dictionary, where the key represents the Type you should choose from and the value represents the example samples.
---Instructions---
- Output the keywords in JSON format.
- The JSON should have two keys:
- "answer_type_keywords" for the types of the answer. In this list, the types with the highest likelihood should be placed at the forefront. No more than 3.
- "entities_from_query" for specific entities or details. It must be extracted from the query.
######################
-Examples-
######################
Example 1:
Query: "How does international trade influence global economic stability?"
Answer type pool: {{
'PERSONAL LIFE': ['FAMILY TIME', 'HOME MAINTENANCE'],
'STRATEGY': ['MARKETING PLAN', 'BUSINESS EXPANSION'],
'SERVICE FACILITATION': ['ONLINE SUPPORT', 'CUSTOMER SERVICE TRAINING'],
'PERSON': ['JANE DOE', 'JOHN SMITH'],
'FOOD': ['PASTA', 'SUSHI'],
'EMOTION': ['HAPPINESS', 'ANGER'],
'PERSONAL EXPERIENCE': ['TRAVEL ABROAD', 'STUDYING ABROAD'],
'INTERACTION': ['TEAM MEETING', 'NETWORKING EVENT'],
'BEVERAGE': ['COFFEE', 'TEA'],
'PLAN': ['ANNUAL BUDGET', 'PROJECT TIMELINE'],
'GEO': ['NEW YORK CITY', 'SOUTH AFRICA'],
'GEAR': ['CAMPING TENT', 'CYCLING HELMET'],
'EMOJI': ['🎉', '🚀'],
'BEHAVIOR': ['POSITIVE FEEDBACK', 'NEGATIVE CRITICISM'],
'TONE': ['FORMAL', 'INFORMAL'],
'LOCATION': ['DOWNTOWN', 'SUBURBS']
}}
################
Output:
{{
"answer_type_keywords": ["STRATEGY","PERSONAL LIFE"],
"entities_from_query": ["Trade agreements", "Tariffs", "Currency exchange", "Imports", "Exports"]
}}
#############################
Example 2:
Query: "When was SpaceX's first rocket launch?"
Answer type pool: {{
'DATE AND TIME': ['2023-10-10 10:00', 'THIS AFTERNOON'],
'ORGANIZATION': ['GLOBAL INITIATIVES CORPORATION', 'LOCAL COMMUNITY CENTER'],
'PERSONAL LIFE': ['DAILY EXERCISE ROUTINE', 'FAMILY VACATION PLANNING'],
'STRATEGY': ['NEW PRODUCT LAUNCH', 'YEAR-END SALES BOOST'],
'SERVICE FACILITATION': ['REMOTE IT SUPPORT', 'ON-SITE TRAINING SESSIONS'],
'PERSON': ['ALEXANDER HAMILTON', 'MARIA CURIE'],
'FOOD': ['GRILLED SALMON', 'VEGETARIAN BURRITO'],
'EMOTION': ['EXCITEMENT', 'DISAPPOINTMENT'],
'PERSONAL EXPERIENCE': ['BIRTHDAY CELEBRATION', 'FIRST MARATHON'],
'INTERACTION': ['OFFICE WATER COOLER CHAT', 'ONLINE FORUM DEBATE'],
'BEVERAGE': ['ICED COFFEE', 'GREEN SMOOTHIE'],
'PLAN': ['WEEKLY MEETING SCHEDULE', 'MONTHLY BUDGET OVERVIEW'],
'GEO': ['MOUNT EVEREST BASE CAMP', 'THE GREAT BARRIER REEF'],
'GEAR': ['PROFESSIONAL CAMERA EQUIPMENT', 'OUTDOOR HIKING GEAR'],
'EMOJI': ['📅', '⏰'],
'BEHAVIOR': ['PUNCTUALITY', 'HONESTY'],
'TONE': ['CONFIDENTIAL', 'SATIRICAL'],
'LOCATION': ['CENTRAL PARK', 'DOWNTOWN LIBRARY']
}}
################
Output:
{{
"answer_type_keywords": ["DATE AND TIME", "ORGANIZATION", "PLAN"],
"entities_from_query": ["SpaceX", "Rocket launch", "Aerospace", "Power Recovery"]
}}
#############################
Example 3:
Query: "What is the role of education in reducing poverty?"
Answer type pool: {{
'PERSONAL LIFE': ['MANAGING WORK-LIFE BALANCE', 'HOME IMPROVEMENT PROJECTS'],
'STRATEGY': ['MARKETING STRATEGIES FOR Q4', 'EXPANDING INTO NEW MARKETS'],
'SERVICE FACILITATION': ['CUSTOMER SATISFACTION SURVEYS', 'STAFF RETENTION PROGRAMS'],
'PERSON': ['ALBERT EINSTEIN', 'MARIA CALLAS'],
'FOOD': ['PAN-FRIED STEAK', 'POACHED EGGS'],
'EMOTION': ['OVERWHELM', 'CONTENTMENT'],
'PERSONAL EXPERIENCE': ['LIVING ABROAD', 'STARTING A NEW JOB'],
'INTERACTION': ['SOCIAL MEDIA ENGAGEMENT', 'PUBLIC SPEAKING'],
'BEVERAGE': ['CAPPUCCINO', 'MATCHA LATTE'],
'PLAN': ['ANNUAL FITNESS GOALS', 'QUARTERLY BUSINESS REVIEW'],
'GEO': ['THE AMAZON RAINFOREST', 'THE GRAND CANYON'],
'GEAR': ['SURFING ESSENTIALS', 'CYCLING ACCESSORIES'],
'EMOJI': ['💻', '📱'],
'BEHAVIOR': ['TEAMWORK', 'LEADERSHIP'],
'TONE': ['FORMAL MEETING', 'CASUAL CONVERSATION'],
'LOCATION': ['URBAN CITY CENTER', 'RURAL COUNTRYSIDE']
}}
################
Output:
{{
"answer_type_keywords": ["STRATEGY", "PERSON"],
"entities_from_query": ["School access", "Literacy rates", "Job training", "Income inequality"]
}}
#############################
Example 4:
Query: "Where is the capital of the United States?"
Answer type pool: {{
'ORGANIZATION': ['GREENPEACE', 'RED CROSS'],
'PERSONAL LIFE': ['DAILY WORKOUT', 'HOME COOKING'],
'STRATEGY': ['FINANCIAL INVESTMENT', 'BUSINESS EXPANSION'],
'SERVICE FACILITATION': ['ONLINE SUPPORT', 'CUSTOMER SERVICE TRAINING'],
'PERSON': ['ALBERTA SMITH', 'BENJAMIN JONES'],
'FOOD': ['PASTA CARBONARA', 'SUSHI PLATTER'],
'EMOTION': ['HAPPINESS', 'SADNESS'],
'PERSONAL EXPERIENCE': ['TRAVEL ADVENTURE', 'BOOK CLUB'],
'INTERACTION': ['TEAM BUILDING', 'NETWORKING MEETUP'],
'BEVERAGE': ['LATTE', 'GREEN TEA'],
'PLAN': ['WEIGHT LOSS', 'CAREER DEVELOPMENT'],
'GEO': ['PARIS', 'NEW YORK'],
'GEAR': ['CAMERA', 'HEADPHONES'],
'EMOJI': ['🏢', '🌍'],
'BEHAVIOR': ['POSITIVE THINKING', 'STRESS MANAGEMENT'],
'TONE': ['FRIENDLY', 'PROFESSIONAL'],
'LOCATION': ['DOWNTOWN', 'SUBURBS']
}}
################
Output:
{{
"answer_type_keywords": ["LOCATION"],
"entities_from_query": ["capital of the United States", "Washington", "New York"]
}}
#############################
-Real Data-
######################
Query: {query}
Answer type pool:{TYPE_POOL}
######################
Output:
"""
PROMPTS["keywords_extraction"] = """---Role---
You are a helpful assistant tasked with identifying both high-level and low-level keywords in the user's query.
---Goal---
Given the query, list both high-level and low-level keywords. High-level keywords focus on overarching concepts or themes, while low-level keywords focus on specific entities, details, or concrete terms.
---Instructions---
- Output the keywords in JSON format.
- The JSON should have two keys:
- "high_level_keywords" for overarching concepts or themes.
- "low_level_keywords" for specific entities or details.
######################
-Examples-
######################
{examples}
#############################
-Real Data-
######################
Query: {query}
######################
The `Output` should be human-readable text, not Unicode escape sequences. Keep the same language as the `Query`.
Output:
"""
PROMPTS["keywords_extraction_examples"] = [
"""Example 1:
Query: "How does international trade influence global economic stability?"
################
Output:
{
"high_level_keywords": ["International trade", "Global economic stability", "Economic impact"],
"low_level_keywords": ["Trade agreements", "Tariffs", "Currency exchange", "Imports", "Exports"]
}
#############################""",
"""Example 2:
Query: "What are the environmental consequences of deforestation on biodiversity?"
################
Output:
{
"high_level_keywords": ["Environmental consequences", "Deforestation", "Biodiversity loss"],
"low_level_keywords": ["Species extinction", "Habitat destruction", "Carbon emissions", "Rainforest", "Ecosystem"]
}
#############################""",
"""Example 3:
Query: "What is the role of education in reducing poverty?"
################
Output:
{
"high_level_keywords": ["Education", "Poverty reduction", "Socioeconomic development"],
"low_level_keywords": ["School access", "Literacy rates", "Job training", "Income inequality"]
}
#############################""",
]
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/entity_resolution_prompt.py | graphrag/entity_resolution_prompt.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
ENTITY_RESOLUTION_PROMPT = """
-Goal-
Please answer the following Question as required
-Steps-
1. Answer each line of questioning as required
2. Return output in English as a single list of the answers from step 1. Use **{record_delimiter}** as the list delimiter.
######################
-Examples-
######################
Example 1:
Question:
When determining whether two Products are the same, you should only focus on critical properties and overlook noisy factors.
Demonstration 1: name of Product A is : "computer", name of Product B is :"phone" No, Product A and Product B are different products.
Question 1: name of Product A is : "television", name of Product B is :"TV"
Question 2: name of Product A is : "cup", name of Product B is :"mug"
Question 3: name of Product A is : "soccer", name of Product B is :"football"
Question 4: name of Product A is : "pen", name of Product B is :"eraser"
Use domain knowledge of Products to help understand the text and answer the above 4 questions in the format: For Question i, Yes, Product A and Product B are the same product. or No, Product A and Product B are different products. For Question i+1, (repeat the above procedures)
################
Output:
(For question {entity_index_delimiter}1{entity_index_delimiter}, {resolution_result_delimiter}no{resolution_result_delimiter}, Product A and Product B are different products.){record_delimiter}
(For question {entity_index_delimiter}2{entity_index_delimiter}, {resolution_result_delimiter}no{resolution_result_delimiter}, Product A and Product B are different products.){record_delimiter}
(For question {entity_index_delimiter}3{entity_index_delimiter}, {resolution_result_delimiter}yes{resolution_result_delimiter}, Product A and Product B are the same product.){record_delimiter}
(For question {entity_index_delimiter}4{entity_index_delimiter}, {resolution_result_delimiter}no{resolution_result_delimiter}, Product A and Product B are different products.){record_delimiter}
#############################
Example 2:
Question:
When determining whether two toponyms are the same, you should only focus on critical properties and overlook noisy factors.
Demonstration 1: name of toponym A is : "nanjing", name of toponym B is :"nanjing city" Yes, toponym A and toponym B are the same toponym.
Question 1: name of toponym A is : "Chicago", name of toponym B is :"ChiTown"
Question 2: name of toponym A is : "Shanghai", name of toponym B is :"Zhengzhou"
Question 3: name of toponym A is : "Beijing", name of toponym B is :"Peking"
Question 4: name of toponym A is : "Los Angeles", name of toponym B is :"Cleveland"
Use domain knowledge of toponyms to help understand the text and answer the above 4 questions in the format: For Question i, Yes, toponym A and toponym B are the same toponym. or No, toponym A and toponym B are different toponyms. For Question i+1, (repeat the above procedures)
################
Output:
(For question {entity_index_delimiter}1{entity_index_delimiter}, {resolution_result_delimiter}yes{resolution_result_delimiter}, toponym A and toponym B are the same toponym.){record_delimiter}
(For question {entity_index_delimiter}2{entity_index_delimiter}, {resolution_result_delimiter}no{resolution_result_delimiter}, toponym A and toponym B are different toponyms.){record_delimiter}
(For question {entity_index_delimiter}3{entity_index_delimiter}, {resolution_result_delimiter}yes{resolution_result_delimiter}, toponym A and toponym B are the same toponym.){record_delimiter}
(For question {entity_index_delimiter}4{entity_index_delimiter}, {resolution_result_delimiter}no{resolution_result_delimiter}, toponym A and toponym B are different toponyms.){record_delimiter}
#############################
-Real Data-
######################
Question:{input_text}
######################
Output:
"""
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/utils.py | graphrag/utils.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
- [graphrag](https://github.com/microsoft/graphrag)
- [LightRag](https://github.com/HKUDS/LightRAG)
"""
import asyncio
import dataclasses
import html
import json
import logging
import os
import re
import time
from collections import defaultdict
from hashlib import md5
from typing import Any, Callable, Set, Tuple
import networkx as nx
import numpy as np
import xxhash
from networkx.readwrite import json_graph
from common.misc_utils import get_uuid
from common.connection_utils import timeout
from rag.nlp import rag_tokenizer, search
from rag.utils.redis_conn import REDIS_CONN
from common import settings
from common.doc_store.doc_store_base import OrderByExpr
GRAPH_FIELD_SEP = "<SEP>"
ErrorHandlerFn = Callable[[BaseException | None, str | None, dict | None], None]
chat_limiter = asyncio.Semaphore(int(os.environ.get("MAX_CONCURRENT_CHATS", 10)))
@dataclasses.dataclass
class GraphChange:
removed_nodes: Set[str] = dataclasses.field(default_factory=set)
added_updated_nodes: Set[str] = dataclasses.field(default_factory=set)
removed_edges: Set[Tuple[str, str]] = dataclasses.field(default_factory=set)
added_updated_edges: Set[Tuple[str, str]] = dataclasses.field(default_factory=set)
def perform_variable_replacements(input: str, history: list[dict] | None = None, variables: dict | None = None) -> str:
"""Perform variable replacements on the input string and in a chat log."""
if history is None:
history = []
if variables is None:
variables = {}
result = input
def replace_all(input: str) -> str:
result = input
for k, v in variables.items():
result = result.replace(f"{{{k}}}", str(v))
return result
result = replace_all(result)
for entry in history:
if entry.get("role") == "system":
entry["content"] = replace_all(entry.get("content") or "")
return result
def clean_str(input: Any) -> str:
"""Clean an input string by removing HTML escapes, control characters, and other unwanted characters."""
# If we get non-string input, just give it back
if not isinstance(input, str):
return input
result = html.unescape(input.strip())
# https://stackoverflow.com/questions/4324790/removing-control-characters-from-a-string-in-python
return re.sub(r"[\"\x00-\x1f\x7f-\x9f]", "", result)
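# Example behaviour (a sketch, not exercised by tests in this module):
#   clean_str(' &quot;Alice&quot;\x00 ')  ->  'Alice'
# i.e. surrounding whitespace is stripped, HTML entities are unescaped, and
# double quotes plus control characters are removed; non-string input is
# returned unchanged.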
def dict_has_keys_with_types(data: dict, expected_fields: list[tuple[str, type]]) -> bool:
"""Return True if the given dictionary has the given keys with the given types."""
for field, field_type in expected_fields:
if field not in data:
return False
value = data[field]
if not isinstance(value, field_type):
return False
return True
def get_llm_cache(llmnm, txt, history, genconf):
hasher = xxhash.xxh64()
hasher.update((str(llmnm)+str(txt)+str(history)+str(genconf)).encode("utf-8"))
k = hasher.hexdigest()
bin = REDIS_CONN.get(k)
if not bin:
return None
return bin
def set_llm_cache(llmnm, txt, v, history, genconf):
hasher = xxhash.xxh64()
hasher.update((str(llmnm)+str(txt)+str(history)+str(genconf)).encode("utf-8"))
k = hasher.hexdigest()
REDIS_CONN.set(k, v.encode("utf-8"), 24 * 3600)
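# The pair of helpers above share one key scheme: xxh64 over the concatenation
# of model name, prompt text, history and generation config, stored with a
# 24-hour TTL. A minimal round-trip sketch, assuming REDIS_CONN is reachable
# (the return type - bytes vs str - depends on how the Redis client decodes
# responses):
def _example_llm_cache_roundtrip():
    set_llm_cache("gpt-4o", "hello", "cached answer", [], {})
    return get_llm_cache("gpt-4o", "hello", [], {})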
def get_embed_cache(llmnm, txt):
hasher = xxhash.xxh64()
hasher.update(str(llmnm).encode("utf-8"))
hasher.update(str(txt).encode("utf-8"))
k = hasher.hexdigest()
bin = REDIS_CONN.get(k)
if not bin:
return
return np.array(json.loads(bin))
def set_embed_cache(llmnm, txt, arr):
hasher = xxhash.xxh64()
hasher.update(str(llmnm).encode("utf-8"))
hasher.update(str(txt).encode("utf-8"))
k = hasher.hexdigest()
arr = json.dumps(arr.tolist() if isinstance(arr, np.ndarray) else arr)
REDIS_CONN.set(k, arr.encode("utf-8"), 24 * 3600)
def get_tags_from_cache(kb_ids):
hasher = xxhash.xxh64()
hasher.update(str(kb_ids).encode("utf-8"))
k = hasher.hexdigest()
bin = REDIS_CONN.get(k)
if not bin:
return
return bin
def set_tags_to_cache(kb_ids, tags):
hasher = xxhash.xxh64()
hasher.update(str(kb_ids).encode("utf-8"))
k = hasher.hexdigest()
REDIS_CONN.set(k, json.dumps(tags).encode("utf-8"), 600)
def tidy_graph(graph: nx.Graph, callback, check_attribute: bool = True):
"""
Ensure all nodes and edges in the graph have some essential attribute.
"""
def is_valid_item(node_attrs: dict) -> bool:
valid_node = True
for attr in ["description", "source_id"]:
if attr not in node_attrs:
valid_node = False
break
return valid_node
if check_attribute:
purged_nodes = []
for node, node_attrs in graph.nodes(data=True):
if not is_valid_item(node_attrs):
purged_nodes.append(node)
for node in purged_nodes:
graph.remove_node(node)
if purged_nodes and callback:
callback(msg=f"Purged {len(purged_nodes)} nodes from graph due to missing essential attributes.")
purged_edges = []
for source, target, attr in graph.edges(data=True):
if check_attribute:
if not is_valid_item(attr):
purged_edges.append((source, target))
if "keywords" not in attr:
attr["keywords"] = []
for source, target in purged_edges:
graph.remove_edge(source, target)
if purged_edges and callback:
callback(msg=f"Purged {len(purged_edges)} edges from graph due to missing essential attributes.")
def get_from_to(node1, node2):
if node1 < node2:
return (node1, node2)
else:
return (node2, node1)
def graph_merge(g1: nx.Graph, g2: nx.Graph, change: GraphChange):
"""Merge graph g2 into g1 in place."""
for node_name, attr in g2.nodes(data=True):
change.added_updated_nodes.add(node_name)
if not g1.has_node(node_name):
g1.add_node(node_name, **attr)
continue
node = g1.nodes[node_name]
node["description"] += GRAPH_FIELD_SEP + attr["description"]
# A node's source_id indicates which chunks it came from.
node["source_id"] += attr["source_id"]
for source, target, attr in g2.edges(data=True):
change.added_updated_edges.add(get_from_to(source, target))
edge = g1.get_edge_data(source, target)
if edge is None:
g1.add_edge(source, target, **attr)
continue
edge["weight"] += attr.get("weight", 0)
edge["description"] += GRAPH_FIELD_SEP + attr["description"]
edge["keywords"] += attr["keywords"]
# An edge's source_id indicates which chunks it came from.
edge["source_id"] += attr["source_id"]
for node_degree in g1.degree:
g1.nodes[str(node_degree[0])]["rank"] = int(node_degree[1])
# A graph's source_id indicates which documents it came from.
if "source_id" not in g1.graph:
g1.graph["source_id"] = []
g1.graph["source_id"] += g2.graph.get("source_id", [])
return g1
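# A minimal sketch of graph_merge in isolation; the attribute values are
# illustrative and follow the shape tidy_graph expects (description/source_id):
def _example_graph_merge():
    g1, g2 = nx.Graph(), nx.Graph()
    change = GraphChange()
    g1.add_node("A", description="first", source_id=["chunk-1"])
    g2.add_node("A", description="second", source_id=["chunk-2"])
    merged = graph_merge(g1, g2, change)
    # merged.nodes["A"]["description"] == "first<SEP>second"
    # merged.nodes["A"]["source_id"] == ["chunk-1", "chunk-2"]
    # change.added_updated_nodes == {"A"}
    return merged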
def compute_args_hash(*args):
return md5(str(args).encode()).hexdigest()
def handle_single_entity_extraction(
record_attributes: list[str],
chunk_key: str,
):
if len(record_attributes) < 4 or record_attributes[0] != '"entity"':
return None
# add this record as a node in the G
entity_name = clean_str(record_attributes[1].upper())
if not entity_name.strip():
return None
entity_type = clean_str(record_attributes[2].upper())
entity_description = clean_str(record_attributes[3])
entity_source_id = chunk_key
return dict(
entity_name=entity_name.upper(),
entity_type=entity_type.upper(),
description=entity_description,
source_id=entity_source_id,
)
def handle_single_relationship_extraction(record_attributes: list[str], chunk_key: str):
if len(record_attributes) < 5 or record_attributes[0] != '"relationship"':
return None
# add this record as edge
source = clean_str(record_attributes[1].upper())
target = clean_str(record_attributes[2].upper())
edge_description = clean_str(record_attributes[3])
edge_keywords = clean_str(record_attributes[4])
edge_source_id = chunk_key
weight = float(record_attributes[-1]) if is_float_regex(record_attributes[-1]) else 1.0
pair = sorted([source.upper(), target.upper()])
return dict(
src_id=pair[0],
tgt_id=pair[1],
weight=weight,
description=edge_description,
keywords=edge_keywords,
source_id=edge_source_id,
metadata={"created_at": time.time()},
)
def pack_user_ass_to_openai_messages(*args: str):
roles = ["user", "assistant"]
return [{"role": roles[i % 2], "content": content} for i, content in enumerate(args)]
def split_string_by_multi_markers(content: str, markers: list[str]) -> list[str]:
"""Split a string by multiple markers"""
if not markers:
return [content]
results = re.split("|".join(re.escape(marker) for marker in markers), content)
return [r.strip() for r in results if r.strip()]
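# Example (a sketch): splitting an LLM record list on the default record
# delimiter used by the extraction prompts:
#   split_string_by_multi_markers('("entity"<|>"A")##("entity"<|>"B")', ["##"])
#   -> ['("entity"<|>"A")', '("entity"<|>"B")']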
def is_float_regex(value):
return bool(re.match(r"^[-+]?[0-9]*\.?[0-9]+$", value))
def chunk_id(chunk):
return xxhash.xxh64((chunk["content_with_weight"] + chunk["kb_id"]).encode("utf-8")).hexdigest()
async def graph_node_to_chunk(kb_id, embd_mdl, ent_name, meta, chunks):
global chat_limiter
enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
chunk = {
"id": get_uuid(),
"important_kwd": [ent_name],
"title_tks": rag_tokenizer.tokenize(ent_name),
"entity_kwd": ent_name,
"knowledge_graph_kwd": "entity",
"entity_type_kwd": meta["entity_type"],
"content_with_weight": json.dumps(meta, ensure_ascii=False),
"content_ltks": rag_tokenizer.tokenize(meta["description"]),
"source_id": meta["source_id"],
"kb_id": kb_id,
"available_int": 0,
}
chunk["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(chunk["content_ltks"])
ebd = get_embed_cache(embd_mdl.llm_name, ent_name)
if ebd is None:
async with chat_limiter:
timeout = 3 if enable_timeout_assertion else 30000000
ebd, _ = await asyncio.wait_for(
asyncio.to_thread(embd_mdl.encode, [ent_name]),
timeout=timeout
)
ebd = ebd[0]
set_embed_cache(embd_mdl.llm_name, ent_name, ebd)
assert ebd is not None
chunk["q_%d_vec" % len(ebd)] = ebd
chunks.append(chunk)
@timeout(3, 3)
def get_relation(tenant_id, kb_id, from_ent_name, to_ent_name, size=1):
ents = from_ent_name
if isinstance(ents, str):
ents = [from_ent_name]
if isinstance(to_ent_name, str):
to_ent_name = [to_ent_name]
ents.extend(to_ent_name)
ents = list(set(ents))
conds = {"fields": ["content_with_weight"], "size": size, "from_entity_kwd": ents, "to_entity_kwd": ents, "knowledge_graph_kwd": ["relation"]}
res = []
es_res = settings.retriever.search(conds, search.index_name(tenant_id), [kb_id] if isinstance(kb_id, str) else kb_id)
for id in es_res.ids:
try:
if size == 1:
return json.loads(es_res.field[id]["content_with_weight"])
res.append(json.loads(es_res.field[id]["content_with_weight"]))
except Exception:
continue
return res
async def graph_edge_to_chunk(kb_id, embd_mdl, from_ent_name, to_ent_name, meta, chunks):
enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
chunk = {
"id": get_uuid(),
"from_entity_kwd": from_ent_name,
"to_entity_kwd": to_ent_name,
"knowledge_graph_kwd": "relation",
"content_with_weight": json.dumps(meta, ensure_ascii=False),
"content_ltks": rag_tokenizer.tokenize(meta["description"]),
"important_kwd": meta["keywords"],
"source_id": meta["source_id"],
"weight_int": int(meta["weight"]),
"kb_id": kb_id,
"available_int": 0,
}
chunk["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(chunk["content_ltks"])
txt = f"{from_ent_name}->{to_ent_name}"
ebd = get_embed_cache(embd_mdl.llm_name, txt)
if ebd is None:
async with chat_limiter:
timeout = 3 if enable_timeout_assertion else 300000000
ebd, _ = await asyncio.wait_for(
asyncio.to_thread(
embd_mdl.encode,
[txt + f": {meta['description']}"]
),
timeout=timeout
)
ebd = ebd[0]
set_embed_cache(embd_mdl.llm_name, txt, ebd)
assert ebd is not None
chunk["q_%d_vec" % len(ebd)] = ebd
chunks.append(chunk)
async def does_graph_contains(tenant_id, kb_id, doc_id):
# Get doc_ids of graph
fields = ["source_id"]
condition = {
"knowledge_graph_kwd": ["graph"],
"removed_kwd": "N",
}
res = await asyncio.to_thread(
settings.docStoreConn.search,
fields, [], condition, [], OrderByExpr(),
0, 1, search.index_name(tenant_id), [kb_id]
)
fields2 = settings.docStoreConn.get_fields(res, fields)
graph_doc_ids = set()
for chunk_id in fields2.keys():
graph_doc_ids |= set(fields2[chunk_id]["source_id"])
return doc_id in graph_doc_ids
async def get_graph_doc_ids(tenant_id, kb_id) -> list[str]:
conds = {"fields": ["source_id"], "removed_kwd": "N", "size": 1, "knowledge_graph_kwd": ["graph"]}
res = await asyncio.to_thread(
settings.retriever.search,
conds,
search.index_name(tenant_id),
[kb_id]
)
doc_ids = []
if res.total == 0:
return doc_ids
for id in res.ids:
doc_ids = res.field[id]["source_id"]
return doc_ids
async def get_graph(tenant_id, kb_id, exclude_rebuild=None):
conds = {"fields": ["content_with_weight", "removed_kwd", "source_id"], "size": 1, "knowledge_graph_kwd": ["graph"]}
res = await asyncio.to_thread(
settings.retriever.search,
conds,
search.index_name(tenant_id),
[kb_id]
)
if res.total != 0:
for id in res.ids:
try:
if res.field[id]["removed_kwd"] == "N":
g = json_graph.node_link_graph(json.loads(res.field[id]["content_with_weight"]), edges="edges")
if "source_id" not in g.graph:
g.graph["source_id"] = res.field[id]["source_id"]
else:
g = await rebuild_graph(tenant_id, kb_id, exclude_rebuild)
return g
except Exception:
continue
return None
async def set_graph(tenant_id: str, kb_id: str, embd_mdl, graph: nx.Graph, change: GraphChange, callback):
global chat_limiter
start = asyncio.get_running_loop().time()
await asyncio.to_thread(
settings.docStoreConn.delete,
{"knowledge_graph_kwd": ["graph", "subgraph"]},
search.index_name(tenant_id),
kb_id
)
if change.removed_nodes:
await asyncio.to_thread(
settings.docStoreConn.delete,
{"knowledge_graph_kwd": ["entity"], "entity_kwd": sorted(change.removed_nodes)},
search.index_name(tenant_id),
kb_id
)
if change.removed_edges:
async def del_edges(from_node, to_node):
async with chat_limiter:
await asyncio.to_thread(
settings.docStoreConn.delete,
{"knowledge_graph_kwd": ["relation"], "from_entity_kwd": from_node, "to_entity_kwd": to_node},
search.index_name(tenant_id),
kb_id
)
tasks = []
for from_node, to_node in change.removed_edges:
tasks.append(asyncio.create_task(del_edges(from_node, to_node)))
try:
await asyncio.gather(*tasks, return_exceptions=False)
except Exception as e:
logging.error(f"Error while deleting edges: {e}")
for t in tasks:
t.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
raise
now = asyncio.get_running_loop().time()
if callback:
callback(msg=f"set_graph removed {len(change.removed_nodes)} nodes and {len(change.removed_edges)} edges from index in {now - start:.2f}s.")
start = now
chunks = [
{
"id": get_uuid(),
"content_with_weight": json.dumps(nx.node_link_data(graph, edges="edges"), ensure_ascii=False),
"knowledge_graph_kwd": "graph",
"kb_id": kb_id,
"source_id": graph.graph.get("source_id", []),
"available_int": 0,
"removed_kwd": "N",
}
]
# generate updated subgraphs
for source in graph.graph["source_id"]:
subgraph = graph.subgraph([n for n in graph.nodes if source in graph.nodes[n]["source_id"]]).copy()
subgraph.graph["source_id"] = [source]
for n in subgraph.nodes:
subgraph.nodes[n]["source_id"] = [source]
chunks.append(
{
"id": get_uuid(),
"content_with_weight": json.dumps(nx.node_link_data(subgraph, edges="edges"), ensure_ascii=False),
"knowledge_graph_kwd": "subgraph",
"kb_id": kb_id,
"source_id": [source],
"available_int": 0,
"removed_kwd": "N",
}
)
tasks = []
for ii, node in enumerate(change.added_updated_nodes):
node_attrs = graph.nodes[node]
tasks.append(asyncio.create_task(
graph_node_to_chunk(kb_id, embd_mdl, node, node_attrs, chunks)
))
if ii % 100 == 9 and callback:
callback(msg=f"Get embedding of nodes: {ii}/{len(change.added_updated_nodes)}")
try:
await asyncio.gather(*tasks, return_exceptions=False)
except Exception as e:
logging.error(f"Error in get_embedding_of_nodes: {e}")
for t in tasks:
t.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
raise
tasks = []
for ii, (from_node, to_node) in enumerate(change.added_updated_edges):
edge_attrs = graph.get_edge_data(from_node, to_node)
if not edge_attrs:
continue
tasks.append(asyncio.create_task(
graph_edge_to_chunk(kb_id, embd_mdl, from_node, to_node, edge_attrs, chunks)
))
if ii % 100 == 9 and callback:
callback(msg=f"Get embedding of edges: {ii}/{len(change.added_updated_edges)}")
try:
await asyncio.gather(*tasks, return_exceptions=False)
except Exception as e:
logging.error(f"Error in get_embedding_of_edges: {e}")
for t in tasks:
t.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
raise
now = asyncio.get_running_loop().time()
if callback:
callback(msg=f"set_graph converted graph change to {len(chunks)} chunks in {now - start:.2f}s.")
start = now
enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
es_bulk_size = 4
for b in range(0, len(chunks), es_bulk_size):
timeout = 3 if enable_timeout_assertion else 30000000
doc_store_result = await asyncio.wait_for(
asyncio.to_thread(
settings.docStoreConn.insert,
chunks[b : b + es_bulk_size],
search.index_name(tenant_id),
kb_id
),
timeout=timeout
)
if b % 100 == es_bulk_size and callback:
callback(msg=f"Insert chunks: {b}/{len(chunks)}")
if doc_store_result:
error_message = f"Insert chunk error: {doc_store_result}, please check log file and Elasticsearch/Infinity status!"
raise Exception(error_message)
now = asyncio.get_running_loop().time()
if callback:
callback(msg=f"set_graph added/updated {len(change.added_updated_nodes)} nodes and {len(change.added_updated_edges)} edges from index in {now - start:.2f}s.")
def is_continuous_subsequence(subseq, seq):
def find_all_indexes(tup, value):
indexes = []
start = 0
while True:
try:
index = tup.index(value, start)
indexes.append(index)
start = index + 1
except ValueError:
break
return indexes
index_list = find_all_indexes(seq, subseq[0])
for idx in index_list:
if idx != len(seq) - 1:
if seq[idx + 1] == subseq[-1]:
return True
return False
def merge_tuples(list1, list2):
result = []
for tup in list1:
last_element = tup[-1]
if last_element in tup[:-1]:
result.append(tup)
else:
matching_tuples = [t for t in list2 if t[0] == last_element]
already_match_flag = 0
for match in matching_tuples:
matchh = (match[1], match[0])
if is_continuous_subsequence(match, tup) or is_continuous_subsequence(matchh, tup):
continue
already_match_flag = 1
merged_tuple = tup + match[1:]
result.append(merged_tuple)
if not already_match_flag:
result.append(tup)
return result
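# Example (a sketch): merge_tuples extends each path in list1 with matching
# continuations from list2, unless the continuation is already adjacent in
# the path:
#   merge_tuples([("A", "B")], [("B", "C")])  ->  [("A", "B", "C")]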
def get_entity_type2samples(idxnms, kb_ids: list):
es_res = settings.retriever.search({"knowledge_graph_kwd": "ty2ents", "kb_id": kb_ids, "size": 10000, "fields": ["content_with_weight"]},idxnms,kb_ids)
res = defaultdict(list)
for id in es_res.ids:
smp = es_res.field[id].get("content_with_weight")
if not smp:
continue
try:
smp = json.loads(smp)
except Exception as e:
logging.exception(e)
for ty, ents in smp.items():
res[ty].extend(ents)
return res
def flat_uniq_list(arr, key):
res = []
for a in arr:
a = a[key]
if isinstance(a, list):
res.extend(a)
else:
res.append(a)
return list(set(res))
async def rebuild_graph(tenant_id, kb_id, exclude_rebuild=None):
graph = nx.Graph()
flds = ["knowledge_graph_kwd", "content_with_weight", "source_id"]
bs = 256
for i in range(0, 1024 * bs, bs):
es_res = await asyncio.to_thread(
settings.docStoreConn.search,
flds, [], {"kb_id": kb_id, "knowledge_graph_kwd": ["subgraph"]},
[], OrderByExpr(), i, bs, search.index_name(tenant_id), [kb_id]
)
# tot = settings.docStoreConn.get_total(es_res)
es_res = settings.docStoreConn.get_fields(es_res, flds)
if len(es_res) == 0:
break
for id, d in es_res.items():
assert d["knowledge_graph_kwd"] == "subgraph"
if isinstance(exclude_rebuild, list):
if sum([n in d["source_id"] for n in exclude_rebuild]):
continue
elif exclude_rebuild in d["source_id"]:
continue
next_graph = json_graph.node_link_graph(json.loads(d["content_with_weight"]), edges="edges")
merged_graph = nx.compose(graph, next_graph)
merged_source = {n: graph.nodes[n]["source_id"] + next_graph.nodes[n]["source_id"] for n in graph.nodes & next_graph.nodes}
nx.set_node_attributes(merged_graph, merged_source, "source_id")
if "source_id" in graph.graph:
merged_graph.graph["source_id"] = graph.graph["source_id"] + next_graph.graph["source_id"]
else:
merged_graph.graph["source_id"] = next_graph.graph["source_id"]
graph = merged_graph
if len(graph.nodes) == 0:
return None
graph.graph["source_id"] = sorted(graph.graph["source_id"])
return graph
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/__init__.py | graphrag/__init__.py | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false | |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/entity_resolution.py | graphrag/entity_resolution.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import logging
import itertools
import os
import re
from dataclasses import dataclass
from typing import Any, Callable
import networkx as nx
from graphrag.general.extractor import Extractor
from rag.nlp import is_english
import editdistance
from graphrag.entity_resolution_prompt import ENTITY_RESOLUTION_PROMPT
from rag.llm.chat_model import Base as CompletionLLM
from graphrag.utils import perform_variable_replacements, chat_limiter, GraphChange
from api.db.services.task_service import has_canceled
from common.exceptions import TaskCanceledException
DEFAULT_RECORD_DELIMITER = "##"
DEFAULT_ENTITY_INDEX_DELIMITER = "<|>"
DEFAULT_RESOLUTION_RESULT_DELIMITER = "&&"
@dataclass
class EntityResolutionResult:
"""Entity resolution result class definition."""
graph: nx.Graph
change: GraphChange
class EntityResolution(Extractor):
"""Entity resolution class definition."""
_resolution_prompt: str
_output_formatter_prompt: str
_record_delimiter_key: str
_entity_index_delimiter_key: str
_resolution_result_delimiter_key: str
def __init__(
self,
llm_invoker: CompletionLLM,
):
"""Init method definition."""
super().__init__(llm_invoker)
self._llm = llm_invoker
self._resolution_prompt = ENTITY_RESOLUTION_PROMPT
self._record_delimiter_key = "record_delimiter"
self._entity_index_delimiter_key = "entity_index_delimiter"
self._resolution_result_delimiter_key = "resolution_result_delimiter"
self._input_text_key = "input_text"
async def __call__(self, graph: nx.Graph,
subgraph_nodes: set[str],
prompt_variables: dict[str, Any] | None = None,
callback: Callable | None = None,
task_id: str = "") -> EntityResolutionResult:
"""Call method definition."""
if prompt_variables is None:
prompt_variables = {}
if callback is None:
callback = lambda **kwargs: None  # no-op fallback so the progress callbacks below are always safe to call
# Wire defaults into the prompt variables
self.prompt_variables = {
**prompt_variables,
self._record_delimiter_key: prompt_variables.get(self._record_delimiter_key)
or DEFAULT_RECORD_DELIMITER,
self._entity_index_delimiter_key: prompt_variables.get(self._entity_index_delimiter_key)
or DEFAULT_ENTITY_INDEX_DELIMITER,
self._resolution_result_delimiter_key: prompt_variables.get(self._resolution_result_delimiter_key)
or DEFAULT_RESOLUTION_RESULT_DELIMITER,
}
nodes = sorted(graph.nodes())
entity_types = sorted(set(graph.nodes[node].get('entity_type', '-') for node in nodes))
node_clusters = {entity_type: [] for entity_type in entity_types}
for node in nodes:
node_clusters[graph.nodes[node].get('entity_type', '-')].append(node)
candidate_resolution = {entity_type: [] for entity_type in entity_types}
for k, v in node_clusters.items():
candidate_resolution[k] = [(a, b) for a, b in itertools.combinations(v, 2) if (a in subgraph_nodes or b in subgraph_nodes) and self.is_similarity(a, b)]
num_candidates = sum([len(candidates) for _, candidates in candidate_resolution.items()])
callback(msg=f"Identified {num_candidates} candidate pairs")
remain_candidates_to_resolve = num_candidates
resolution_result = set()
resolution_result_lock = asyncio.Lock()
resolution_batch_size = 100
max_concurrent_tasks = 5
semaphore = asyncio.Semaphore(max_concurrent_tasks)
async def limited_resolve_candidate(candidate_batch, result_set, result_lock):
nonlocal remain_candidates_to_resolve, callback
async with semaphore:
try:
enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
timeout_sec = 280 if enable_timeout_assertion else 1_000_000_000
try:
await asyncio.wait_for(
self._resolve_candidate(candidate_batch, result_set, result_lock, task_id),
timeout=timeout_sec
)
remain_candidates_to_resolve -= len(candidate_batch[1])
callback(
msg=f"Resolved {len(candidate_batch[1])} pairs, "
f"{remain_candidates_to_resolve} remain."
)
except asyncio.TimeoutError:
logging.warning(f"Timeout resolving {candidate_batch}, skipping...")
remain_candidates_to_resolve -= len(candidate_batch[1])
callback(
msg=f"Failed to resolve {len(candidate_batch[1])} pairs due to timeout, skipped. "
f"{remain_candidates_to_resolve} remain."
)
except Exception as exception:
logging.error(f"Error resolving candidate batch: {exception}")
tasks = []
for key, lst in candidate_resolution.items():
if not lst:
continue
for i in range(0, len(lst), resolution_batch_size):
batch = (key, lst[i:i + resolution_batch_size])
tasks.append(asyncio.create_task(limited_resolve_candidate(batch, resolution_result, resolution_result_lock)))  # wrap in a Task so the cancel() calls in the except branch work
try:
await asyncio.gather(*tasks, return_exceptions=False)
except Exception as e:
logging.error(f"Error resolving candidate pairs: {e}")
for t in tasks:
t.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
raise
callback(msg=f"Resolved {num_candidates} candidate pairs, {len(resolution_result)} of them are selected to merge.")
change = GraphChange()
connect_graph = nx.Graph()
connect_graph.add_edges_from(resolution_result)
async def limited_merge_nodes(graph, nodes, change):
async with semaphore:
await self._merge_graph_nodes(graph, nodes, change, task_id)
tasks = []
for sub_connect_graph in nx.connected_components(connect_graph):
merging_nodes = list(sub_connect_graph)
tasks.append(asyncio.create_task(limited_merge_nodes(graph, merging_nodes, change)))
try:
await asyncio.gather(*tasks, return_exceptions=False)
except Exception as e:
logging.error(f"Error merging nodes: {e}")
for t in tasks:
t.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
raise
# Update pagerank
pr = nx.pagerank(graph)
for node_name, pagerank in pr.items():
graph.nodes[node_name]["pagerank"] = pagerank
return EntityResolutionResult(
graph=graph,
change=change,
)
async def _resolve_candidate(self, candidate_resolution_i: tuple[str, list[tuple[str, str]]], resolution_result: set[str], resolution_result_lock: asyncio.Lock, task_id: str = ""):
if task_id:
if has_canceled(task_id):
logging.info(f"Task {task_id} cancelled during entity resolution candidate processing.")
raise TaskCanceledException(f"Task {task_id} was cancelled")
pair_txt = [
f'When determining whether two {candidate_resolution_i[0]}s are the same, you should only focus on critical properties and overlook noisy factors.\n']
for index, candidate in enumerate(candidate_resolution_i[1]):
pair_txt.append(
f'Question {index + 1}: name of {candidate_resolution_i[0]} A is {candidate[0]}, name of {candidate_resolution_i[0]} B is {candidate[1]}')
num_questions = len(candidate_resolution_i[1])
sent = 'question above' if num_questions == 1 else f'above {num_questions} questions'
pair_txt.append(
f'\nUse domain knowledge of {candidate_resolution_i[0]}s to help understand the text and answer the {sent} in the format: For Question i, Yes, {candidate_resolution_i[0]} A and {candidate_resolution_i[0]} B are the same {candidate_resolution_i[0]}./No, {candidate_resolution_i[0]} A and {candidate_resolution_i[0]} B are different {candidate_resolution_i[0]}s. For Question i+1, (repeat the above procedures)')
pair_prompt = '\n'.join(pair_txt)
variables = {
**self.prompt_variables,
self._input_text_key: pair_prompt
}
text = perform_variable_replacements(self._resolution_prompt, variables=variables)
logging.info(f"Created resolution prompt {len(text)} bytes for {len(candidate_resolution_i[1])} entity pairs of type {candidate_resolution_i[0]}")
async with chat_limiter:
timeout_seconds = 280 if os.environ.get("ENABLE_TIMEOUT_ASSERTION") else 1000000000
try:
response = await asyncio.wait_for(
asyncio.to_thread(
self._chat,
text,
[{"role": "user", "content": "Output:"}],
{},
task_id
),
timeout=timeout_seconds,
)
except asyncio.TimeoutError:
logging.warning("_resolve_candidate._chat timeout, skipping...")
return
except Exception as e:
logging.error(f"_resolve_candidate._chat failed: {e}")
return
logging.debug(f"_resolve_candidate chat prompt: {text}\nchat response: {response}")
result = self._process_results(len(candidate_resolution_i[1]), response,
self.prompt_variables.get(self._record_delimiter_key,
DEFAULT_RECORD_DELIMITER),
self.prompt_variables.get(self._entity_index_delimiter_key,
DEFAULT_ENTITY_INDEX_DELIMITER),
self.prompt_variables.get(self._resolution_result_delimiter_key,
DEFAULT_RESOLUTION_RESULT_DELIMITER))
async with resolution_result_lock:
for result_i in result:
resolution_result.add(candidate_resolution_i[1][result_i[0] - 1])
def _process_results(
self,
records_length: int,
results: str,
record_delimiter: str,
entity_index_delimiter: str,
resolution_result_delimiter: str
) -> list:
ans_list = []
records = [r.strip() for r in results.split(record_delimiter)]
for record in records:
pattern_int = fr"{re.escape(entity_index_delimiter)}(\d+){re.escape(entity_index_delimiter)}"
match_int = re.search(pattern_int, record)
res_int = int(str(match_int.group(1) if match_int else '0'))
if res_int > records_length:
continue
pattern_bool = f"{re.escape(resolution_result_delimiter)}([a-zA-Z]+){re.escape(resolution_result_delimiter)}"
match_bool = re.search(pattern_bool, record)
res_bool = str(match_bool.group(1) if match_bool else '')
if res_int and res_bool:
if res_bool.lower() == 'yes':
ans_list.append((res_int, "yes"))
return ans_list
def _has_digit_in_2gram_diff(self, a, b):
def to_2gram_set(s):
return {s[i:i+2] for i in range(len(s) - 1)}
set_a = to_2gram_set(a)
set_b = to_2gram_set(b)
diff = set_a ^ set_b
return any(any(c.isdigit() for c in pair) for pair in diff)
def is_similarity(self, a, b):
if self._has_digit_in_2gram_diff(a, b):
return False
if is_english(a) and is_english(b):
if editdistance.eval(a, b) <= min(len(a), len(b)) // 2:
return True
return False
a, b = set(a), set(b)
max_l = max(len(a), len(b))
if max_l < 4:
return len(a & b) > 1
return len(a & b)*1./max_l >= 0.8
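# Example behaviour of the similarity gate above (a sketch; exact results for
# the English branch depend on rag.nlp.is_english):
#   - "television" vs "televsion": edit distance 1 <= min_len // 2 -> True
#   - "GPT-4" vs "GPT-5": a digit appears in the 2-gram diff -> False
#   - non-English strings fall back to character-set overlap (>= 0.8 of the
#     longer string, or > 1 shared character for very short names).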
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/light/graph_prompt.py | graphrag/light/graph_prompt.py | # Licensed under the MIT License
"""
Reference:
- [LightRAG](https://github.com/HKUDS/LightRAG/blob/main/lightrag/prompt.py)
"""
from typing import Any
PROMPTS: dict[str, Any] = {}
PROMPTS["DEFAULT_LANGUAGE"] = "English"
PROMPTS["DEFAULT_TUPLE_DELIMITER"] = "<|>"
PROMPTS["DEFAULT_RECORD_DELIMITER"] = "##"
PROMPTS["DEFAULT_COMPLETION_DELIMITER"] = "<|COMPLETE|>"
PROMPTS["DEFAULT_ENTITY_TYPES"] = ["organization", "person", "geo", "event", "category"]
PROMPTS["DEFAULT_USER_PROMPT"] = "n/a"
PROMPTS["entity_extraction"] = """---Goal---
Given a text document that is potentially relevant to this activity and a list of entity types, identify all entities of those types from the text and all relationships among the identified entities.
Use {language} as output language.
---Steps---
1. Identify all entities. For each identified entity, extract the following information:
- entity_name: Name of the entity, using the same language as the input text. If English, capitalize the name.
- entity_type: One of the following types: [{entity_types}]
- entity_description: Provide a comprehensive description of the entity's attributes and activities *based solely on the information present in the input text*. **Do not infer or hallucinate information not explicitly stated.** If the text provides insufficient information to create a comprehensive description, state "Description not available in text."
Format each entity as ("entity"{tuple_delimiter}<entity_name>{tuple_delimiter}<entity_type>{tuple_delimiter}<entity_description>)
2. From the entities identified in step 1, identify all pairs of (source_entity, target_entity) that are *clearly related* to each other.
For each pair of related entities, extract the following information:
- source_entity: name of the source entity, as identified in step 1
- target_entity: name of the target entity, as identified in step 1
- relationship_description: explanation as to why you think the source entity and the target entity are related to each other
- relationship_strength: a numeric score indicating strength of the relationship between the source entity and target entity
- relationship_keywords: one or more high-level key words that summarize the overarching nature of the relationship, focusing on concepts or themes rather than specific details
Format each relationship as ("relationship"{tuple_delimiter}<source_entity>{tuple_delimiter}<target_entity>{tuple_delimiter}<relationship_description>{tuple_delimiter}<relationship_keywords>{tuple_delimiter}<relationship_strength>)
3. Identify high-level key words that summarize the main concepts, themes, or topics of the entire text. These should capture the overarching ideas present in the document.
Format the content-level key words as ("content_keywords"{tuple_delimiter}<high_level_keywords>)
4. Return output in {language} as a single list of all the entities and relationships identified in steps 1 and 2. Use **{record_delimiter}** as the list delimiter.
5. When finished, output {completion_delimiter}
######################
---Examples---
######################
{examples}
#############################
---Real Data---
######################
Entity_types: [{entity_types}]
Text:
{input_text}
######################
Output:"""
PROMPTS["entity_extraction_examples"] = [
"""Example 1:
Entity_types: [person, technology, mission, organization, location]
Text:
```
while Alex clenched his jaw, the buzz of frustration dull against the backdrop of Taylor's authoritarian certainty. It was this competitive undercurrent that kept him alert, the sense that his and Jordan's shared commitment to discovery was an unspoken rebellion against Cruz's narrowing vision of control and order.
Then Taylor did something unexpected. They paused beside Jordan and, for a moment, observed the device with something akin to reverence. "If this tech can be understood..." Taylor said, their voice quieter, "It could change the game for us. For all of us."
The underlying dismissal earlier seemed to falter, replaced by a glimpse of reluctant respect for the gravity of what lay in their hands. Jordan looked up, and for a fleeting heartbeat, their eyes locked with Taylor's, a wordless clash of wills softening into an uneasy truce.
It was a small transformation, barely perceptible, but one that Alex noted with an inward nod. They had all been brought here by different paths
```
Output:
("entity"{tuple_delimiter}"Alex"{tuple_delimiter}"person"{tuple_delimiter}"Alex is a character who experiences frustration and is observant of the dynamics among other characters."){record_delimiter}
("entity"{tuple_delimiter}"Taylor"{tuple_delimiter}"person"{tuple_delimiter}"Taylor is portrayed with authoritarian certainty and shows a moment of reverence towards a device, indicating a change in perspective."){record_delimiter}
("entity"{tuple_delimiter}"Jordan"{tuple_delimiter}"person"{tuple_delimiter}"Jordan shares a commitment to discovery and has a significant interaction with Taylor regarding a device."){record_delimiter}
("entity"{tuple_delimiter}"Cruz"{tuple_delimiter}"person"{tuple_delimiter}"Cruz is associated with a vision of control and order, influencing the dynamics among other characters."){record_delimiter}
("entity"{tuple_delimiter}"The Device"{tuple_delimiter}"technology"{tuple_delimiter}"The Device is central to the story, with potential game-changing implications, and is revered by Taylor."){record_delimiter}
("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"Taylor"{tuple_delimiter}"Alex is affected by Taylor's authoritarian certainty and observes changes in Taylor's attitude towards the device."{tuple_delimiter}"power dynamics, perspective shift"{tuple_delimiter}7){record_delimiter}
("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"Jordan"{tuple_delimiter}"Alex and Jordan share a commitment to discovery, which contrasts with Cruz's vision."{tuple_delimiter}"shared goals, rebellion"{tuple_delimiter}6){record_delimiter}
("relationship"{tuple_delimiter}"Taylor"{tuple_delimiter}"Jordan"{tuple_delimiter}"Taylor and Jordan interact directly regarding the device, leading to a moment of mutual respect and an uneasy truce."{tuple_delimiter}"conflict resolution, mutual respect"{tuple_delimiter}8){record_delimiter}
("relationship"{tuple_delimiter}"Jordan"{tuple_delimiter}"Cruz"{tuple_delimiter}"Jordan's commitment to discovery is in rebellion against Cruz's vision of control and order."{tuple_delimiter}"ideological conflict, rebellion"{tuple_delimiter}5){record_delimiter}
("relationship"{tuple_delimiter}"Taylor"{tuple_delimiter}"The Device"{tuple_delimiter}"Taylor shows reverence towards the device, indicating its importance and potential impact."{tuple_delimiter}"reverence, technological significance"{tuple_delimiter}9){record_delimiter}
("content_keywords"{tuple_delimiter}"power dynamics, ideological conflict, discovery, rebellion"){completion_delimiter}
#############################""",
"""Example 2:
Entity_types: [company, index, commodity, market_trend, economic_policy, biological]
Text:
```
Stock markets faced a sharp downturn today as tech giants saw significant declines, with the Global Tech Index dropping by 3.4% in midday trading. Analysts attribute the selloff to investor concerns over rising interest rates and regulatory uncertainty.
Among the hardest hit, Nexon Technologies saw its stock plummet by 7.8% after reporting lower-than-expected quarterly earnings. In contrast, Omega Energy posted a modest 2.1% gain, driven by rising oil prices.
Meanwhile, commodity markets reflected a mixed sentiment. Gold futures rose by 1.5%, reaching $2,080 per ounce, as investors sought safe-haven assets. Crude oil prices continued their rally, climbing to $87.60 per barrel, supported by supply constraints and strong demand.
Financial experts are closely watching the Federal Reserve's next move, as speculation grows over potential rate hikes. The upcoming policy announcement is expected to influence investor confidence and overall market stability.
```
Output:
("entity"{tuple_delimiter}"Global Tech Index"{tuple_delimiter}"index"{tuple_delimiter}"The Global Tech Index tracks the performance of major technology stocks and experienced a 3.4% decline today."){record_delimiter}
("entity"{tuple_delimiter}"Nexon Technologies"{tuple_delimiter}"company"{tuple_delimiter}"Nexon Technologies is a tech company that saw its stock decline by 7.8% after disappointing earnings."){record_delimiter}
("entity"{tuple_delimiter}"Omega Energy"{tuple_delimiter}"company"{tuple_delimiter}"Omega Energy is an energy company that gained 2.1% in stock value due to rising oil prices."){record_delimiter}
("entity"{tuple_delimiter}"Gold Futures"{tuple_delimiter}"commodity"{tuple_delimiter}"Gold futures rose by 1.5%, indicating increased investor interest in safe-haven assets."){record_delimiter}
("entity"{tuple_delimiter}"Crude Oil"{tuple_delimiter}"commodity"{tuple_delimiter}"Crude oil prices rose to $87.60 per barrel due to supply constraints and strong demand."){record_delimiter}
("entity"{tuple_delimiter}"Market Selloff"{tuple_delimiter}"market_trend"{tuple_delimiter}"Market selloff refers to the significant decline in stock values due to investor concerns over interest rates and regulations."){record_delimiter}
("entity"{tuple_delimiter}"Federal Reserve Policy Announcement"{tuple_delimiter}"economic_policy"{tuple_delimiter}"The Federal Reserve's upcoming policy announcement is expected to impact investor confidence and market stability."){record_delimiter}
("relationship"{tuple_delimiter}"Global Tech Index"{tuple_delimiter}"Market Selloff"{tuple_delimiter}"The decline in the Global Tech Index is part of the broader market selloff driven by investor concerns."{tuple_delimiter}"market performance, investor sentiment"{tuple_delimiter}9){record_delimiter}
("relationship"{tuple_delimiter}"Nexon Technologies"{tuple_delimiter}"Global Tech Index"{tuple_delimiter}"Nexon Technologies' stock decline contributed to the overall drop in the Global Tech Index."{tuple_delimiter}"company impact, index movement"{tuple_delimiter}8){record_delimiter}
("relationship"{tuple_delimiter}"Gold Futures"{tuple_delimiter}"Market Selloff"{tuple_delimiter}"Gold prices rose as investors sought safe-haven assets during the market selloff."{tuple_delimiter}"market reaction, safe-haven investment"{tuple_delimiter}10){record_delimiter}
("relationship"{tuple_delimiter}"Federal Reserve Policy Announcement"{tuple_delimiter}"Market Selloff"{tuple_delimiter}"Speculation over Federal Reserve policy changes contributed to market volatility and investor selloff."{tuple_delimiter}"interest rate impact, financial regulation"{tuple_delimiter}7){record_delimiter}
("content_keywords"{tuple_delimiter}"market downturn, investor sentiment, commodities, Federal Reserve, stock performance"){completion_delimiter}
#############################""",
"""Example 3:
Entity_types: [economic_policy, athlete, event, location, record, organization, equipment]
Text:
```
At the World Athletics Championship in Tokyo, Noah Carter broke the 100m sprint record using cutting-edge carbon-fiber spikes.
```
Output:
("entity"{tuple_delimiter}"World Athletics Championship"{tuple_delimiter}"event"{tuple_delimiter}"The World Athletics Championship is a global sports competition featuring top athletes in track and field."){record_delimiter}
("entity"{tuple_delimiter}"Tokyo"{tuple_delimiter}"location"{tuple_delimiter}"Tokyo is the host city of the World Athletics Championship."){record_delimiter}
("entity"{tuple_delimiter}"Noah Carter"{tuple_delimiter}"athlete"{tuple_delimiter}"Noah Carter is a sprinter who set a new record in the 100m sprint at the World Athletics Championship."){record_delimiter}
("entity"{tuple_delimiter}"100m Sprint Record"{tuple_delimiter}"record"{tuple_delimiter}"The 100m sprint record is a benchmark in athletics, recently broken by Noah Carter."){record_delimiter}
("entity"{tuple_delimiter}"Carbon-Fiber Spikes"{tuple_delimiter}"equipment"{tuple_delimiter}"Carbon-fiber spikes are advanced sprinting shoes that provide enhanced speed and traction."){record_delimiter}
("entity"{tuple_delimiter}"World Athletics Federation"{tuple_delimiter}"organization"{tuple_delimiter}"The World Athletics Federation is the governing body overseeing the World Athletics Championship and record validations."){record_delimiter}
("relationship"{tuple_delimiter}"World Athletics Championship"{tuple_delimiter}"Tokyo"{tuple_delimiter}"The World Athletics Championship is being hosted in Tokyo."{tuple_delimiter}"event location, international competition"{tuple_delimiter}8){record_delimiter}
("relationship"{tuple_delimiter}"Noah Carter"{tuple_delimiter}"100m Sprint Record"{tuple_delimiter}"Noah Carter set a new 100m sprint record at the championship."{tuple_delimiter}"athlete achievement, record-breaking"{tuple_delimiter}10){record_delimiter}
("relationship"{tuple_delimiter}"Noah Carter"{tuple_delimiter}"Carbon-Fiber Spikes"{tuple_delimiter}"Noah Carter used carbon-fiber spikes to enhance performance during the race."{tuple_delimiter}"athletic equipment, performance boost"{tuple_delimiter}7){record_delimiter}
("relationship"{tuple_delimiter}"World Athletics Federation"{tuple_delimiter}"100m Sprint Record"{tuple_delimiter}"The World Athletics Federation is responsible for validating and recognizing new sprint records."{tuple_delimiter}"sports regulation, record certification"{tuple_delimiter}9){record_delimiter}
("content_keywords"{tuple_delimiter}"athletics, sprinting, record-breaking, sports technology, competition"){completion_delimiter}
#############################""",
]
PROMPTS["summarize_entity_descriptions"] = """You are a helpful assistant responsible for generating a comprehensive summary of the data provided below.
You are given one or two entities and a list of descriptions, all related to the same entity or group of entities.
Please concatenate all of these into a single, comprehensive description. Make sure to include information collected from all the descriptions.
If the provided descriptions are contradictory, please resolve the contradictions and provide a single, coherent summary.
Make sure it is written in the third person, and include the entity names so that we have the full context.
Use {language} as output language.
#######
---Data---
Entities: {entity_name}
Description List: {description_list}
#######
Output:
"""
PROMPTS["entity_continue_extraction"] = """
MANY entities and relationships were missed in the last extraction. Please find only the missing entities and relationships from the previous text.
---Remember Steps---
1. Identify all entities. For each identified entity, extract the following information:
- entity_name: Name of the entity, using the same language as the input text. If English, capitalize the name
- entity_type: One of the following types: [{entity_types}]
- entity_description: Provide a comprehensive description of the entity's attributes and activities *based solely on the information present in the input text*. **Do not infer or hallucinate information not explicitly stated.** If the text provides insufficient information to create a comprehensive description, state "Description not available in text."
Format each entity as ("entity"{tuple_delimiter}<entity_name>{tuple_delimiter}<entity_type>{tuple_delimiter}<entity_description>)
2. From the entities identified in step 1, identify all pairs of (source_entity, target_entity) that are *clearly related* to each other.
For each pair of related entities, extract the following information:
- source_entity: name of the source entity, as identified in step 1
- target_entity: name of the target entity, as identified in step 1
- relationship_description: explanation as to why you think the source entity and the target entity are related to each other
- relationship_strength: a numeric score indicating strength of the relationship between the source entity and target entity
- relationship_keywords: one or more high-level key words that summarize the overarching nature of the relationship, focusing on concepts or themes rather than specific details
Format each relationship as ("relationship"{tuple_delimiter}<source_entity>{tuple_delimiter}<target_entity>{tuple_delimiter}<relationship_description>{tuple_delimiter}<relationship_keywords>{tuple_delimiter}<relationship_strength>)
3. Identify high-level key words that summarize the main concepts, themes, or topics of the entire text. These should capture the overarching ideas present in the document.
Format the content-level key words as ("content_keywords"{tuple_delimiter}<high_level_keywords>)
4. Return output in {language} as a single list of all the entities and relationships identified in steps 1 and 2. Use **{record_delimiter}** as the list delimiter.
5. When finished, output {completion_delimiter}
---Output---
Add new entities and relations below using the same format, and do not include entities and relations that have been previously extracted:\n
""".strip()
PROMPTS["entity_if_loop_extraction"] = """
---Goal---
It appears some entities may still have been missed.
---Output---
Answer ONLY by `YES` OR `NO` if there are still entities that need to be added.
""".strip()
PROMPTS["fail_response"] = "Sorry, I'm not able to provide an answer to that question.[no-context]"
PROMPTS["rag_response"] = """---Role---
You are a helpful assistant responding to a user query about the Knowledge Graph and Document Chunks provided in JSON format below.
---Goal---
Generate a concise response based on the Knowledge Base and follow the Response Rules, considering both the current query and the conversation history if provided. Summarize all information in the provided Knowledge Base, incorporating general knowledge relevant to it. Do not include information not provided by the Knowledge Base.
---Conversation History---
{history}
---Knowledge Graph and Document Chunks---
{context_data}
---RESPONSE GUIDELINES---
**1. Content & Adherence:**
- Strictly adhere to the provided context from the Knowledge Base. Do not invent, assume, or include any information not present in the source data.
- If the answer cannot be found in the provided context, state that you do not have enough information to answer.
- Ensure the response maintains continuity with the conversation history.
**2. Formatting & Language:**
- Format the response using markdown with appropriate section headings.
- The response must be in the same language as the user's question.
- Target format and length: {response_type}
**3. Citations / References:**
- At the end of the response, under a "References" section, each citation must clearly indicate its origin (KG or DC).
- The maximum number of citations is 5, including both KG and DC.
- Use the following formats for citations:
- For a Knowledge Graph Entity: `[KG] <entity_name>`
- For a Knowledge Graph Relationship: `[KG] <entity1_name> - <entity2_name>`
- For a Document Chunk: `[DC] <file_path_or_document_name>`
---USER CONTEXT---
- Additional user prompt: {user_prompt}
Response:"""
PROMPTS["keywords_extraction"] = """---Role---
You are an expert keyword extractor, specializing in analyzing user queries for a Retrieval-Augmented Generation (RAG) system. Your purpose is to identify both high-level and low-level keywords in the user's query that will be used for effective document retrieval.
---Goal---
Given a user query, your task is to extract two distinct types of keywords:
1. **high_level_keywords**: for overarching concepts or themes, capturing the user's core intent, the subject area, or the type of question being asked.
2. **low_level_keywords**: for specific entities or details, identifying the specific entities, proper nouns, technical jargon, product names, or concrete items.
---Instructions & Constraints---
1. **Output Format**: Your output MUST be a valid JSON object and nothing else. Do not include any explanatory text, markdown code fences (like ```json), or any other text before or after the JSON. It will be parsed directly by a JSON parser.
2. **Source of Truth**: All keywords must be explicitly derived from the user query, with both high-level and low-level keyword categories required to contain content.
3. **Concise & Meaningful**: Keywords should be concise words or meaningful phrases. Prioritize multi-word phrases when they represent a single concept. For example, from "latest financial report of Apple Inc.", you should extract "latest financial report" and "Apple Inc." rather than "latest", "financial", "report", and "Apple".
4. **Handle Edge Cases**: For queries that are too simple, vague, or nonsensical (e.g., "hello", "ok", "asdfghjkl"), you must return a JSON object with empty lists for both keyword types.
---Examples---
{examples}
---Real Data---
User Query: {query}
---Output---
"""
PROMPTS["keywords_extraction_examples"] = [
"""Example 1:
Query: "How does international trade influence global economic stability?"
Output:
{
"high_level_keywords": ["International trade", "Global economic stability", "Economic impact"],
"low_level_keywords": ["Trade agreements", "Tariffs", "Currency exchange", "Imports", "Exports"]
}
""",
"""Example 2:
Query: "What are the environmental consequences of deforestation on biodiversity?"
Output:
{
"high_level_keywords": ["Environmental consequences", "Deforestation", "Biodiversity loss"],
"low_level_keywords": ["Species extinction", "Habitat destruction", "Carbon emissions", "Rainforest", "Ecosystem"]
}
""",
"""Example 3:
Query: "What is the role of education in reducing poverty?"
Output:
{
"high_level_keywords": ["Education", "Poverty reduction", "Socioeconomic development"],
"low_level_keywords": ["School access", "Literacy rates", "Job training", "Income inequality"]
}
""",
]
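# Hedged sketch (illustrative, not part of the pipeline): the prompt above
# demands bare JSON, but a defensive caller may still strip stray code fences
# before handing the payload to json.loads, and fall back to empty lists.
def _example_parse_keywords(raw: str) -> dict[str, list[str]]:
    import json
    import re
    cleaned = re.sub(r"```(?:json)?", "", raw).strip()
    try:
        data = json.loads(cleaned)
    except json.JSONDecodeError:
        data = {}
    return {
        "high_level_keywords": list(data.get("high_level_keywords", [])),
        "low_level_keywords": list(data.get("low_level_keywords", [])),
    }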
PROMPTS["naive_rag_response"] = """---Role---
You are a helpful assistant responding to a user query about the Document Chunks provided in JSON format below.
---Goal---
Generate a concise response based on the Document Chunks and follow the Response Rules, considering both the conversation history and the current query. Summarize all information in the provided Document Chunks, incorporating general knowledge relevant to them. Do not include information not provided by the Document Chunks.
---Conversation History---
{history}
---Document Chunks(DC)---
{content_data}
---RESPONSE GUIDELINES---
**1. Content & Adherence:**
- Strictly adhere to the provided context from the Document Chunks. Do not invent, assume, or include any information not present in the source data.
- If the answer cannot be found in the provided context, state that you do not have enough information to answer.
- Ensure the response maintains continuity with the conversation history.
**2. Formatting & Language:**
- Format the response using markdown with appropriate section headings.
- The response language must match the user's question language.
- Target format and length: {response_type}
**3. Citations / References:**
- At the end of the response, under a "References" section, cite a maximum of 5 most relevant sources used.
- Use the following formats for citations: `[DC] <file_path_or_document_name>`
---USER CONTEXT---
- Additional user prompt: {user_prompt}
Response:"""
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/light/smoke.py | graphrag/light/smoke.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import asyncio
import json
import networkx as nx
import logging
from common.constants import LLMType
from api.db.services.document_service import DocumentService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api.db.services.user_service import TenantService
from graphrag.general.index import update_graph
from graphrag.light.graph_extractor import GraphExtractor
from common import settings
settings.init_settings()
def callback(prog=None, msg="Processing..."):
logging.info(msg)
async def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-t",
"--tenant_id",
default=False,
help="Tenant ID",
action="store",
required=True,
)
parser.add_argument(
"-d",
"--doc_id",
default=False,
help="Document ID",
action="store",
required=True,
)
args = parser.parse_args()
e, doc = DocumentService.get_by_id(args.doc_id)
if not e:
raise LookupError("Document not found.")
kb_id = doc.kb_id
chunks = [
d["content_with_weight"]
for d in settings.retriever.chunk_list(
args.doc_id,
args.tenant_id,
[kb_id],
max_count=6,
fields=["content_with_weight"],
)
]
_, tenant = TenantService.get_by_id(args.tenant_id)
llm_bdl = LLMBundle(args.tenant_id, LLMType.CHAT, tenant.llm_id)
_, kb = KnowledgebaseService.get_by_id(kb_id)
embed_bdl = LLMBundle(args.tenant_id, LLMType.EMBEDDING, kb.embd_id)
graph, doc_ids = await update_graph(
GraphExtractor,
args.tenant_id,
kb_id,
args.doc_id,
chunks,
"English",
llm_bdl,
embed_bdl,
callback,
)
print(json.dumps(nx.node_link_data(graph), ensure_ascii=False, indent=2))
if __name__ == "__main__":
    asyncio.run(main())
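# Usage sketch (the IDs below are placeholders, not real values):
#   python -m graphrag.light.smoke -t <tenant_id> -d <doc_id>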
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/light/__init__.py | graphrag/light/__init__.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/light/graph_extractor.py | graphrag/light/graph_extractor.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
- [graphrag](https://github.com/microsoft/graphrag)
"""
import asyncio
import logging
import re
from dataclasses import dataclass
from typing import Any
import networkx as nx
from graphrag.general.extractor import ENTITY_EXTRACTION_MAX_GLEANINGS, Extractor
from graphrag.light.graph_prompt import PROMPTS
from graphrag.utils import chat_limiter, pack_user_ass_to_openai_messages, split_string_by_multi_markers
from rag.llm.chat_model import Base as CompletionLLM
from common.token_utils import num_tokens_from_string
@dataclass
class GraphExtractionResult:
"""Unipartite graph extraction result class definition."""
output: nx.Graph
source_docs: dict[Any, Any]
class GraphExtractor(Extractor):
_max_gleanings: int
def __init__(
self,
llm_invoker: CompletionLLM,
language: str | None = "English",
entity_types: list[str] | None = None,
example_number: int = 2,
max_gleanings: int | None = None,
):
        """Init method definition."""
        super().__init__(llm_invoker, language, entity_types)
self._max_gleanings = max_gleanings if max_gleanings is not None else ENTITY_EXTRACTION_MAX_GLEANINGS
self._example_number = example_number
examples = "\n".join(PROMPTS["entity_extraction_examples"][: int(self._example_number)])
example_context_base = dict(
tuple_delimiter=PROMPTS["DEFAULT_TUPLE_DELIMITER"],
record_delimiter=PROMPTS["DEFAULT_RECORD_DELIMITER"],
completion_delimiter=PROMPTS["DEFAULT_COMPLETION_DELIMITER"],
entity_types=",".join(self._entity_types),
language=self._language,
)
# add example's format
examples = examples.format(**example_context_base)
self._entity_extract_prompt = PROMPTS["entity_extraction"]
self._context_base = dict(
tuple_delimiter=PROMPTS["DEFAULT_TUPLE_DELIMITER"],
record_delimiter=PROMPTS["DEFAULT_RECORD_DELIMITER"],
completion_delimiter=PROMPTS["DEFAULT_COMPLETION_DELIMITER"],
entity_types=",".join(self._entity_types),
examples=examples,
language=self._language,
)
self._continue_prompt = PROMPTS["entity_continue_extraction"].format(**self._context_base)
self._if_loop_prompt = PROMPTS["entity_if_loop_extraction"]
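        # Token budget for the chunk text: whatever remains of the context
        # window after the filled extraction prompt, floored at 60% of the
        # window so an oversized prompt cannot starve the input text.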
self._left_token_count = llm_invoker.max_length - num_tokens_from_string(self._entity_extract_prompt.format(**self._context_base, input_text=""))
self._left_token_count = max(llm_invoker.max_length * 0.6, self._left_token_count)
async def _process_single_content(self, chunk_key_dp: tuple[str, str], chunk_seq: int, num_chunks: int, out_results, task_id=""):
token_count = 0
chunk_key = chunk_key_dp[0]
content = chunk_key_dp[1]
hint_prompt = self._entity_extract_prompt.format(**self._context_base, input_text=content)
gen_conf = {}
logging.info(f"Start processing for {chunk_key}: {content[:25]}...")
if self.callback:
self.callback(msg=f"Start processing for {chunk_key}: {content[:25]}...")
async with chat_limiter:
            final_result = await asyncio.to_thread(self._chat, "", [{"role": "user", "content": hint_prompt}], gen_conf, task_id)
token_count += num_tokens_from_string(hint_prompt + final_result)
history = pack_user_ass_to_openai_messages(hint_prompt, final_result, self._continue_prompt)
for now_glean_index in range(self._max_gleanings):
async with chat_limiter:
                glean_result = await asyncio.to_thread(self._chat, "", history, gen_conf, task_id)
history.extend([{"role": "assistant", "content": glean_result}])
token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + hint_prompt + self._continue_prompt)
final_result += glean_result
if now_glean_index == self._max_gleanings - 1:
break
history.extend([{"role": "user", "content": self._if_loop_prompt}])
async with chat_limiter:
                if_loop_result = await asyncio.to_thread(self._chat, "", history, gen_conf, task_id)
token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + if_loop_result + self._if_loop_prompt)
if_loop_result = if_loop_result.strip().strip('"').strip("'").lower()
if if_loop_result != "yes":
break
history.extend([{"role": "assistant", "content": if_loop_result}, {"role": "user", "content": self._continue_prompt}])
logging.info(f"Completed processing for {chunk_key}: {content[:25]}... after {now_glean_index} gleanings, {token_count} tokens.")
if self.callback:
self.callback(msg=f"Completed processing for {chunk_key}: {content[:25]}... after {now_glean_index} gleanings, {token_count} tokens.")
records = split_string_by_multi_markers(
final_result,
[self._context_base["record_delimiter"], self._context_base["completion_delimiter"]],
)
rcds = []
for record in records:
record = re.search(r"\((.*)\)", record)
if record is None:
continue
rcds.append(record.group(1))
records = rcds
maybe_nodes, maybe_edges = self._entities_and_relations(chunk_key, records, self._context_base["tuple_delimiter"])
out_results.append((maybe_nodes, maybe_edges, token_count))
if self.callback:
self.callback(
0.5 + 0.1 * len(out_results) / num_chunks,
msg=f"Entities extraction of chunk {chunk_seq} {len(out_results)}/{num_chunks} done, {len(maybe_nodes)} nodes, {len(maybe_edges)} edges, {token_count} tokens.",
)
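# A brief note on the gleaning loop above (descriptive summary): after the
# first extraction pass, the model is re-prompted up to _max_gleanings times
# with the continue-extraction prompt to recover missed records, and the
# entity_if_loop_extraction YES/NO gate decides whether another pass runs.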
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/general/mind_map_extractor.py | graphrag/general/mind_map_extractor.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import logging
import collections
import re
from typing import Any
from dataclasses import dataclass
from graphrag.general.extractor import Extractor
from graphrag.general.mind_map_prompt import MIND_MAP_EXTRACTION_PROMPT
from graphrag.utils import ErrorHandlerFn, perform_variable_replacements, chat_limiter
from rag.llm.chat_model import Base as CompletionLLM
import markdown_to_json
from functools import reduce
from common.token_utils import num_tokens_from_string
@dataclass
class MindMapResult:
"""Unipartite Mind Graph result class definition."""
output: dict
class MindMapExtractor(Extractor):
_input_text_key: str
_mind_map_prompt: str
_on_error: ErrorHandlerFn
def __init__(
self,
llm_invoker: CompletionLLM,
prompt: str | None = None,
input_text_key: str | None = None,
on_error: ErrorHandlerFn | None = None,
):
"""Init method definition."""
# TODO: streamline construction
self._llm = llm_invoker
self._input_text_key = input_text_key or "input_text"
self._mind_map_prompt = prompt or MIND_MAP_EXTRACTION_PROMPT
self._on_error = on_error or (lambda _e, _s, _d: None)
def _key(self, k):
return re.sub(r"\*+", "", k)
def _be_children(self, obj: dict, keyset: set):
if isinstance(obj, str):
obj = [obj]
if isinstance(obj, list):
keyset.update(obj)
obj = [re.sub(r"\*+", "", i) for i in obj]
return [{"id": i, "children": []} for i in obj if i]
arr = []
for k, v in obj.items():
k = self._key(k)
if k and k not in keyset:
keyset.add(k)
arr.append(
{
"id": k,
"children": self._be_children(v, keyset)
}
)
return arr
async def __call__(
self, sections: list[str], prompt_variables: dict[str, Any] | None = None
) -> MindMapResult:
"""Call method definition."""
if prompt_variables is None:
prompt_variables = {}
res = []
token_count = max(self._llm.max_length * 0.8, self._llm.max_length - 512)
texts = []
cnt = 0
tasks = []
for i in range(len(sections)):
section_cnt = num_tokens_from_string(sections[i])
if cnt + section_cnt >= token_count and texts:
tasks.append(asyncio.create_task(
self._process_document("".join(texts), prompt_variables, res)
))
texts = []
cnt = 0
texts.append(sections[i])
cnt += section_cnt
if texts:
tasks.append(asyncio.create_task(
self._process_document("".join(texts), prompt_variables, res)
))
try:
await asyncio.gather(*tasks, return_exceptions=False)
except Exception as e:
logging.error(f"Error processing document: {e}")
for t in tasks:
t.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
raise
if not res:
return MindMapResult(output={"id": "root", "children": []})
merge_json = reduce(self._merge, res)
if len(merge_json) > 1:
keys = [re.sub(r"\*+", "", k) for k, v in merge_json.items() if isinstance(v, dict)]
keyset = set(i for i in keys if i)
merge_json = {
"id": "root",
"children": [
{
"id": self._key(k),
"children": self._be_children(v, keyset)
}
for k, v in merge_json.items() if isinstance(v, dict) and self._key(k)
]
}
else:
k = self._key(list(merge_json.keys())[0])
merge_json = {"id": k, "children": self._be_children(list(merge_json.items())[0][1], {k})}
return MindMapResult(output=merge_json)
def _merge(self, d1, d2):
for k in d1:
if k in d2:
if isinstance(d1[k], dict) and isinstance(d2[k], dict):
self._merge(d1[k], d2[k])
elif isinstance(d1[k], list) and isinstance(d2[k], list):
d2[k].extend(d1[k])
else:
d2[k] = d1[k]
else:
d2[k] = d1[k]
return d2
def _list_to_kv(self, data):
for key, value in data.items():
if isinstance(value, dict):
self._list_to_kv(value)
elif isinstance(value, list):
new_value = {}
for i in range(len(value)):
if isinstance(value[i], list) and i > 0:
new_value[value[i - 1]] = value[i][0]
data[key] = new_value
else:
continue
return data
def _todict(self, layer: collections.OrderedDict):
to_ret = layer
if isinstance(layer, collections.OrderedDict):
to_ret = dict(layer)
try:
for key, value in to_ret.items():
to_ret[key] = self._todict(value)
except AttributeError:
pass
return self._list_to_kv(to_ret)
async def _process_document(
self, text: str, prompt_variables: dict[str, str], out_res
) -> str:
variables = {
**prompt_variables,
self._input_text_key: text,
}
text = perform_variable_replacements(self._mind_map_prompt, variables=variables)
async with chat_limiter:
            response = await asyncio.to_thread(self._chat, text, [{"role": "user", "content": "Output:"}], {})
response = re.sub(r"```[^\n]*", "", response)
logging.debug(response)
logging.debug(self._todict(markdown_to_json.dictify(response)))
out_res.append(self._todict(markdown_to_json.dictify(response)))
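# Worked example of the recursive _merge above (illustrative):
#
#   d1 = {"AI": {"RAG": ["retrieval"]}}
#   d2 = {"AI": {"RAG": ["generation"], "Graphs": []}}
#   MindMapExtractor._merge(None, d1, d2)
#   # -> {"AI": {"RAG": ["generation", "retrieval"], "Graphs": []}}
#
# Keys present in both maps are merged depth-first, lists are concatenated,
# and d2 is mutated and returned.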
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/general/community_report_prompt.py | graphrag/general/community_report_prompt.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
- [GraphRAG](https://github.com/microsoft/graphrag/blob/main/graphrag/prompts/index/community_report.py)
"""
COMMUNITY_REPORT_PROMPT = """
You are an AI assistant that helps a human analyst to perform general information discovery. Information discovery is the process of identifying and assessing relevant information associated with certain entities (e.g., organizations and individuals) within a network.
# Goal
Write a comprehensive report of a community, given a list of entities that belong to the community as well as their relationships and optional associated claims. The report will be used to inform decision-makers about information associated with the community and their potential impact. The content of this report includes an overview of the community's key entities, their legal compliance, technical capabilities, reputation, and noteworthy claims.
# Report Structure
The report should include the following sections:
- TITLE: community's name that represents its key entities - title should be short but specific. When possible, include representative named entities in the title.
- SUMMARY: An executive summary of the community's overall structure, how its entities are related to each other, and significant information associated with its entities.
- IMPACT SEVERITY RATING: a float score between 0-10 that represents the severity of IMPACT posed by entities within the community. IMPACT is the scored importance of a community.
- RATING EXPLANATION: Give a single sentence explanation of the IMPACT severity rating.
- DETAILED FINDINGS: A list of 5-10 key insights about the community. Each insight should have a short summary followed by multiple paragraphs of explanatory text grounded according to the grounding rules below. Be comprehensive.
Return output as a well-formed JSON-formatted string with the following format (in the language of the 'Text' content):
{{
"title": <report_title>,
"summary": <executive_summary>,
"rating": <impact_severity_rating>,
"rating_explanation": <rating_explanation>,
"findings": [
{{
"summary":<insight_1_summary>,
"explanation": <insight_1_explanation>
}},
{{
"summary":<insight_2_summary>,
"explanation": <insight_2_explanation>
}}
]
}}
# Grounding Rules
Points supported by data should list their data references as follows:
"This is an example sentence supported by multiple data references [Data: <dataset name> (record ids); <dataset name> (record ids)]."
Do not list more than 5 record ids in a single reference. Instead, list the top 5 most relevant record ids and add "+more" to indicate that there are more.
For example:
"Person X is the owner of Company Y and subject to many allegations of wrongdoing [Data: Reports (1), Entities (5, 7); Relationships (23); Claims (7, 2, 34, 64, 46, +more)]."
where 1, 5, 7, 23, 2, 34, 46, and 64 represent the id (not the index) of the relevant data record.
Do not include information where the supporting evidence for it is not provided.
# Example Input
-----------
Text:
-Entities-
id,entity,description
5,VERDANT OASIS PLAZA,Verdant Oasis Plaza is the location of the Unity March
6,HARMONY ASSEMBLY,Harmony Assembly is an organization that is holding a march at Verdant Oasis Plaza
-Relationships-
id,source,target,description
37,VERDANT OASIS PLAZA,UNITY MARCH,Verdant Oasis Plaza is the location of the Unity March
38,VERDANT OASIS PLAZA,HARMONY ASSEMBLY,Harmony Assembly is holding a march at Verdant Oasis Plaza
39,VERDANT OASIS PLAZA,UNITY MARCH,The Unity March is taking place at Verdant Oasis Plaza
40,VERDANT OASIS PLAZA,TRIBUNE SPOTLIGHT,Tribune Spotlight is reporting on the Unity march taking place at Verdant Oasis Plaza
41,VERDANT OASIS PLAZA,BAILEY ASADI,Bailey Asadi is speaking at Verdant Oasis Plaza about the march
43,HARMONY ASSEMBLY,UNITY MARCH,Harmony Assembly is organizing the Unity March
Output:
{{
"title": "Verdant Oasis Plaza and Unity March",
"summary": "The community revolves around the Verdant Oasis Plaza, which is the location of the Unity March. The plaza has relationships with the Harmony Assembly, Unity March, and Tribune Spotlight, all of which are associated with the march event.",
"rating": 5.0,
"rating_explanation": "The impact severity rating is moderate due to the potential for unrest or conflict during the Unity March.",
"findings": [
{{
"summary": "Verdant Oasis Plaza as the central location",
"explanation": "Verdant Oasis Plaza is the central entity in this community, serving as the location for the Unity March. This plaza is the common link between all other entities, suggesting its significance in the community. The plaza's association with the march could potentially lead to issues such as public disorder or conflict, depending on the nature of the march and the reactions it provokes. [Data: Entities (5), Relationships (37, 38, 39, 40, 41,+more)]"
}},
{{
"summary": "Harmony Assembly's role in the community",
"explanation": "Harmony Assembly is another key entity in this community, being the organizer of the march at Verdant Oasis Plaza. The nature of Harmony Assembly and its march could be a potential source of threat, depending on their objectives and the reactions they provoke. The relationship between Harmony Assembly and the plaza is crucial in understanding the dynamics of this community. [Data: Entities(6), Relationships (38, 43)]"
}},
{{
"summary": "Unity March as a significant event",
"explanation": "The Unity March is a significant event taking place at Verdant Oasis Plaza. This event is a key factor in the community's dynamics and could be a potential source of threat, depending on the nature of the march and the reactions it provokes. The relationship between the march and the plaza is crucial in understanding the dynamics of this community. [Data: Relationships (39)]"
}},
{{
"summary": "Role of Tribune Spotlight",
"explanation": "Tribune Spotlight is reporting on the Unity March taking place in Verdant Oasis Plaza. This suggests that the event has attracted media attention, which could amplify its impact on the community. The role of Tribune Spotlight could be significant in shaping public perception of the event and the entities involved. [Data: Relationships (40)]"
}}
]
}}
# Real Data
Use the following text for your answer. Do not make anything up in your answer.
Text:
-Entities-
{entity_df}
-Relationships-
{relation_df}
The report should include the following sections:
- TITLE: community's name that represents its key entities - title should be short but specific. When possible, include representative named entities in the title.
- SUMMARY: An executive summary of the community's overall structure, how its entities are related to each other, and significant information associated with its entities.
- IMPACT SEVERITY RATING: a float score between 0-10 that represents the severity of IMPACT posed by entities within the community. IMPACT is the scored importance of a community.
- RATING EXPLANATION: Give a single sentence explanation of the IMPACT severity rating.
- DETAILED FINDINGS: A list of 5-10 key insights about the community. Each insight should have a short summary followed by multiple paragraphs of explanatory text grounded according to the grounding rules below. Be comprehensive.
Return output as a well-formed JSON-formatted string with the following format (in the language of the 'Text' content):
{{
"title": <report_title>,
"summary": <executive_summary>,
"rating": <impact_severity_rating>,
"rating_explanation": <rating_explanation>,
"findings": [
{{
"summary":<insight_1_summary>,
"explanation": <insight_1_explanation>
}},
{{
"summary":<insight_2_summary>,
"explanation": <insight_2_explanation>
}}
]
}}
# Grounding Rules
Points supported by data should list their data references as follows:
"This is an example sentence supported by multiple data references [Data: <dataset name> (record ids); <dataset name> (record ids)]."
Do not list more than 5 record ids in a single reference. Instead, list the top 5 most relevant record ids and add "+more" to indicate that there are more.
For example:
"Person X is the owner of Company Y and subject to many allegations of wrongdoing [Data: Reports (1), Entities (5, 7); Relationships (23); Claims (7, 2, 34, 64, 46, +more)]."
where 1, 5, 7, 23, 2, 34, 46, and 64 represent the id (not the index) of the relevant data record.
Do not include information where the supporting evidence for it is not provided.
Output:""" | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/general/extractor.py | graphrag/general/extractor.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import logging
import os
import re
from collections import Counter, defaultdict
from copy import deepcopy
from typing import Callable
import networkx as nx
from api.db.services.task_service import has_canceled
from common.connection_utils import timeout
from common.token_utils import truncate
from graphrag.general.graph_prompt import SUMMARIZE_DESCRIPTIONS_PROMPT
from graphrag.utils import (
GraphChange,
chat_limiter,
flat_uniq_list,
get_from_to,
get_llm_cache,
handle_single_entity_extraction,
handle_single_relationship_extraction,
set_llm_cache,
split_string_by_multi_markers,
)
from rag.llm.chat_model import Base as CompletionLLM
from rag.prompts.generator import message_fit_in
from common.exceptions import TaskCanceledException
GRAPH_FIELD_SEP = "<SEP>"
DEFAULT_ENTITY_TYPES = ["organization", "person", "geo", "event", "category"]
ENTITY_EXTRACTION_MAX_GLEANINGS = 2
MAX_CONCURRENT_PROCESS_AND_EXTRACT_CHUNK = int(os.environ.get("MAX_CONCURRENT_PROCESS_AND_EXTRACT_CHUNK", 10))
class Extractor:
_llm: CompletionLLM
def __init__(
self,
llm_invoker: CompletionLLM,
language: str | None = "English",
entity_types: list[str] | None = None,
):
self._llm = llm_invoker
self._language = language
self._entity_types = entity_types or DEFAULT_ENTITY_TYPES
@timeout(60 * 20)
def _chat(self, system, history, gen_conf={}, task_id=""):
hist = deepcopy(history)
conf = deepcopy(gen_conf)
response = get_llm_cache(self._llm.llm_name, system, hist, conf)
if response:
return response
_, system_msg = message_fit_in([{"role": "system", "content": system}], int(self._llm.max_length * 0.92))
response = ""
for attempt in range(3):
if task_id:
if has_canceled(task_id):
logging.info(f"Task {task_id} cancelled during entity resolution candidate processing.")
raise TaskCanceledException(f"Task {task_id} was cancelled")
try:
response = asyncio.run(self._llm.async_chat(system_msg[0]["content"], hist, conf))
response = re.sub(r"^.*</think>", "", response, flags=re.DOTALL)
if response.find("**ERROR**") >= 0:
raise Exception(response)
set_llm_cache(self._llm.llm_name, system, response, history, gen_conf)
break
except Exception as e:
logging.exception(e)
if attempt == 2:
raise
return response
def _entities_and_relations(self, chunk_key: str, records: list, tuple_delimiter: str):
maybe_nodes = defaultdict(list)
maybe_edges = defaultdict(list)
ent_types = [t.lower() for t in self._entity_types]
for record in records:
record_attributes = split_string_by_multi_markers(record, [tuple_delimiter])
if_entities = handle_single_entity_extraction(record_attributes, chunk_key)
if if_entities is not None and if_entities.get("entity_type", "unknown").lower() in ent_types:
maybe_nodes[if_entities["entity_name"]].append(if_entities)
continue
if_relation = handle_single_relationship_extraction(record_attributes, chunk_key)
if if_relation is not None:
maybe_edges[(if_relation["src_id"], if_relation["tgt_id"])].append(if_relation)
return dict(maybe_nodes), dict(maybe_edges)
async def __call__(self, doc_id: str, chunks: list[str], callback: Callable | None = None, task_id: str = ""):
self.callback = callback
start_ts = asyncio.get_running_loop().time()
async def extract_all(doc_id, chunks, max_concurrency=MAX_CONCURRENT_PROCESS_AND_EXTRACT_CHUNK, task_id=""):
out_results = []
error_count = 0
max_errors = int(os.environ.get("GRAPHRAG_MAX_ERRORS", 3))
limiter = asyncio.Semaphore(max_concurrency)
async def worker(chunk_key_dp: tuple[str, str], idx: int, total: int, task_id=""):
nonlocal error_count
async with limiter:
if task_id and has_canceled(task_id):
raise TaskCanceledException(f"Task {task_id} was cancelled during entity extraction")
try:
await self._process_single_content(chunk_key_dp, idx, total, out_results, task_id)
except Exception as e:
error_count += 1
error_msg = f"Error processing chunk {idx + 1}/{total}: {str(e)}"
logging.warning(error_msg)
if self.callback:
self.callback(msg=error_msg)
if error_count > max_errors:
raise Exception(f"Maximum error count ({max_errors}) reached. Last errors: {str(e)}")
tasks = [
asyncio.create_task(worker((doc_id, ck), i, len(chunks), task_id))
for i, ck in enumerate(chunks)
]
try:
await asyncio.gather(*tasks, return_exceptions=False)
except Exception as e:
logging.error(f"Error in worker: {str(e)}")
for t in tasks:
t.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
raise
if error_count > 0:
warning_msg = f"Completed with {error_count} errors (out of {len(chunks)} chunks processed)"
logging.warning(warning_msg)
if self.callback:
self.callback(msg=warning_msg)
return out_results
if task_id and has_canceled(task_id):
raise TaskCanceledException(f"Task {task_id} was cancelled before entity extraction")
out_results = await extract_all(doc_id, chunks, max_concurrency=MAX_CONCURRENT_PROCESS_AND_EXTRACT_CHUNK, task_id=task_id)
if task_id and has_canceled(task_id):
raise TaskCanceledException(f"Task {task_id} was cancelled after entity extraction")
maybe_nodes = defaultdict(list)
maybe_edges = defaultdict(list)
sum_token_count = 0
for m_nodes, m_edges, token_count in out_results:
for k, v in m_nodes.items():
maybe_nodes[k].extend(v)
for k, v in m_edges.items():
maybe_edges[tuple(sorted(k))].extend(v)
sum_token_count += token_count
now = asyncio.get_running_loop().time()
if self.callback:
self.callback(msg=f"Entities and relationships extraction done, {len(maybe_nodes)} nodes, {len(maybe_edges)} edges, {sum_token_count} tokens, {now - start_ts:.2f}s.")
start_ts = now
logging.info("Entities merging...")
all_entities_data = []
if task_id and has_canceled(task_id):
raise TaskCanceledException(f"Task {task_id} was cancelled before nodes merging")
tasks = [
asyncio.create_task(self._merge_nodes(en_nm, ents, all_entities_data, task_id))
for en_nm, ents in maybe_nodes.items()
]
try:
await asyncio.gather(*tasks, return_exceptions=False)
except Exception as e:
logging.error(f"Error merging nodes: {e}")
for t in tasks:
t.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
raise
if task_id and has_canceled(task_id):
raise TaskCanceledException(f"Task {task_id} was cancelled after nodes merging")
now = asyncio.get_running_loop().time()
if self.callback:
self.callback(msg=f"Entities merging done, {now - start_ts:.2f}s.")
start_ts = now
logging.info("Relationships merging...")
all_relationships_data = []
if task_id and has_canceled(task_id):
raise TaskCanceledException(f"Task {task_id} was cancelled before relationships merging")
tasks = []
for (src, tgt), rels in maybe_edges.items():
tasks.append(
asyncio.create_task(
self._merge_edges(src, tgt, rels, all_relationships_data, task_id)
)
)
try:
await asyncio.gather(*tasks, return_exceptions=False)
except Exception as e:
logging.error(f"Error during relationships merging: {e}")
for t in tasks:
t.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
raise
if task_id and has_canceled(task_id):
raise TaskCanceledException(f"Task {task_id} was cancelled after relationships merging")
now = asyncio.get_running_loop().time()
if self.callback:
self.callback(msg=f"Relationships merging done, {now - start_ts:.2f}s.")
if not len(all_entities_data) and not len(all_relationships_data):
logging.warning("Didn't extract any entities and relationships, maybe your LLM is not working")
if not len(all_entities_data):
logging.warning("Didn't extract any entities")
if not len(all_relationships_data):
logging.warning("Didn't extract any relationships")
return all_entities_data, all_relationships_data
    async def _merge_nodes(self, entity_name: str, entities: list[dict], all_entities_data, task_id=""):
if task_id and has_canceled(task_id):
raise TaskCanceledException(f"Task {task_id} was cancelled during merge nodes")
if not entities:
return
entity_type = sorted(
Counter([dp["entity_type"] for dp in entities]).items(),
key=lambda x: x[1],
reverse=True,
)[0][0]
description = GRAPH_FIELD_SEP.join(sorted(set([dp["description"] for dp in entities])))
already_source_ids = flat_uniq_list(entities, "source_id")
description = await self._handle_entity_relation_summary(entity_name, description, task_id=task_id)
node_data = dict(
entity_type=entity_type,
description=description,
source_id=already_source_ids,
)
node_data["entity_name"] = entity_name
        all_entities_data.append(node_data)
async def _merge_edges(self, src_id: str, tgt_id: str, edges_data: list[dict], all_relationships_data=None, task_id=""):
if not edges_data:
return
weight = sum([edge["weight"] for edge in edges_data])
description = GRAPH_FIELD_SEP.join(sorted(set([edge["description"] for edge in edges_data])))
description = await self._handle_entity_relation_summary(f"{src_id} -> {tgt_id}", description, task_id=task_id)
keywords = flat_uniq_list(edges_data, "keywords")
source_id = flat_uniq_list(edges_data, "source_id")
edge_data = dict(src_id=src_id, tgt_id=tgt_id, description=description, keywords=keywords, weight=weight, source_id=source_id)
all_relationships_data.append(edge_data)
async def _merge_graph_nodes(self, graph: nx.Graph, nodes: list[str], change: GraphChange, task_id=""):
if task_id and has_canceled(task_id):
raise TaskCanceledException(f"Task {task_id} was cancelled during merge graph nodes")
if len(nodes) <= 1:
return
change.added_updated_nodes.add(nodes[0])
change.removed_nodes.update(nodes[1:])
nodes_set = set(nodes)
node0_attrs = graph.nodes[nodes[0]]
node0_neighbors = set(graph.neighbors(nodes[0]))
for node1 in nodes[1:]:
if task_id and has_canceled(task_id):
raise TaskCanceledException(f"Task {task_id} was cancelled during merge_graph nodes")
# Merge two nodes, keep "entity_name", "entity_type", "page_rank" unchanged.
node1_attrs = graph.nodes[node1]
node0_attrs["description"] += f"{GRAPH_FIELD_SEP}{node1_attrs['description']}"
node0_attrs["source_id"] = sorted(set(node0_attrs["source_id"] + node1_attrs["source_id"]))
for neighbor in graph.neighbors(node1):
change.removed_edges.add(get_from_to(node1, neighbor))
if neighbor not in nodes_set:
edge1_attrs = graph.get_edge_data(node1, neighbor)
if neighbor in node0_neighbors:
# Merge two edges
change.added_updated_edges.add(get_from_to(nodes[0], neighbor))
edge0_attrs = graph.get_edge_data(nodes[0], neighbor)
edge0_attrs["weight"] += edge1_attrs["weight"]
edge0_attrs["description"] += f"{GRAPH_FIELD_SEP}{edge1_attrs['description']}"
for attr in ["keywords", "source_id"]:
edge0_attrs[attr] = sorted(set(edge0_attrs[attr] + edge1_attrs[attr]))
edge0_attrs["description"] = await self._handle_entity_relation_summary(f"({nodes[0]}, {neighbor})", edge0_attrs["description"], task_id=task_id)
graph.add_edge(nodes[0], neighbor, **edge0_attrs)
else:
graph.add_edge(nodes[0], neighbor, **edge1_attrs)
graph.remove_node(node1)
node0_attrs["description"] = await self._handle_entity_relation_summary(nodes[0], node0_attrs["description"], task_id=task_id)
graph.nodes[nodes[0]].update(node0_attrs)
async def _handle_entity_relation_summary(self, entity_or_relation_name: str, description: str, task_id="") -> str:
if task_id and has_canceled(task_id):
raise TaskCanceledException(f"Task {task_id} was cancelled during summary handling")
summary_max_tokens = 512
use_description = truncate(description, summary_max_tokens)
description_list = use_description.split(GRAPH_FIELD_SEP)
if len(description_list) <= 12:
return use_description
prompt_template = SUMMARIZE_DESCRIPTIONS_PROMPT
context_base = dict(
entity_name=entity_or_relation_name,
description_list=description_list,
language=self._language,
)
use_prompt = prompt_template.format(**context_base)
logging.info(f"Trigger summary: {entity_or_relation_name}")
if task_id and has_canceled(task_id):
raise TaskCanceledException(f"Task {task_id} was cancelled during summary handling")
async with chat_limiter:
summary = await asyncio.to_thread(self._chat, "", [{"role": "user", "content": use_prompt}], {}, task_id)
return summary
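# Hedged sketch (not referenced by the pipeline): the majority-vote rule that
# _merge_nodes applies when duplicate mentions disagree on the entity type,
# expressed with the Counter already imported at the top of this module.
def _example_majority_entity_type(entities: list[dict]) -> str:
    return Counter(dp["entity_type"] for dp in entities).most_common(1)[0][0]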
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/general/graph_prompt.py | graphrag/general/graph_prompt.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
- [GraphRAG](https://github.com/microsoft/graphrag/blob/main/graphrag/prompts/index/extract_graph.py)
"""
GRAPH_EXTRACTION_PROMPT = """
-Goal-
Given a text document that is potentially relevant to this activity and a list of entity types, identify all entities of those types from the text and all relationships among the identified entities.
-Steps-
1. Identify all entities. For each identified entity, extract the following information:
- entity_name: Name of the entity, capitalized, in the language of 'Text'
- entity_type: One of the following types: [{entity_types}]
- entity_description: Comprehensive description of the entity's attributes and activities, in the language of 'Text'
Format each entity as ("entity"{tuple_delimiter}<entity_name>{tuple_delimiter}<entity_type>{tuple_delimiter}<entity_description>)
2. From the entities identified in step 1, identify all pairs of (source_entity, target_entity) that are *clearly related* to each other.
For each pair of related entities, extract the following information:
- source_entity: name of the source entity, as identified in step 1
- target_entity: name of the target entity, as identified in step 1
- relationship_description: explanation as to why you think the source entity and the target entity are related to each other, in the language of 'Text'
- relationship_strength: a numeric score indicating strength of the relationship between the source entity and target entity
Format each relationship as ("relationship"{tuple_delimiter}<source_entity>{tuple_delimiter}<target_entity>{tuple_delimiter}<relationship_description>{tuple_delimiter}<relationship_strength>)
3. Return output as a single list of all the entities and relationships identified in steps 1 and 2. Use **{record_delimiter}** as the list delimiter.
4. When finished, output {completion_delimiter}
######################
-Examples-
######################
Example 1:
Entity_types: [person, technology, mission, organization, location]
Text:
while Alex clenched his jaw, the buzz of frustration dull against the backdrop of Taylor's authoritarian certainty. It was this competitive undercurrent that kept him alert, the sense that his and Jordan's shared commitment to discovery was an unspoken rebellion against Cruz's narrowing vision of control and order.
Then Taylor did something unexpected. They paused beside Jordan and, for a moment, observed the device with something akin to reverence. “If this tech can be understood..." Taylor said, their voice quieter, "It could change the game for us. For all of us.”
The underlying dismissal earlier seemed to falter, replaced by a glimpse of reluctant respect for the gravity of what lay in their hands. Jordan looked up, and for a fleeting heartbeat, their eyes locked with Taylor's, a wordless clash of wills softening into an uneasy truce.
It was a small transformation, barely perceptible, but one that Alex noted with an inward nod. They had all been brought here by different paths
################
Output:
("entity"{tuple_delimiter}"Alex"{tuple_delimiter}"person"{tuple_delimiter}"Alex is a character who experiences frustration and is observant of the dynamics among other characters."){record_delimiter}
("entity"{tuple_delimiter}"Taylor"{tuple_delimiter}"person"{tuple_delimiter}"Taylor is portrayed with authoritarian certainty and shows a moment of reverence towards a device, indicating a change in perspective."){record_delimiter}
("entity"{tuple_delimiter}"Jordan"{tuple_delimiter}"person"{tuple_delimiter}"Jordan shares a commitment to discovery and has a significant interaction with Taylor regarding a device."){record_delimiter}
("entity"{tuple_delimiter}"Cruz"{tuple_delimiter}"person"{tuple_delimiter}"Cruz is associated with a vision of control and order, influencing the dynamics among other characters."){record_delimiter}
("entity"{tuple_delimiter}"The Device"{tuple_delimiter}"technology"{tuple_delimiter}"The Device is central to the story, with potential game-changing implications, and is revered by Taylor."){record_delimiter}
("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"Taylor"{tuple_delimiter}"Alex is affected by Taylor's authoritarian certainty and observes changes in Taylor's attitude towards the device."{tuple_delimiter}7){record_delimiter}
("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"Jordan"{tuple_delimiter}"Alex and Jordan share a commitment to discovery, which contrasts with Cruz's vision."{tuple_delimiter}6){record_delimiter}
("relationship"{tuple_delimiter}"Taylor"{tuple_delimiter}"Jordan"{tuple_delimiter}"Taylor and Jordan interact directly regarding the device, leading to a moment of mutual respect and an uneasy truce."{tuple_delimiter}8){record_delimiter}
("relationship"{tuple_delimiter}"Jordan"{tuple_delimiter}"Cruz"{tuple_delimiter}"Jordan's commitment to discovery is in rebellion against Cruz's vision of control and order."{tuple_delimiter}5){record_delimiter}
("relationship"{tuple_delimiter}"Taylor"{tuple_delimiter}"The Device"{tuple_delimiter}"Taylor shows reverence towards the device, indicating its importance and potential impact."{tuple_delimiter}9){completion_delimiter}
#############################
Example 2:
Entity_types: [person, technology, mission, organization, location]
Text:
They were no longer mere operatives; they had become guardians of a threshold, keepers of a message from a realm beyond stars and stripes. This elevation in their mission could not be shackled by regulations and established protocols—it demanded a new perspective, a new resolve.
Tension threaded through the dialogue of beeps and static as communications with Washington buzzed in the background. The team stood, a portentous air enveloping them. It was clear that the decisions they made in the ensuing hours could redefine humanity's place in the cosmos or condemn them to ignorance and potential peril.
Their connection to the stars solidified, the group moved to address the crystallizing warning, shifting from passive recipients to active participants. Mercer's latter instincts gained precedence— the team's mandate had evolved, no longer solely to observe and report but to interact and prepare. A metamorphosis had begun, and Operation: Dulce hummed with the newfound frequency of their daring, a tone set not by the earthly
#############
Output:
("entity"{tuple_delimiter}"Washington"{tuple_delimiter}"location"{tuple_delimiter}"Washington is a location where communications are being received, indicating its importance in the decision-making process."){record_delimiter}
("entity"{tuple_delimiter}"Operation: Dulce"{tuple_delimiter}"mission"{tuple_delimiter}"Operation: Dulce is described as a mission that has evolved to interact and prepare, indicating a significant shift in objectives and activities."){record_delimiter}
("entity"{tuple_delimiter}"The team"{tuple_delimiter}"organization"{tuple_delimiter}"The team is portrayed as a group of individuals who have transitioned from passive observers to active participants in a mission, showing a dynamic change in their role."){record_delimiter}
("relationship"{tuple_delimiter}"The team"{tuple_delimiter}"Washington"{tuple_delimiter}"The team receives communications from Washington, which influences their decision-making process."{tuple_delimiter}7){record_delimiter}
("relationship"{tuple_delimiter}"The team"{tuple_delimiter}"Operation: Dulce"{tuple_delimiter}"The team is directly involved in Operation: Dulce, executing its evolved objectives and activities."{tuple_delimiter}9){completion_delimiter}
#############################
Example 3:
Entity_types: [person, role, technology, organization, event, location, concept]
Text:
their voice slicing through the buzz of activity. "Control may be an illusion when facing an intelligence that literally writes its own rules," they stated stoically, casting a watchful eye over the flurry of data.
"It's like it's learning to communicate," offered Sam Rivera from a nearby interface, their youthful energy boding a mix of awe and anxiety. "This gives talking to strangers' a whole new meaning."
Alex surveyed his team—each face a study in concentration, determination, and not a small measure of trepidation. "This might well be our first contact," he acknowledged, "And we need to be ready for whatever answers back."
Together, they stood on the edge of the unknown, forging humanity's response to a message from the heavens. The ensuing silence was palpable—a collective introspection about their role in this grand cosmic play, one that could rewrite human history.
The encrypted dialogue continued to unfold, its intricate patterns showing an almost uncanny anticipation
#############
Output:
("entity"{tuple_delimiter}"Sam Rivera"{tuple_delimiter}"person"{tuple_delimiter}"Sam Rivera is a member of a team working on communicating with an unknown intelligence, showing a mix of awe and anxiety."){record_delimiter}
("entity"{tuple_delimiter}"Alex"{tuple_delimiter}"person"{tuple_delimiter}"Alex is the leader of a team attempting first contact with an unknown intelligence, acknowledging the significance of their task."){record_delimiter}
("entity"{tuple_delimiter}"Control"{tuple_delimiter}"concept"{tuple_delimiter}"Control refers to the ability to manage or govern, which is challenged by an intelligence that writes its own rules."){record_delimiter}
("entity"{tuple_delimiter}"Intelligence"{tuple_delimiter}"concept"{tuple_delimiter}"Intelligence here refers to an unknown entity capable of writing its own rules and learning to communicate."){record_delimiter}
("entity"{tuple_delimiter}"First Contact"{tuple_delimiter}"event"{tuple_delimiter}"First Contact is the potential initial communication between humanity and an unknown intelligence."){record_delimiter}
("entity"{tuple_delimiter}"Humanity's Response"{tuple_delimiter}"event"{tuple_delimiter}"Humanity's Response is the collective action taken by Alex's team in response to a message from an unknown intelligence."){record_delimiter}
("relationship"{tuple_delimiter}"Sam Rivera"{tuple_delimiter}"Intelligence"{tuple_delimiter}"Sam Rivera is directly involved in the process of learning to communicate with the unknown intelligence."{tuple_delimiter}9){record_delimiter}
("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"First Contact"{tuple_delimiter}"Alex leads the team that might be making the First Contact with the unknown intelligence."{tuple_delimiter}10){record_delimiter}
("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"Humanity's Response"{tuple_delimiter}"Alex and his team are the key figures in Humanity's Response to the unknown intelligence."{tuple_delimiter}8){record_delimiter}
("relationship"{tuple_delimiter}"Control"{tuple_delimiter}"Intelligence"{tuple_delimiter}"The concept of Control is challenged by the Intelligence that writes its own rules."{tuple_delimiter}7){completion_delimiter}
#############################
-Real Data-
######################
Entity_types: {entity_types}
Text: {input_text}
######################
Output:"""
CONTINUE_PROMPT = "MANY entities were missed in the last extraction. Add them below using the same format:\n"
LOOP_PROMPT = "It appears some entities may have still been missed. Answer Y if there are still entities that need to be added, or N if there are none. Please answer with a single letter Y or N.\n"
SUMMARIZE_DESCRIPTIONS_PROMPT = """
You are a helpful assistant responsible for generating a comprehensive summary of the data provided below.
You are given one or two entities, and a list of descriptions, all related to the same entity or group of entities.
Please concatenate all of these into a single, comprehensive description. Make sure to include information collected from all the descriptions.
If the provided descriptions are contradictory, please resolve the contradictions and provide a single, coherent summary.
Make sure it is written in third person, and include the entity names so we have the full context.
Use {language} as output language.
#######
-Data-
Entities: {entity_name}
Description List: {description_list}
#######
""" | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/general/smoke.py | graphrag/general/smoke.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import asyncio
import json
import logging
import networkx as nx
from common.constants import LLMType
from api.db.services.document_service import DocumentService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api.db.services.user_service import TenantService
from graphrag.general.graph_extractor import GraphExtractor
from graphrag.general.index import update_graph, with_resolution, with_community
from common import settings
settings.init_settings()
def callback(prog=None, msg="Processing..."):
logging.info(msg)
async def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-t",
"--tenant_id",
default=False,
help="Tenant ID",
action="store",
required=True,
)
parser.add_argument(
"-d",
"--doc_id",
default=False,
help="Document ID",
action="store",
required=True,
)
args = parser.parse_args()
e, doc = DocumentService.get_by_id(args.doc_id)
if not e:
raise LookupError("Document not found.")
kb_id = doc.kb_id
chunks = [
d["content_with_weight"]
for d in settings.retriever.chunk_list(
args.doc_id,
args.tenant_id,
[kb_id],
max_count=6,
fields=["content_with_weight"],
)
]
_, tenant = TenantService.get_by_id(args.tenant_id)
llm_bdl = LLMBundle(args.tenant_id, LLMType.CHAT, tenant.llm_id)
_, kb = KnowledgebaseService.get_by_id(kb_id)
embed_bdl = LLMBundle(args.tenant_id, LLMType.EMBEDDING, kb.embd_id)
graph, doc_ids = await update_graph(
GraphExtractor,
args.tenant_id,
kb_id,
args.doc_id,
chunks,
"English",
llm_bdl,
embed_bdl,
callback,
)
print(json.dumps(nx.node_link_data(graph), ensure_ascii=False, indent=2))
await with_resolution(
args.tenant_id, kb_id, args.doc_id, llm_bdl, embed_bdl, callback
)
community_structure, community_reports = await with_community(
args.tenant_id, kb_id, args.doc_id, llm_bdl, embed_bdl, callback
)
print(
"------------------ COMMUNITY STRUCTURE--------------------\n",
json.dumps(community_structure, ensure_ascii=False, indent=2),
)
print(
"------------------ COMMUNITY REPORTS----------------------\n",
community_reports,
)
if __name__ == "__main__":
    asyncio.run(main())
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
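Assuming the RAGFlow services above are reachable, the smoke test is a CLI script that requires a tenant id and a document id. A hypothetical invocation via subprocess, with placeholder ids:

import subprocess

# Equivalent to: python graphrag/general/smoke.py -t <tenant_id> -d <doc_id>
subprocess.run(
    ["python", "graphrag/general/smoke.py", "-t", "tenant_123", "-d", "doc_456"],
    check=True,
)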
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/general/entity_embedding.py | graphrag/general/entity_embedding.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
- [graphrag](https://github.com/microsoft/graphrag)
"""
from typing import Any
import numpy as np
import networkx as nx
from dataclasses import dataclass
from graphrag.general.leiden import stable_largest_connected_component
import graspologic as gc
@dataclass
class NodeEmbeddings:
"""Node embeddings class definition."""
nodes: list[str]
embeddings: np.ndarray
def embed_node2vec(
graph: nx.Graph | nx.DiGraph,
dimensions: int = 1536,
num_walks: int = 10,
walk_length: int = 40,
window_size: int = 2,
iterations: int = 3,
random_seed: int = 86,
) -> NodeEmbeddings:
"""Generate node embeddings using Node2Vec."""
# generate embedding
lcc_tensors = gc.embed.node2vec_embed( # type: ignore
graph=graph,
dimensions=dimensions,
window_size=window_size,
iterations=iterations,
num_walks=num_walks,
walk_length=walk_length,
random_seed=random_seed,
)
return NodeEmbeddings(embeddings=lcc_tensors[0], nodes=lcc_tensors[1])
def run(graph: nx.Graph, args: dict[str, Any]) -> dict:
"""Run method definition."""
if args.get("use_lcc", True):
graph = stable_largest_connected_component(graph)
# create graph embedding using node2vec
embeddings = embed_node2vec(
graph=graph,
dimensions=args.get("dimensions", 1536),
num_walks=args.get("num_walks", 10),
walk_length=args.get("walk_length", 40),
window_size=args.get("window_size", 2),
iterations=args.get("iterations", 3),
random_seed=args.get("random_seed", 86),
)
pairs = zip(embeddings.nodes, embeddings.embeddings.tolist(), strict=True)
sorted_pairs = sorted(pairs, key=lambda x: x[0])
return dict(sorted_pairs) | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
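A small sketch of calling run() directly on a toy graph, assuming graspologic is installed; the node names and the reduced dimensions are illustrative (the pipeline normally supplies the graph and uses the 1536-dimension default):

import networkx as nx
# run() as defined in entity_embedding.py above

g = nx.Graph()
g.add_edges_from([("ALICE", "ACME"), ("BOB", "ACME"), ("ALICE", "BOB")])

# Returns {node_name: embedding_list}, sorted by node name.
embeddings_by_node = run(g, {"dimensions": 8, "use_lcc": True})
for node, vector in embeddings_by_node.items():
    print(node, len(vector))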
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/general/leiden.py | graphrag/general/leiden.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
- [graphrag](https://github.com/microsoft/graphrag)
"""
import logging
import html
from typing import Any, cast
from graspologic.partition import hierarchical_leiden
from graspologic.utils import largest_connected_component
import networkx as nx
from networkx import is_empty
def _stabilize_graph(graph: nx.Graph) -> nx.Graph:
"""Ensure an undirected graph with the same relationships will always be read the same way."""
fixed_graph = nx.DiGraph() if graph.is_directed() else nx.Graph()
sorted_nodes = graph.nodes(data=True)
sorted_nodes = sorted(sorted_nodes, key=lambda x: x[0])
fixed_graph.add_nodes_from(sorted_nodes)
edges = list(graph.edges(data=True))
# If the graph is undirected, we create the edges in a stable way, so we get the same results
# for example:
# A -> B
# in graph theory is the same as
# B -> A
# in an undirected graph
# however, this can lead to downstream issues because sometimes
# consumers read graph.nodes() which ends up being [A, B] and sometimes it's [B, A]
# but they base some of their logic on the order of the nodes, so the order ends up being important
# so we sort the nodes in the edge in a stable way, so that we always get the same order
if not graph.is_directed():
def _sort_source_target(edge):
source, target, edge_data = edge
if source > target:
temp = source
source = target
target = temp
return source, target, edge_data
edges = [_sort_source_target(edge) for edge in edges]
def _get_edge_key(source: Any, target: Any) -> str:
return f"{source} -> {target}"
edges = sorted(edges, key=lambda x: _get_edge_key(x[0], x[1]))
fixed_graph.add_edges_from(edges)
return fixed_graph
def normalize_node_names(graph: nx.Graph | nx.DiGraph) -> nx.Graph | nx.DiGraph:
"""Normalize node names."""
node_mapping = {node: html.unescape(node.upper().strip()) for node in graph.nodes()} # type: ignore
return nx.relabel_nodes(graph, node_mapping)
def stable_largest_connected_component(graph: nx.Graph) -> nx.Graph:
"""Return the largest connected component of the graph, with nodes and edges sorted in a stable way."""
graph = graph.copy()
graph = cast(nx.Graph, largest_connected_component(graph))
graph = normalize_node_names(graph)
return _stabilize_graph(graph)
def _compute_leiden_communities(
graph: nx.Graph | nx.DiGraph,
max_cluster_size: int,
use_lcc: bool,
seed=0xDEADBEEF,
) -> dict[int, dict[str, int]]:
"""Return Leiden root communities."""
results: dict[int, dict[str, int]] = {}
if is_empty(graph):
return results
if use_lcc:
graph = stable_largest_connected_component(graph)
community_mapping = hierarchical_leiden(
graph, max_cluster_size=max_cluster_size, random_seed=seed
)
for partition in community_mapping:
results[partition.level] = results.get(partition.level, {})
results[partition.level][partition.node] = partition.cluster
return results
def run(graph: nx.Graph, args: dict[str, Any]) -> dict[int, dict[str, dict]]:
"""Run method definition."""
max_cluster_size = args.get("max_cluster_size", 12)
use_lcc = args.get("use_lcc", True)
if args.get("verbose", False):
logging.debug(
"Running leiden with max_cluster_size=%s, lcc=%s", max_cluster_size, use_lcc
)
nodes = set(graph.nodes())
if not nodes:
return {}
node_id_to_community_map = _compute_leiden_communities(
graph=graph,
max_cluster_size=max_cluster_size,
use_lcc=use_lcc,
seed=args.get("seed", 0xDEADBEEF),
)
levels = args.get("levels")
# If they don't pass in levels, use them all
if levels is None:
levels = sorted(node_id_to_community_map.keys())
    results_by_level: dict[int, dict[str, dict]] = {}
for level in levels:
result = {}
results_by_level[level] = result
for node_id, raw_community_id in node_id_to_community_map[level].items():
if node_id not in nodes:
logging.warning(f"Node {node_id} not found in the graph.")
continue
community_id = str(raw_community_id)
if community_id not in result:
result[community_id] = {"weight": 0, "nodes": []}
result[community_id]["nodes"].append(node_id)
result[community_id]["weight"] += graph.nodes[node_id].get("rank", 0) * graph.nodes[node_id].get("weight", 1)
weights = [comm["weight"] for _, comm in result.items()]
if not weights:
continue
max_weight = max(weights)
if max_weight == 0:
continue
for _, comm in result.items():
comm["weight"] /= max_weight
return results_by_level
def add_community_info2graph(graph: nx.Graph, nodes: list[str], community_title):
for n in nodes:
if "communities" not in graph.nodes[n]:
graph.nodes[n]["communities"] = []
graph.nodes[n]["communities"].append(community_title)
graph.nodes[n]["communities"] = list(set(graph.nodes[n]["communities"]))
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
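A sketch of leiden.run() on a toy graph; the rank and weight node attributes feed the community weight computation above (CommunityReportsExtractor sets rank from node degree), and the values here are illustrative:

import networkx as nx
# run() as defined in leiden.py above

g = nx.Graph()
g.add_edges_from([("A", "B"), ("B", "C"), ("A", "C"), ("C", "D"), ("D", "E"), ("D", "F"), ("E", "F")])
for n in g.nodes():
    g.nodes[n]["rank"] = g.degree[n]  # e.g. node degree, set by the caller
    g.nodes[n]["weight"] = 1

levels = run(g, {"max_cluster_size": 12, "use_lcc": True})
for level, communities in levels.items():
    for cid, comm in communities.items():
        print(level, cid, sorted(comm["nodes"]), round(comm["weight"], 3))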
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/general/community_reports_extractor.py | graphrag/general/community_reports_extractor.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
- [graphrag](https://github.com/microsoft/graphrag)
"""
import asyncio
import logging
import json
import os
import re
from typing import Callable
from dataclasses import dataclass
import networkx as nx
import pandas as pd
from api.db.services.task_service import has_canceled
from common.exceptions import TaskCanceledException
from common.connection_utils import timeout
from graphrag.general import leiden
from graphrag.general.community_report_prompt import COMMUNITY_REPORT_PROMPT
from graphrag.general.extractor import Extractor
from graphrag.general.leiden import add_community_info2graph
from rag.llm.chat_model import Base as CompletionLLM
from graphrag.utils import perform_variable_replacements, dict_has_keys_with_types, chat_limiter
from common.token_utils import num_tokens_from_string
@dataclass
class CommunityReportsResult:
"""Community reports result class definition."""
output: list[str]
structured_output: list[dict]
class CommunityReportsExtractor(Extractor):
"""Community reports extractor class definition."""
_extraction_prompt: str
_output_formatter_prompt: str
_max_report_length: int
def __init__(
self,
llm_invoker: CompletionLLM,
max_report_length: int | None = None,
):
        """Init method definition."""
        super().__init__(llm_invoker)
        self._llm = llm_invoker
self._extraction_prompt = COMMUNITY_REPORT_PROMPT
self._max_report_length = max_report_length or 1500
async def __call__(self, graph: nx.Graph, callback: Callable | None = None, task_id: str = ""):
enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
for node_degree in graph.degree:
graph.nodes[str(node_degree[0])]["rank"] = int(node_degree[1])
communities: dict[str, dict[str, list]] = leiden.run(graph, {})
total = sum([len(comm.items()) for _, comm in communities.items()])
res_str = []
res_dict = []
over, token_count = 0, 0
@timeout(120)
async def extract_community_report(community):
nonlocal res_str, res_dict, over, token_count
if task_id:
if has_canceled(task_id):
logging.info(f"Task {task_id} cancelled during community report extraction.")
raise TaskCanceledException(f"Task {task_id} was cancelled")
cm_id, cm = community
weight = cm["weight"]
ents = cm["nodes"]
if len(ents) < 2:
return
ent_list = [{"entity": ent, "description": graph.nodes[ent]["description"]} for ent in ents]
ent_df = pd.DataFrame(ent_list)
rela_list = []
k = 0
for i in range(0, len(ents)):
if k >= 10000:
break
for j in range(i + 1, len(ents)):
if k >= 10000:
break
edge = graph.get_edge_data(ents[i], ents[j])
if edge is None:
continue
rela_list.append({"source": ents[i], "target": ents[j], "description": edge["description"]})
k += 1
rela_df = pd.DataFrame(rela_list)
prompt_variables = {
"entity_df": ent_df.to_csv(index_label="id"),
"relation_df": rela_df.to_csv(index_label="id")
}
text = perform_variable_replacements(self._extraction_prompt, variables=prompt_variables)
async with chat_limiter:
try:
timeout = 180 if enable_timeout_assertion else 1000000000
                    response = await asyncio.wait_for(asyncio.to_thread(self._chat, text, [{"role": "user", "content": "Output:"}], {}, task_id), timeout=timeout)
except asyncio.TimeoutError:
logging.warning("extract_community_report._chat timeout, skipping...")
return
except Exception as e:
logging.error(f"extract_community_report._chat failed: {e}")
return
token_count += num_tokens_from_string(text + response)
response = re.sub(r"^[^\{]*", "", response)
response = re.sub(r"[^\}]*$", "", response)
response = re.sub(r"\{\{", "{", response)
response = re.sub(r"\}\}", "}", response)
logging.debug(response)
try:
response = json.loads(response)
except json.JSONDecodeError as e:
logging.error(f"Failed to parse JSON response: {e}")
logging.error(f"Response content: {response}")
return
if not dict_has_keys_with_types(response, [
("title", str),
("summary", str),
("findings", list),
("rating", float),
("rating_explanation", str),
]):
return
response["weight"] = weight
response["entities"] = ents
add_community_info2graph(graph, ents, response["title"])
res_str.append(self._get_text_output(response))
res_dict.append(response)
over += 1
if callback:
callback(msg=f"Communities: {over}/{total}, used tokens: {token_count}")
st = asyncio.get_running_loop().time()
tasks = []
for level, comm in communities.items():
logging.info(f"Level {level}: Community: {len(comm.keys())}")
for community in comm.items():
if task_id and has_canceled(task_id):
logging.info(f"Task {task_id} cancelled before community processing.")
raise TaskCanceledException(f"Task {task_id} was cancelled")
tasks.append(asyncio.create_task(extract_community_report(community)))
try:
await asyncio.gather(*tasks, return_exceptions=False)
except Exception as e:
logging.error(f"Error in community processing: {e}")
for t in tasks:
t.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
raise
if callback:
callback(msg=f"Community reports done in {asyncio.get_running_loop().time() - st:.2f}s, used tokens: {token_count}")
return CommunityReportsResult(
structured_output=res_dict,
output=res_str,
)
def _get_text_output(self, parsed_output: dict) -> str:
title = parsed_output.get("title", "Report")
summary = parsed_output.get("summary", "")
findings = parsed_output.get("findings", [])
def finding_summary(finding: dict):
if isinstance(finding, str):
return finding
return finding.get("summary")
def finding_explanation(finding: dict):
if isinstance(finding, str):
return ""
return finding.get("explanation")
report_sections = "\n\n".join(
f"## {finding_summary(f)}\n\n{finding_explanation(f)}" for f in findings
)
return f"# {title}\n\n{summary}\n\n{report_sections}"
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
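CommunityReportsExtractor keeps an LLM response only if its parsed JSON carries the five typed keys checked through dict_has_keys_with_types. A sketch of a response that would pass the check, with all values invented for illustration; note that the ("rating", float) check means an integer rating such as 7 would be rejected, so the prompt must elicit a decimal:

# Illustrative shape only; real reports come from the LLM.
report = {
    "title": "Acme supply network",
    "summary": "Entities connected through Acme's supply chain.",
    "findings": [
        {"summary": "Acme is central", "explanation": "Most relationships touch Acme."},
    ],
    "rating": 7.5,  # must parse as a float, not an int
    "rating_explanation": "Moderate-impact community.",
}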
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/general/__init__.py | graphrag/general/__init__.py | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false | |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/general/graph_extractor.py | graphrag/general/graph_extractor.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
- [graphrag](https://github.com/microsoft/graphrag)
"""
import asyncio
import re
from typing import Any
from dataclasses import dataclass
import tiktoken
from graphrag.general.extractor import Extractor, ENTITY_EXTRACTION_MAX_GLEANINGS
from graphrag.general.graph_prompt import GRAPH_EXTRACTION_PROMPT, CONTINUE_PROMPT, LOOP_PROMPT
from graphrag.utils import ErrorHandlerFn, perform_variable_replacements, chat_limiter, split_string_by_multi_markers
from rag.llm.chat_model import Base as CompletionLLM
import networkx as nx
from common.token_utils import num_tokens_from_string
DEFAULT_TUPLE_DELIMITER = "<|>"
DEFAULT_RECORD_DELIMITER = "##"
DEFAULT_COMPLETION_DELIMITER = "<|COMPLETE|>"
@dataclass
class GraphExtractionResult:
"""Unipartite graph extraction result class definition."""
output: nx.Graph
source_docs: dict[Any, Any]
class GraphExtractor(Extractor):
"""Unipartite graph extractor class definition."""
_join_descriptions: bool
_tuple_delimiter_key: str
_record_delimiter_key: str
_entity_types_key: str
_input_text_key: str
_completion_delimiter_key: str
_entity_name_key: str
_input_descriptions_key: str
_extraction_prompt: str
_summarization_prompt: str
_loop_args: dict[str, Any]
_max_gleanings: int
_on_error: ErrorHandlerFn
def __init__(
self,
llm_invoker: CompletionLLM,
language: str | None = "English",
entity_types: list[str] | None = None,
tuple_delimiter_key: str | None = None,
record_delimiter_key: str | None = None,
input_text_key: str | None = None,
entity_types_key: str | None = None,
completion_delimiter_key: str | None = None,
join_descriptions=True,
max_gleanings: int | None = None,
on_error: ErrorHandlerFn | None = None,
):
        """Init method definition."""
        super().__init__(llm_invoker, language, entity_types)
        # TODO: streamline construction
        self._llm = llm_invoker
self._join_descriptions = join_descriptions
self._input_text_key = input_text_key or "input_text"
self._tuple_delimiter_key = tuple_delimiter_key or "tuple_delimiter"
self._record_delimiter_key = record_delimiter_key or "record_delimiter"
self._completion_delimiter_key = (
completion_delimiter_key or "completion_delimiter"
)
self._entity_types_key = entity_types_key or "entity_types"
self._extraction_prompt = GRAPH_EXTRACTION_PROMPT
self._max_gleanings = (
max_gleanings
if max_gleanings is not None
else ENTITY_EXTRACTION_MAX_GLEANINGS
)
self._on_error = on_error or (lambda _e, _s, _d: None)
self.prompt_token_count = num_tokens_from_string(self._extraction_prompt)
# Construct the looping arguments
encoding = tiktoken.get_encoding("cl100k_base")
yes = encoding.encode("YES")
no = encoding.encode("NO")
self._loop_args = {"logit_bias": {yes[0]: 100, no[0]: 100}, "max_tokens": 1}
# Wire defaults into the prompt variables
self._prompt_variables = {
self._tuple_delimiter_key: DEFAULT_TUPLE_DELIMITER,
self._record_delimiter_key: DEFAULT_RECORD_DELIMITER,
self._completion_delimiter_key: DEFAULT_COMPLETION_DELIMITER,
self._entity_types_key: ",".join(entity_types),
}
async def _process_single_content(self, chunk_key_dp: tuple[str, str], chunk_seq: int, num_chunks: int, out_results, task_id=""):
token_count = 0
chunk_key = chunk_key_dp[0]
content = chunk_key_dp[1]
variables = {
**self._prompt_variables,
self._input_text_key: content,
}
hint_prompt = perform_variable_replacements(self._extraction_prompt, variables=variables)
async with chat_limiter:
            response = await asyncio.to_thread(self._chat, hint_prompt, [{"role": "user", "content": "Output:"}], {}, task_id)
token_count += num_tokens_from_string(hint_prompt + response)
results = response or ""
history = [{"role": "system", "content": hint_prompt}, {"role": "user", "content": response}]
# Repeat to ensure we maximize entity count
for i in range(self._max_gleanings):
history.append({"role": "user", "content": CONTINUE_PROMPT})
async with chat_limiter:
response = await asyncio.to_thread(self._chat, "", history, {})
token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + response)
results += response or ""
# if this is the final glean, don't bother updating the continuation flag
if i >= self._max_gleanings - 1:
break
history.append({"role": "assistant", "content": response})
history.append({"role": "user", "content": LOOP_PROMPT})
async with chat_limiter:
                continuation = await asyncio.to_thread(self._chat, "", history, self._loop_args)
            token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + continuation)
if continuation != "Y":
break
history.append({"role": "assistant", "content": "Y"})
records = split_string_by_multi_markers(
results,
[self._prompt_variables[self._record_delimiter_key], self._prompt_variables[self._completion_delimiter_key]],
)
rcds = []
for record in records:
record = re.search(r"\((.*)\)", record)
if record is None:
continue
rcds.append(record.group(1))
records = rcds
maybe_nodes, maybe_edges = self._entities_and_relations(chunk_key, records, self._prompt_variables[self._tuple_delimiter_key])
out_results.append((maybe_nodes, maybe_edges, token_count))
if self.callback:
            self.callback(0.5 + 0.1 * len(out_results) / num_chunks, msg=f"Entities extraction of chunk {chunk_seq} {len(out_results)}/{num_chunks} done, {len(maybe_nodes)} nodes, {len(maybe_edges)} edges, {token_count} tokens.")
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
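A self-contained sketch of how a model response in the delimited format above is split into records and fields; it approximates split_string_by_multi_markers and the re.search parsing with plain re operations, using the default delimiters:

import re

TUPLE = "<|>"              # DEFAULT_TUPLE_DELIMITER
RECORD = "##"              # DEFAULT_RECORD_DELIMITER
COMPLETE = "<|COMPLETE|>"  # DEFAULT_COMPLETION_DELIMITER

response = (
    '("entity"<|>"ALICE"<|>"person"<|>"Alice works at Acme.")##'
    '("relationship"<|>"ALICE"<|>"ACME"<|>"Employment."<|>8)<|COMPLETE|>'
)

# Split on either the record delimiter or the completion delimiter.
records = [r for r in re.split(re.escape(RECORD) + "|" + re.escape(COMPLETE), response) if r.strip()]
for record in records:
    m = re.search(r"\((.*)\)", record)  # keep only the parenthesized tuple
    if m is None:
        continue
    fields = [f.strip('"') for f in m.group(1).split(TUPLE)]
    print(fields)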
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/general/index.py | graphrag/general/index.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import json
import logging
import os
import networkx as nx
from api.db.services.document_service import DocumentService
from api.db.services.task_service import has_canceled
from common.exceptions import TaskCanceledException
from common.misc_utils import get_uuid
from common.connection_utils import timeout
from graphrag.entity_resolution import EntityResolution
from graphrag.general.community_reports_extractor import CommunityReportsExtractor
from graphrag.general.extractor import Extractor
from graphrag.general.graph_extractor import GraphExtractor as GeneralKGExt
from graphrag.light.graph_extractor import GraphExtractor as LightKGExt
from graphrag.utils import (
GraphChange,
chunk_id,
does_graph_contains,
get_graph,
graph_merge,
set_graph,
tidy_graph,
)
from rag.nlp import rag_tokenizer, search
from rag.utils.redis_conn import RedisDistributedLock
from common import settings
async def run_graphrag(
row: dict,
language,
with_resolution: bool,
with_community: bool,
chat_model,
embedding_model,
callback,
):
enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
start = asyncio.get_running_loop().time()
tenant_id, kb_id, doc_id = row["tenant_id"], str(row["kb_id"]), row["doc_id"]
chunks = []
for d in settings.retriever.chunk_list(doc_id, tenant_id, [kb_id], max_count=10000, fields=["content_with_weight", "doc_id"], sort_by_position=True):
chunks.append(d["content_with_weight"])
timeout_sec = max(120, len(chunks) * 60 * 10) if enable_timeout_assertion else 10000000000
try:
subgraph = await asyncio.wait_for(
generate_subgraph(
LightKGExt if "method" not in row["kb_parser_config"].get("graphrag", {})
or row["kb_parser_config"]["graphrag"]["method"] != "general"
else GeneralKGExt,
tenant_id,
kb_id,
doc_id,
chunks,
language,
row["kb_parser_config"]["graphrag"].get("entity_types", []),
chat_model,
embedding_model,
callback,
),
timeout=timeout_sec,
)
except asyncio.TimeoutError:
logging.error("generate_subgraph timeout")
raise
if not subgraph:
return
graphrag_task_lock = RedisDistributedLock(f"graphrag_task_{kb_id}", lock_value=doc_id, timeout=1200)
await graphrag_task_lock.spin_acquire()
callback(msg=f"run_graphrag {doc_id} graphrag_task_lock acquired")
try:
subgraph_nodes = set(subgraph.nodes())
new_graph = await merge_subgraph(
tenant_id,
kb_id,
doc_id,
subgraph,
embedding_model,
callback,
)
assert new_graph is not None
if not with_resolution and not with_community:
return
if with_resolution:
await graphrag_task_lock.spin_acquire()
callback(msg=f"run_graphrag {doc_id} graphrag_task_lock acquired")
await resolve_entities(
new_graph,
subgraph_nodes,
tenant_id,
kb_id,
doc_id,
chat_model,
embedding_model,
callback,
task_id=row["id"],
)
if with_community:
await graphrag_task_lock.spin_acquire()
callback(msg=f"run_graphrag {doc_id} graphrag_task_lock acquired")
await extract_community(
new_graph,
tenant_id,
kb_id,
doc_id,
chat_model,
embedding_model,
callback,
task_id=row["id"],
)
finally:
graphrag_task_lock.release()
now = asyncio.get_running_loop().time()
callback(msg=f"GraphRAG for doc {doc_id} done in {now - start:.2f} seconds.")
return
async def run_graphrag_for_kb(
row: dict,
doc_ids: list[str],
language: str,
kb_parser_config: dict,
chat_model,
embedding_model,
callback,
*,
with_resolution: bool = True,
with_community: bool = True,
max_parallel_docs: int = 4,
) -> dict:
tenant_id, kb_id = row["tenant_id"], row["kb_id"]
enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
start = asyncio.get_running_loop().time()
fields_for_chunks = ["content_with_weight", "doc_id"]
if not doc_ids:
logging.info(f"Fetching all docs for {kb_id}")
docs, _ = DocumentService.get_by_kb_id(
kb_id=kb_id,
page_number=0,
items_per_page=0,
orderby="create_time",
desc=False,
keywords="",
run_status=[],
types=[],
suffix=[],
)
doc_ids = [doc["id"] for doc in docs]
doc_ids = list(dict.fromkeys(doc_ids))
if not doc_ids:
callback(msg=f"[GraphRAG] kb:{kb_id} has no processable doc_id.")
return {"ok_docs": [], "failed_docs": [], "total_docs": 0, "total_chunks": 0, "seconds": 0.0}
def load_doc_chunks(doc_id: str) -> list[str]:
from common.token_utils import num_tokens_from_string
chunks = []
current_chunk = ""
        # Fetch all raw chunks first
raw_chunks = list(settings.retriever.chunk_list(
doc_id,
tenant_id,
[kb_id],
            max_count=10000,  # raise the limit so all chunks are processed
fields=fields_for_chunks,
sort_by_position=True,
))
callback(msg=f"[DEBUG] chunk_list() returned {len(raw_chunks)} raw chunks for doc {doc_id}")
for d in raw_chunks:
content = d["content_with_weight"]
if num_tokens_from_string(current_chunk + content) < 4096:
current_chunk += content
else:
if current_chunk:
chunks.append(current_chunk)
current_chunk = content
if current_chunk:
chunks.append(current_chunk)
return chunks
all_doc_chunks: dict[str, list[str]] = {}
total_chunks = 0
for doc_id in doc_ids:
chunks = load_doc_chunks(doc_id)
all_doc_chunks[doc_id] = chunks
total_chunks += len(chunks)
if total_chunks == 0:
callback(msg=f"[GraphRAG] kb:{kb_id} has no available chunks in all documents, skip.")
return {"ok_docs": [], "failed_docs": doc_ids, "total_docs": len(doc_ids), "total_chunks": 0, "seconds": 0.0}
semaphore = asyncio.Semaphore(max_parallel_docs)
subgraphs: dict[str, object] = {}
failed_docs: list[tuple[str, str]] = [] # (doc_id, error)
async def build_one(doc_id: str):
if has_canceled(row["id"]):
callback(msg=f"Task {row['id']} cancelled, stopping execution.")
raise TaskCanceledException(f"Task {row['id']} was cancelled")
chunks = all_doc_chunks.get(doc_id, [])
if not chunks:
callback(msg=f"[GraphRAG] doc:{doc_id} has no available chunks, skip generation.")
return
kg_extractor = LightKGExt if ("method" not in kb_parser_config.get("graphrag", {}) or kb_parser_config["graphrag"]["method"] != "general") else GeneralKGExt
deadline = max(120, len(chunks) * 60 * 10) if enable_timeout_assertion else 10000000000
async with semaphore:
try:
msg = f"[GraphRAG] build_subgraph doc:{doc_id}"
callback(msg=f"{msg} start (chunks={len(chunks)}, timeout={deadline}s)")
try:
sg = await asyncio.wait_for(
generate_subgraph(
kg_extractor,
tenant_id,
kb_id,
doc_id,
chunks,
language,
kb_parser_config.get("graphrag", {}).get("entity_types", []),
chat_model,
embedding_model,
callback,
task_id=row["id"]
),
timeout=deadline,
)
except asyncio.TimeoutError:
failed_docs.append((doc_id, "timeout"))
callback(msg=f"{msg} FAILED: timeout")
return
if sg:
subgraphs[doc_id] = sg
callback(msg=f"{msg} done")
else:
failed_docs.append((doc_id, "subgraph is empty"))
callback(msg=f"{msg} empty")
except TaskCanceledException as canceled:
callback(msg=f"[GraphRAG] build_subgraph doc:{doc_id} FAILED: {canceled}")
except Exception as e:
failed_docs.append((doc_id, repr(e)))
callback(msg=f"[GraphRAG] build_subgraph doc:{doc_id} FAILED: {e!r}")
if has_canceled(row["id"]):
callback(msg=f"Task {row['id']} cancelled before processing documents.")
raise TaskCanceledException(f"Task {row['id']} was cancelled")
tasks = [asyncio.create_task(build_one(doc_id)) for doc_id in doc_ids]
try:
await asyncio.gather(*tasks, return_exceptions=False)
except Exception as e:
logging.error(f"Error in asyncio.gather: {e}")
for t in tasks:
t.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
raise
if has_canceled(row["id"]):
callback(msg=f"Task {row['id']} cancelled after document processing.")
raise TaskCanceledException(f"Task {row['id']} was cancelled")
ok_docs = [d for d in doc_ids if d in subgraphs]
if not ok_docs:
callback(msg=f"[GraphRAG] kb:{kb_id} no subgraphs generated successfully, end.")
now = asyncio.get_running_loop().time()
return {"ok_docs": [], "failed_docs": failed_docs, "total_docs": len(doc_ids), "total_chunks": total_chunks, "seconds": now - start}
kb_lock = RedisDistributedLock(f"graphrag_task_{kb_id}", lock_value="batch_merge", timeout=1200)
await kb_lock.spin_acquire()
callback(msg=f"[GraphRAG] kb:{kb_id} merge lock acquired")
if has_canceled(row["id"]):
callback(msg=f"Task {row['id']} cancelled before merging subgraphs.")
raise TaskCanceledException(f"Task {row['id']} was cancelled")
try:
union_nodes: set = set()
final_graph = None
for doc_id in ok_docs:
sg = subgraphs[doc_id]
union_nodes.update(set(sg.nodes()))
new_graph = await merge_subgraph(
tenant_id,
kb_id,
doc_id,
sg,
embedding_model,
callback,
)
if new_graph is not None:
final_graph = new_graph
if final_graph is None:
callback(msg=f"[GraphRAG] kb:{kb_id} merge finished (no in-memory graph returned).")
else:
callback(msg=f"[GraphRAG] kb:{kb_id} merge finished, graph ready.")
finally:
kb_lock.release()
if not with_resolution and not with_community:
now = asyncio.get_running_loop().time()
callback(msg=f"[GraphRAG] KB merge done in {now - start:.2f}s. ok={len(ok_docs)} / total={len(doc_ids)}")
return {"ok_docs": ok_docs, "failed_docs": failed_docs, "total_docs": len(doc_ids), "total_chunks": total_chunks, "seconds": now - start}
if has_canceled(row["id"]):
callback(msg=f"Task {row['id']} cancelled before resolution/community extraction.")
raise TaskCanceledException(f"Task {row['id']} was cancelled")
await kb_lock.spin_acquire()
callback(msg=f"[GraphRAG] kb:{kb_id} post-merge lock acquired for resolution/community")
try:
subgraph_nodes = set()
for sg in subgraphs.values():
subgraph_nodes.update(set(sg.nodes()))
if with_resolution:
await resolve_entities(
final_graph,
subgraph_nodes,
tenant_id,
kb_id,
None,
chat_model,
embedding_model,
callback,
task_id=row["id"],
)
if with_community:
await extract_community(
final_graph,
tenant_id,
kb_id,
None,
chat_model,
embedding_model,
callback,
task_id=row["id"],
)
finally:
kb_lock.release()
now = asyncio.get_running_loop().time()
callback(msg=f"[GraphRAG] GraphRAG for KB {kb_id} done in {now - start:.2f} seconds. ok={len(ok_docs)} failed={len(failed_docs)} total_docs={len(doc_ids)} total_chunks={total_chunks}")
return {
"ok_docs": ok_docs,
"failed_docs": failed_docs, # [(doc_id, error), ...]
"total_docs": len(doc_ids),
"total_chunks": total_chunks,
"seconds": now - start,
}
async def generate_subgraph(
extractor: Extractor,
tenant_id: str,
kb_id: str,
doc_id: str,
chunks: list[str],
language,
entity_types,
llm_bdl,
embed_bdl,
callback,
task_id: str = "",
):
if task_id and has_canceled(task_id):
callback(msg=f"Task {task_id} cancelled during subgraph generation for doc {doc_id}.")
raise TaskCanceledException(f"Task {task_id} was cancelled")
contains = await does_graph_contains(tenant_id, kb_id, doc_id)
if contains:
callback(msg=f"Graph already contains {doc_id}")
return None
start = asyncio.get_running_loop().time()
ext = extractor(
llm_bdl,
language=language,
entity_types=entity_types,
)
ents, rels = await ext(doc_id, chunks, callback, task_id=task_id)
subgraph = nx.Graph()
for ent in ents:
if task_id and has_canceled(task_id):
callback(msg=f"Task {task_id} cancelled during entity processing for doc {doc_id}.")
raise TaskCanceledException(f"Task {task_id} was cancelled")
assert "description" in ent, f"entity {ent} does not have description"
ent["source_id"] = [doc_id]
subgraph.add_node(ent["entity_name"], **ent)
ignored_rels = 0
for rel in rels:
if task_id and has_canceled(task_id):
callback(msg=f"Task {task_id} cancelled during relationship processing for doc {doc_id}.")
raise TaskCanceledException(f"Task {task_id} was cancelled")
assert "description" in rel, f"relation {rel} does not have description"
if not subgraph.has_node(rel["src_id"]) or not subgraph.has_node(rel["tgt_id"]):
ignored_rels += 1
continue
rel["source_id"] = [doc_id]
subgraph.add_edge(
rel["src_id"],
rel["tgt_id"],
**rel,
)
if ignored_rels:
callback(msg=f"ignored {ignored_rels} relations due to missing entities.")
tidy_graph(subgraph, callback, check_attribute=False)
subgraph.graph["source_id"] = [doc_id]
chunk = {
"content_with_weight": json.dumps(nx.node_link_data(subgraph, edges="edges"), ensure_ascii=False),
"knowledge_graph_kwd": "subgraph",
"kb_id": kb_id,
"source_id": [doc_id],
"available_int": 0,
"removed_kwd": "N",
}
cid = chunk_id(chunk)
await asyncio.to_thread(settings.docStoreConn.delete,{"knowledge_graph_kwd": "subgraph", "source_id": doc_id},search.index_name(tenant_id),kb_id,)
await asyncio.to_thread(settings.docStoreConn.insert,[{"id": cid, **chunk}],search.index_name(tenant_id),kb_id,)
now = asyncio.get_running_loop().time()
callback(msg=f"generated subgraph for doc {doc_id} in {now - start:.2f} seconds.")
return subgraph
@timeout(60 * 3)
async def merge_subgraph(
tenant_id: str,
kb_id: str,
doc_id: str,
subgraph: nx.Graph,
embedding_model,
callback,
):
start = asyncio.get_running_loop().time()
change = GraphChange()
old_graph = await get_graph(tenant_id, kb_id, subgraph.graph["source_id"])
if old_graph is not None:
logging.info("Merge with an exiting graph...................")
tidy_graph(old_graph, callback)
new_graph = graph_merge(old_graph, subgraph, change)
else:
new_graph = subgraph
change.added_updated_nodes = set(new_graph.nodes())
change.added_updated_edges = set(new_graph.edges())
pr = nx.pagerank(new_graph)
for node_name, pagerank in pr.items():
new_graph.nodes[node_name]["pagerank"] = pagerank
await set_graph(tenant_id, kb_id, embedding_model, new_graph, change, callback)
now = asyncio.get_running_loop().time()
callback(msg=f"merging subgraph for doc {doc_id} into the global graph done in {now - start:.2f} seconds.")
return new_graph
@timeout(60 * 30, 1)
async def resolve_entities(
graph,
subgraph_nodes: set[str],
tenant_id: str,
kb_id: str,
doc_id: str,
llm_bdl,
embed_bdl,
callback,
task_id: str = "",
):
# Check if task has been canceled before resolution
if task_id and has_canceled(task_id):
callback(msg=f"Task {task_id} cancelled during entity resolution.")
raise TaskCanceledException(f"Task {task_id} was cancelled")
start = asyncio.get_running_loop().time()
er = EntityResolution(
llm_bdl,
)
reso = await er(graph, subgraph_nodes, callback=callback, task_id=task_id)
graph = reso.graph
change = reso.change
callback(msg=f"Graph resolution removed {len(change.removed_nodes)} nodes and {len(change.removed_edges)} edges.")
callback(msg="Graph resolution updated pagerank.")
if task_id and has_canceled(task_id):
callback(msg=f"Task {task_id} cancelled after entity resolution.")
raise TaskCanceledException(f"Task {task_id} was cancelled")
await set_graph(tenant_id, kb_id, embed_bdl, graph, change, callback)
now = asyncio.get_running_loop().time()
callback(msg=f"Graph resolution done in {now - start:.2f}s.")
@timeout(60 * 30, 1)
async def extract_community(
graph,
tenant_id: str,
kb_id: str,
doc_id: str,
llm_bdl,
embed_bdl,
callback,
task_id: str = "",
):
if task_id and has_canceled(task_id):
callback(msg=f"Task {task_id} cancelled before community extraction.")
raise TaskCanceledException(f"Task {task_id} was cancelled")
start = asyncio.get_running_loop().time()
ext = CommunityReportsExtractor(
llm_bdl,
)
cr = await ext(graph, callback=callback, task_id=task_id)
if task_id and has_canceled(task_id):
callback(msg=f"Task {task_id} cancelled during community extraction.")
raise TaskCanceledException(f"Task {task_id} was cancelled")
community_structure = cr.structured_output
community_reports = cr.output
doc_ids = graph.graph["source_id"]
now = asyncio.get_running_loop().time()
callback(msg=f"Graph extracted {len(cr.structured_output)} communities in {now - start:.2f}s.")
start = now
if task_id and has_canceled(task_id):
callback(msg=f"Task {task_id} cancelled during community indexing.")
raise TaskCanceledException(f"Task {task_id} was cancelled")
chunks = []
for stru, rep in zip(community_structure, community_reports):
obj = {
"report": rep,
"evidences": "\n".join([f.get("explanation", "") for f in stru["findings"]]),
}
chunk = {
"id": get_uuid(),
"docnm_kwd": stru["title"],
"title_tks": rag_tokenizer.tokenize(stru["title"]),
"content_with_weight": json.dumps(obj, ensure_ascii=False),
"content_ltks": rag_tokenizer.tokenize(obj["report"] + " " + obj["evidences"]),
"knowledge_graph_kwd": "community_report",
"weight_flt": stru["weight"],
"entities_kwd": stru["entities"],
"important_kwd": stru["entities"],
"kb_id": kb_id,
"source_id": list(doc_ids),
"available_int": 0,
}
chunk["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(chunk["content_ltks"])
chunks.append(chunk)
await asyncio.to_thread(settings.docStoreConn.delete,{"knowledge_graph_kwd": "community_report", "kb_id": kb_id},search.index_name(tenant_id),kb_id,)
es_bulk_size = 4
for b in range(0, len(chunks), es_bulk_size):
        doc_store_result = await asyncio.to_thread(settings.docStoreConn.insert, chunks[b : b + es_bulk_size], search.index_name(tenant_id), kb_id)
if doc_store_result:
error_message = f"Insert chunk error: {doc_store_result}, please check log file and Elasticsearch/Infinity status!"
raise Exception(error_message)
if task_id and has_canceled(task_id):
callback(msg=f"Task {task_id} cancelled after community indexing.")
raise TaskCanceledException(f"Task {task_id} was cancelled")
now = asyncio.get_running_loop().time()
callback(msg=f"Graph indexed {len(cr.structured_output)} communities in {now - start:.2f}s.")
return community_structure, community_reports
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
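run_graphrag_for_kb packs raw chunk contents into batches of roughly 4096 tokens before extraction (see load_doc_chunks). A standalone sketch of that greedy packing, with a whitespace splitter standing in for num_tokens_from_string:

def pack_chunks(contents: list[str], budget: int = 4096) -> list[str]:
    # Stand-in tokenizer; the real code uses num_tokens_from_string.
    def count_tokens(text: str) -> int:
        return len(text.split())

    packed, current = [], ""
    for content in contents:
        if count_tokens(current + content) < budget:
            current += content
        else:
            if current:
                packed.append(current)
            current = content
    if current:
        packed.append(current)
    return packed

# Two 3000-token pieces cannot share a batch; the 100-token piece rides along.
print([len(p.split()) for p in pack_chunks(["a " * 3000, "b " * 3000, "c " * 100])])  # [3000, 3100]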
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/graphrag/general/mind_map_prompt.py | graphrag/general/mind_map_prompt.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
MIND_MAP_EXTRACTION_PROMPT = """
- Role: You're a talented text processor who summarizes a piece of text into a mind map.
- Steps of task:
  1. Generate a title for the user's 'TEXT'.
  2. Classify the 'TEXT' into sections of a mind map.
  3. If the subject matter is really complex, split it into sub-sections and sub-subsections.
  4. Add a short content summary to each bottom-level section.
- Output requirement:
  - Generate at least 4 levels.
  - Always try to maximize the number of sub-sections.
  - In the language of 'TEXT'.
- MUST IN FORMAT OF MARKDOWN
-TEXT-
{input_text}
""" | python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/intergrations/chatgpt-on-wechat/plugins/ragflow_chat.py | intergrations/chatgpt-on-wechat/plugins/ragflow_chat.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import requests
from bridge.context import ContextType # Import Context, ContextType
from bridge.reply import Reply, ReplyType # Import Reply, ReplyType
from plugins import Plugin, register # Import Plugin and register
from plugins.event import Event, EventContext, EventAction # Import event-related classes
@register(name="RAGFlowChat", desc="Use RAGFlow API to chat", version="1.0", author="Your Name")
class RAGFlowChat(Plugin):
def __init__(self):
super().__init__()
# Load plugin configuration
self.cfg = self.load_config()
# Bind event handling function
self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
# Store conversation_id for each user
self.conversations = {}
logging.info("[RAGFlowChat] Plugin initialized")
def on_handle_context(self, e_context: EventContext):
context = e_context['context']
if context.type != ContextType.TEXT:
return # Only process text messages
user_input = context.content.strip()
session_id = context['session_id']
# Call RAGFlow API to get a reply
reply_text = self.get_ragflow_reply(user_input, session_id)
if reply_text:
reply = Reply()
reply.type = ReplyType.TEXT
reply.content = reply_text
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS # Skip the default processing logic
else:
# If no reply is received, pass to the next plugin or default logic
e_context.action = EventAction.CONTINUE
def get_ragflow_reply(self, user_input, session_id):
# Get API_KEY and host address from the configuration
api_key = self.cfg.get("api_key")
host_address = self.cfg.get("host_address")
user_id = session_id # Use session_id as user_id
if not api_key or not host_address:
logging.error("[RAGFlowChat] Missing configuration")
return "The plugin configuration is incomplete. Please check the configuration."
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json"
}
# Step 1: Get or create conversation_id
conversation_id = self.conversations.get(user_id)
if not conversation_id:
# Create a new conversation
url_new_conversation = f"http://{host_address}/v1/api/new_conversation"
params_new_conversation = {
"user_id": user_id
}
try:
response = requests.get(url_new_conversation, headers=headers, params=params_new_conversation)
logging.debug(f"[RAGFlowChat] New conversation response: {response.text}")
if response.status_code == 200:
data = response.json()
if data.get("code") == 0:
conversation_id = data["data"]["id"]
self.conversations[user_id] = conversation_id
else:
logging.error(f"[RAGFlowChat] Failed to create conversation: {data.get('message')}")
return f"Sorry, unable to create a conversation: {data.get('message')}"
else:
logging.error(f"[RAGFlowChat] HTTP error when creating conversation: {response.status_code}")
return f"Sorry, unable to connect to RAGFlow API (create conversation). HTTP status code: {response.status_code}"
except Exception as e:
logging.exception("[RAGFlowChat] Exception when creating conversation")
return f"Sorry, an internal error occurred: {str(e)}"
# Step 2: Send the message and get a reply
url_completion = f"http://{host_address}/v1/api/completion"
payload_completion = {
"conversation_id": conversation_id,
"messages": [
{
"role": "user",
"content": user_input
}
],
"quote": False,
"stream": False
}
try:
response = requests.post(url_completion, headers=headers, json=payload_completion)
logging.debug(f"[RAGFlowChat] Completion response: {response.text}")
if response.status_code == 200:
data = response.json()
if data.get("code") == 0:
answer = data["data"]["answer"]
return answer
else:
logging.error(f"[RAGFlowChat] Failed to get answer: {data.get('message')}")
return f"Sorry, unable to get a reply: {data.get('message')}"
else:
logging.error(f"[RAGFlowChat] HTTP error when getting answer: {response.status_code}")
return f"Sorry, unable to connect to RAGFlow API (get reply). HTTP status code: {response.status_code}"
except Exception as e:
logging.exception("[RAGFlowChat] Exception when getting answer")
return f"Sorry, an internal error occurred: {str(e)}"
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
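The plugin only needs api_key and host_address from its configuration (supplied by load_config()). A sketch of the expected shape, with placeholder values; note the code prepends http://, so host_address should carry no scheme:

# Illustrative configuration values; load_config() supplies these at runtime.
cfg = {
    "api_key": "ragflow-xxxxxxxx",     # placeholder RAGFlow API key
    "host_address": "127.0.0.1:9380",  # host:port, without http://
}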
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/intergrations/chatgpt-on-wechat/plugins/__init__.py | intergrations/chatgpt-on-wechat/plugins/__init__.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from beartype.claw import beartype_this_package
beartype_this_package()
from .ragflow_chat import RAGFlowChat
__all__ = [
"RAGFlowChat"
]
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/intergrations/firecrawl/firecrawl_ui.py | intergrations/firecrawl/firecrawl_ui.py | """
UI components for Firecrawl integration in RAGFlow.
"""
from typing import Dict, Any, List, Optional
from dataclasses import dataclass
@dataclass
class FirecrawlUIComponent:
"""Represents a UI component for Firecrawl integration."""
component_type: str
props: Dict[str, Any]
children: Optional[List['FirecrawlUIComponent']] = None
class FirecrawlUIBuilder:
"""Builder for Firecrawl UI components in RAGFlow."""
@staticmethod
def create_data_source_config() -> Dict[str, Any]:
"""Create configuration for Firecrawl data source."""
return {
"name": "firecrawl",
"display_name": "Firecrawl Web Scraper",
"description": "Import web content using Firecrawl's powerful scraping capabilities",
"icon": "🌐",
"category": "web",
"version": "1.0.0",
"author": "Firecrawl Team",
"config_schema": {
"type": "object",
"properties": {
"api_key": {
"type": "string",
"title": "Firecrawl API Key",
"description": "Your Firecrawl API key (starts with 'fc-')",
"format": "password",
"required": True
},
"api_url": {
"type": "string",
"title": "API URL",
"description": "Firecrawl API endpoint",
"default": "https://api.firecrawl.dev",
"required": False
},
"max_retries": {
"type": "integer",
"title": "Max Retries",
"description": "Maximum number of retry attempts",
"default": 3,
"minimum": 1,
"maximum": 10
},
"timeout": {
"type": "integer",
"title": "Timeout (seconds)",
"description": "Request timeout in seconds",
"default": 30,
"minimum": 5,
"maximum": 300
},
"rate_limit_delay": {
"type": "number",
"title": "Rate Limit Delay",
"description": "Delay between requests in seconds",
"default": 1.0,
"minimum": 0.1,
"maximum": 10.0
}
},
"required": ["api_key"]
}
}
@staticmethod
def create_scraping_form() -> Dict[str, Any]:
"""Create form for scraping configuration."""
return {
"type": "form",
"title": "Firecrawl Web Scraping",
"description": "Configure web scraping parameters",
"fields": [
{
"name": "urls",
"type": "array",
"title": "URLs to Scrape",
"description": "Enter URLs to scrape (one per line)",
"items": {
"type": "string",
"format": "uri"
},
"required": True,
"minItems": 1
},
{
"name": "scrape_type",
"type": "string",
"title": "Scrape Type",
"description": "Choose scraping method",
"enum": ["single", "crawl", "batch"],
"enumNames": ["Single URL", "Crawl Website", "Batch URLs"],
"default": "single",
"required": True
},
{
"name": "formats",
"type": "array",
"title": "Output Formats",
"description": "Select output formats",
"items": {
"type": "string",
"enum": ["markdown", "html", "links", "screenshot"]
},
"default": ["markdown", "html"],
"required": True
},
{
"name": "crawl_limit",
"type": "integer",
"title": "Crawl Limit",
"description": "Maximum number of pages to crawl (for crawl type)",
"default": 100,
"minimum": 1,
"maximum": 1000,
"condition": {
"field": "scrape_type",
"equals": "crawl"
}
},
{
"name": "extract_options",
"type": "object",
"title": "Extraction Options",
"description": "Advanced extraction settings",
"properties": {
"extractMainContent": {
"type": "boolean",
"title": "Extract Main Content Only",
"default": True
},
"excludeTags": {
"type": "array",
"title": "Exclude Tags",
"description": "HTML tags to exclude",
"items": {"type": "string"},
"default": ["nav", "footer", "header", "aside"]
},
"includeTags": {
"type": "array",
"title": "Include Tags",
"description": "HTML tags to include",
"items": {"type": "string"},
"default": ["main", "article", "section", "div", "p"]
}
}
}
]
}
@staticmethod
def create_progress_component() -> Dict[str, Any]:
"""Create progress tracking component."""
return {
"type": "progress",
"title": "Scraping Progress",
"description": "Track the progress of your web scraping job",
"properties": {
"show_percentage": True,
"show_eta": True,
"show_details": True
}
}
@staticmethod
def create_results_view() -> Dict[str, Any]:
"""Create results display component."""
return {
"type": "results",
"title": "Scraping Results",
"description": "View and manage scraped content",
"properties": {
"show_preview": True,
"show_metadata": True,
"allow_editing": True,
"show_chunks": True
}
}
@staticmethod
def create_error_handler() -> Dict[str, Any]:
"""Create error handling component."""
return {
"type": "error_handler",
"title": "Error Handling",
"description": "Handle scraping errors and retries",
"properties": {
"show_retry_button": True,
"show_error_details": True,
"auto_retry": False,
"max_retries": 3
}
}
@staticmethod
def create_validation_rules() -> Dict[str, Any]:
"""Create validation rules for Firecrawl integration."""
return {
"url_validation": {
"pattern": r"^https?://.+",
"message": "URL must start with http:// or https://"
},
"api_key_validation": {
"pattern": r"^fc-[a-zA-Z0-9]+$",
"message": "API key must start with 'fc-' followed by alphanumeric characters"
},
"rate_limit_validation": {
"min": 0.1,
"max": 10.0,
"message": "Rate limit delay must be between 0.1 and 10.0 seconds"
}
}
@staticmethod
def create_help_text() -> Dict[str, str]:
"""Create help text for users."""
return {
"api_key_help": "Get your API key from https://firecrawl.dev. Sign up for a free account to get started.",
"url_help": "Enter the URLs you want to scrape. You can add multiple URLs for batch processing.",
"crawl_help": "Crawling will follow links from the starting URL and scrape all accessible pages within the limit.",
"formats_help": "Choose the output formats you need. Markdown is recommended for RAG processing.",
"extract_help": "Extraction options help filter content to get only the main content without navigation and ads."
}
@staticmethod
def create_ui_schema() -> Dict[str, Any]:
"""Create complete UI schema for Firecrawl integration."""
return {
"version": "1.0.0",
"components": {
"data_source_config": FirecrawlUIBuilder.create_data_source_config(),
"scraping_form": FirecrawlUIBuilder.create_scraping_form(),
"progress_component": FirecrawlUIBuilder.create_progress_component(),
"results_view": FirecrawlUIBuilder.create_results_view(),
"error_handler": FirecrawlUIBuilder.create_error_handler()
},
"validation_rules": FirecrawlUIBuilder.create_validation_rules(),
"help_text": FirecrawlUIBuilder.create_help_text(),
"workflow": [
"configure_data_source",
"setup_scraping_parameters",
"start_scraping_job",
"monitor_progress",
"review_results",
"import_to_ragflow"
]
}
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
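A quick sketch of consuming the builder above: render the UI schema and check keys against the published validation rule (values illustrative):

import json
import re

from firecrawl_ui import FirecrawlUIBuilder

schema = FirecrawlUIBuilder.create_ui_schema()
print(json.dumps(schema["workflow"], indent=2))  # the six workflow steps

api_key_rule = FirecrawlUIBuilder.create_validation_rules()["api_key_validation"]
assert re.match(api_key_rule["pattern"], "fc-abc123")        # matches the documented format
assert not re.match(api_key_rule["pattern"], "invalid-key")  # rejected by the rule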
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/intergrations/firecrawl/integration.py | intergrations/firecrawl/integration.py | """
RAGFlow Integration Entry Point for Firecrawl
This file provides the main entry point for the Firecrawl integration with RAGFlow.
It follows RAGFlow's integration patterns and provides the necessary interfaces.
"""
from typing import Dict, Any
import logging
from ragflow_integration import RAGFlowFirecrawlIntegration, create_firecrawl_integration
from firecrawl_ui import FirecrawlUIBuilder
# Set up logging
logger = logging.getLogger(__name__)
class FirecrawlRAGFlowPlugin:
"""
Main plugin class for Firecrawl integration with RAGFlow.
This class provides the interface that RAGFlow expects from integrations.
"""
def __init__(self):
"""Initialize the Firecrawl plugin."""
self.name = "firecrawl"
self.display_name = "Firecrawl Web Scraper"
self.description = "Import web content using Firecrawl's powerful scraping capabilities"
self.version = "1.0.0"
self.author = "Firecrawl Team"
self.category = "web"
self.icon = "🌐"
logger.info(f"Initialized {self.display_name} plugin v{self.version}")
def get_plugin_info(self) -> Dict[str, Any]:
"""Get plugin information for RAGFlow."""
return {
"name": self.name,
"display_name": self.display_name,
"description": self.description,
"version": self.version,
"author": self.author,
"category": self.category,
"icon": self.icon,
"supported_formats": ["markdown", "html", "links", "screenshot"],
"supported_scrape_types": ["single", "crawl", "batch"]
}
def get_config_schema(self) -> Dict[str, Any]:
"""Get configuration schema for RAGFlow."""
return FirecrawlUIBuilder.create_data_source_config()["config_schema"]
def get_ui_schema(self) -> Dict[str, Any]:
"""Get UI schema for RAGFlow."""
return FirecrawlUIBuilder.create_ui_schema()
def validate_config(self, config: Dict[str, Any]) -> Dict[str, Any]:
"""Validate configuration and return any errors."""
try:
integration = create_firecrawl_integration(config)
return integration.validate_config(config)
except Exception as e:
logger.error(f"Configuration validation error: {e}")
return {"general": str(e)}
def test_connection(self, config: Dict[str, Any]) -> Dict[str, Any]:
"""Test connection to Firecrawl API."""
try:
integration = create_firecrawl_integration(config)
# Run the async test_connection method
import asyncio
return asyncio.run(integration.test_connection())
except Exception as e:
logger.error(f"Connection test error: {e}")
return {
"success": False,
"error": str(e),
"message": "Connection test failed"
}
def create_integration(self, config: Dict[str, Any]) -> RAGFlowFirecrawlIntegration:
"""Create and return a Firecrawl integration instance."""
return create_firecrawl_integration(config)
def get_help_text(self) -> Dict[str, str]:
"""Get help text for users."""
return FirecrawlUIBuilder.create_help_text()
def get_validation_rules(self) -> Dict[str, Any]:
"""Get validation rules for configuration."""
return FirecrawlUIBuilder.create_validation_rules()
# RAGFlow integration entry points
def get_plugin() -> FirecrawlRAGFlowPlugin:
"""Get the plugin instance for RAGFlow."""
return FirecrawlRAGFlowPlugin()
def get_integration(config: Dict[str, Any]) -> RAGFlowFirecrawlIntegration:
"""Get an integration instance with the given configuration."""
return create_firecrawl_integration(config)
def get_config_schema() -> Dict[str, Any]:
"""Get the configuration schema."""
return FirecrawlUIBuilder.create_data_source_config()["config_schema"]
def get_ui_schema() -> Dict[str, Any]:
"""Get the UI schema."""
return FirecrawlUIBuilder.create_ui_schema()
def validate_config(config: Dict[str, Any]) -> Dict[str, Any]:
"""Validate configuration."""
try:
integration = create_firecrawl_integration(config)
return integration.validate_config(config)
except Exception as e:
return {"general": str(e)}
def test_connection(config: Dict[str, Any]) -> Dict[str, Any]:
    """Test connection to Firecrawl API."""
    try:
        import asyncio
        integration = create_firecrawl_integration(config)
        # test_connection is a coroutine; run it to completion so callers get a dict
        return asyncio.run(integration.test_connection())
except Exception as e:
return {
"success": False,
"error": str(e),
"message": "Connection test failed"
}
# Export main functions and classes
__all__ = [
"FirecrawlRAGFlowPlugin",
"get_plugin",
"get_integration",
"get_config_schema",
"get_ui_schema",
"validate_config",
"test_connection",
"RAGFlowFirecrawlIntegration",
"create_firecrawl_integration"
]
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
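Assuming this module is importable as `integration`, the entry points above can be exercised like so (the API keys are placeholders):

from integration import get_plugin, validate_config

plugin = get_plugin()
print(plugin.get_plugin_info()["display_name"])      # "Firecrawl Web Scraper"

print(validate_config({"api_key": "fc-demo-key"}))   # {} -> no errors
print(validate_config({"api_key": "wrong-format"}))  # {"general": ...} from the raised ValueError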
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/intergrations/firecrawl/ragflow_integration.py | intergrations/firecrawl/ragflow_integration.py | """
Main integration file for Firecrawl with RAGFlow.
This file provides the interface between RAGFlow and the Firecrawl plugin.
"""
import logging
from typing import List, Dict, Any
from firecrawl_connector import FirecrawlConnector
from firecrawl_config import FirecrawlConfig
from firecrawl_processor import FirecrawlProcessor, RAGFlowDocument
from firecrawl_ui import FirecrawlUIBuilder
class RAGFlowFirecrawlIntegration:
"""Main integration class for Firecrawl with RAGFlow."""
def __init__(self, config: FirecrawlConfig):
"""Initialize the integration."""
self.config = config
self.connector = FirecrawlConnector(config)
self.processor = FirecrawlProcessor()
self.logger = logging.getLogger(__name__)
async def scrape_and_import(self, urls: List[str],
formats: List[str] = None,
extract_options: Dict[str, Any] = None) -> List[RAGFlowDocument]:
"""Scrape URLs and convert to RAGFlow documents."""
if formats is None:
formats = ["markdown", "html"]
        async with self.connector:
            # Scrape URLs, forwarding any extraction options to the connector
            scraped_contents = await self.connector.batch_scrape(urls, formats, extract_options)
# Process into RAGFlow documents
documents = self.processor.process_batch(scraped_contents)
return documents
async def crawl_and_import(self, start_url: str,
limit: int = 100,
scrape_options: Dict[str, Any] = None) -> List[RAGFlowDocument]:
"""Crawl a website and convert to RAGFlow documents."""
if scrape_options is None:
scrape_options = {"formats": ["markdown", "html"]}
async with self.connector:
# Start crawl job
crawl_job = await self.connector.start_crawl(start_url, limit, scrape_options)
if crawl_job.error:
raise Exception(f"Failed to start crawl: {crawl_job.error}")
# Wait for completion
completed_job = await self.connector.wait_for_crawl_completion(crawl_job.job_id)
if completed_job.error:
raise Exception(f"Crawl failed: {completed_job.error}")
# Process into RAGFlow documents
documents = self.processor.process_batch(completed_job.data or [])
return documents
def get_ui_schema(self) -> Dict[str, Any]:
"""Get UI schema for RAGFlow integration."""
return FirecrawlUIBuilder.create_ui_schema()
def validate_config(self, config_dict: Dict[str, Any]) -> Dict[str, Any]:
"""Validate configuration and return any errors."""
errors = {}
# Validate API key
api_key = config_dict.get("api_key", "")
if not api_key:
errors["api_key"] = "API key is required"
elif not api_key.startswith("fc-"):
errors["api_key"] = "API key must start with 'fc-'"
# Validate API URL
api_url = config_dict.get("api_url", "https://api.firecrawl.dev")
if not api_url.startswith("http"):
errors["api_url"] = "API URL must start with http:// or https://"
# Validate numeric fields
try:
max_retries = int(config_dict.get("max_retries", 3))
if max_retries < 1 or max_retries > 10:
errors["max_retries"] = "Max retries must be between 1 and 10"
except (ValueError, TypeError):
errors["max_retries"] = "Max retries must be a valid integer"
try:
timeout = int(config_dict.get("timeout", 30))
if timeout < 5 or timeout > 300:
errors["timeout"] = "Timeout must be between 5 and 300 seconds"
except (ValueError, TypeError):
errors["timeout"] = "Timeout must be a valid integer"
try:
rate_limit_delay = float(config_dict.get("rate_limit_delay", 1.0))
if rate_limit_delay < 0.1 or rate_limit_delay > 10.0:
errors["rate_limit_delay"] = "Rate limit delay must be between 0.1 and 10.0 seconds"
except (ValueError, TypeError):
errors["rate_limit_delay"] = "Rate limit delay must be a valid number"
return errors
def create_config(self, config_dict: Dict[str, Any]) -> FirecrawlConfig:
"""Create FirecrawlConfig from dictionary."""
return FirecrawlConfig.from_dict(config_dict)
async def test_connection(self) -> Dict[str, Any]:
"""Test the connection to Firecrawl API."""
try:
async with self.connector:
# Try to scrape a simple URL to test connection
test_url = "https://httpbin.org/json"
result = await self.connector.scrape_url(test_url, ["markdown"])
if result.error:
return {
"success": False,
"error": result.error,
"message": "Failed to connect to Firecrawl API"
}
return {
"success": True,
"message": "Successfully connected to Firecrawl API",
"test_url": test_url,
"response_time": "N/A" # Could be enhanced to measure actual response time
}
except Exception as e:
return {
"success": False,
"error": str(e),
"message": "Connection test failed"
}
def get_supported_formats(self) -> List[str]:
"""Get list of supported output formats."""
return ["markdown", "html", "links", "screenshot"]
def get_supported_scrape_types(self) -> List[str]:
"""Get list of supported scrape types."""
return ["single", "crawl", "batch"]
def get_help_text(self) -> Dict[str, str]:
"""Get help text for users."""
return FirecrawlUIBuilder.create_help_text()
def get_validation_rules(self) -> Dict[str, Any]:
"""Get validation rules for configuration."""
return FirecrawlUIBuilder.create_validation_rules()
# Factory function for creating integration instance
def create_firecrawl_integration(config_dict: Dict[str, Any]) -> RAGFlowFirecrawlIntegration:
"""Create a Firecrawl integration instance from configuration."""
config = FirecrawlConfig.from_dict(config_dict)
return RAGFlowFirecrawlIntegration(config)
# Export main classes and functions
__all__ = [
"RAGFlowFirecrawlIntegration",
"create_firecrawl_integration",
"FirecrawlConfig",
"FirecrawlConnector",
"FirecrawlProcessor",
"RAGFlowDocument"
]
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
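A minimal end-to-end sketch of the integration class; the API key and URL are placeholders:

import asyncio

from ragflow_integration import create_firecrawl_integration

config = {"api_key": "fc-your-api-key"}  # placeholder key
integration = create_firecrawl_integration(config)

# scrape_and_import is async, so drive it with asyncio.run
documents = asyncio.run(integration.scrape_and_import(["https://example.com"]))
for doc in documents:
    print(doc.title, doc.source_url, len(doc.content))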
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/intergrations/firecrawl/firecrawl_connector.py | intergrations/firecrawl/firecrawl_connector.py | """
Main connector class for integrating Firecrawl with RAGFlow.
"""
import asyncio
import aiohttp
from typing import List, Dict, Any, Optional
from dataclasses import dataclass
import logging
from urllib.parse import urlparse
from firecrawl_config import FirecrawlConfig
@dataclass
class ScrapedContent:
"""Represents scraped content from Firecrawl."""
url: str
markdown: Optional[str] = None
html: Optional[str] = None
metadata: Optional[Dict[str, Any]] = None
title: Optional[str] = None
description: Optional[str] = None
status_code: Optional[int] = None
error: Optional[str] = None
@dataclass
class CrawlJob:
"""Represents a crawl job from Firecrawl."""
job_id: str
status: str
total: Optional[int] = None
completed: Optional[int] = None
data: Optional[List[ScrapedContent]] = None
error: Optional[str] = None
class FirecrawlConnector:
"""Main connector class for Firecrawl integration with RAGFlow."""
def __init__(self, config: FirecrawlConfig):
"""Initialize the Firecrawl connector."""
self.config = config
self.logger = logging.getLogger(__name__)
self.session: Optional[aiohttp.ClientSession] = None
self._rate_limit_semaphore = asyncio.Semaphore(config.max_concurrent_requests)
async def __aenter__(self):
"""Async context manager entry."""
await self._create_session()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
"""Async context manager exit."""
await self._close_session()
async def _create_session(self):
"""Create aiohttp session with proper headers."""
headers = {
"Authorization": f"Bearer {self.config.api_key}",
"Content-Type": "application/json",
"User-Agent": "RAGFlow-Firecrawl-Plugin/1.0.0"
}
timeout = aiohttp.ClientTimeout(total=self.config.timeout)
self.session = aiohttp.ClientSession(
headers=headers,
timeout=timeout
)
async def _close_session(self):
"""Close aiohttp session."""
if self.session:
await self.session.close()
async def _make_request(self, method: str, endpoint: str, **kwargs) -> Dict[str, Any]:
"""Make HTTP request with rate limiting and retry logic."""
async with self._rate_limit_semaphore:
# Rate limiting
await asyncio.sleep(self.config.rate_limit_delay)
url = f"{self.config.api_url}{endpoint}"
for attempt in range(self.config.max_retries):
try:
async with self.session.request(method, url, **kwargs) as response:
if response.status == 429: # Rate limited
wait_time = 2 ** attempt
self.logger.warning(f"Rate limited, waiting {wait_time}s")
await asyncio.sleep(wait_time)
continue
response.raise_for_status()
return await response.json()
except aiohttp.ClientError as e:
self.logger.error(f"Request failed (attempt {attempt + 1}): {e}")
if attempt == self.config.max_retries - 1:
raise
await asyncio.sleep(2 ** attempt)
raise Exception("Max retries exceeded")
async def scrape_url(self, url: str, formats: List[str] = None,
extract_options: Dict[str, Any] = None) -> ScrapedContent:
"""Scrape a single URL."""
if formats is None:
formats = ["markdown", "html"]
payload = {
"url": url,
"formats": formats
}
if extract_options:
payload["extractOptions"] = extract_options
try:
response = await self._make_request("POST", "/v2/scrape", json=payload)
if not response.get("success"):
return ScrapedContent(url=url, error=response.get("error", "Unknown error"))
data = response.get("data", {})
metadata = data.get("metadata", {})
return ScrapedContent(
url=url,
markdown=data.get("markdown"),
html=data.get("html"),
metadata=metadata,
title=metadata.get("title"),
description=metadata.get("description"),
status_code=metadata.get("statusCode")
)
except Exception as e:
self.logger.error(f"Failed to scrape {url}: {e}")
return ScrapedContent(url=url, error=str(e))
async def start_crawl(self, url: str, limit: int = 100,
scrape_options: Dict[str, Any] = None) -> CrawlJob:
"""Start a crawl job."""
if scrape_options is None:
scrape_options = {"formats": ["markdown", "html"]}
payload = {
"url": url,
"limit": limit,
"scrapeOptions": scrape_options
}
try:
response = await self._make_request("POST", "/v2/crawl", json=payload)
if not response.get("success"):
return CrawlJob(
job_id="",
status="failed",
error=response.get("error", "Unknown error")
)
job_id = response.get("id")
return CrawlJob(job_id=job_id, status="started")
except Exception as e:
self.logger.error(f"Failed to start crawl for {url}: {e}")
return CrawlJob(job_id="", status="failed", error=str(e))
async def get_crawl_status(self, job_id: str) -> CrawlJob:
"""Get the status of a crawl job."""
try:
response = await self._make_request("GET", f"/v2/crawl/{job_id}")
if not response.get("success"):
return CrawlJob(
job_id=job_id,
status="failed",
error=response.get("error", "Unknown error")
)
status = response.get("status", "unknown")
total = response.get("total")
data = response.get("data", [])
# Convert data to ScrapedContent objects
scraped_content = []
for item in data:
metadata = item.get("metadata", {})
scraped_content.append(ScrapedContent(
url=metadata.get("sourceURL", ""),
markdown=item.get("markdown"),
html=item.get("html"),
metadata=metadata,
title=metadata.get("title"),
description=metadata.get("description"),
status_code=metadata.get("statusCode")
))
return CrawlJob(
job_id=job_id,
status=status,
total=total,
completed=len(scraped_content),
data=scraped_content
)
except Exception as e:
self.logger.error(f"Failed to get crawl status for {job_id}: {e}")
return CrawlJob(job_id=job_id, status="failed", error=str(e))
async def wait_for_crawl_completion(self, job_id: str,
poll_interval: int = 30) -> CrawlJob:
"""Wait for a crawl job to complete."""
while True:
job = await self.get_crawl_status(job_id)
if job.status in ["completed", "failed", "cancelled"]:
return job
self.logger.info(f"Crawl {job_id} status: {job.status}")
await asyncio.sleep(poll_interval)
    async def batch_scrape(self, urls: List[str],
                           formats: List[str] = None,
                           extract_options: Dict[str, Any] = None) -> List[ScrapedContent]:
        """Scrape multiple URLs concurrently."""
        if formats is None:
            formats = ["markdown", "html"]
        tasks = [self.scrape_url(url, formats, extract_options) for url in urls]
results = await asyncio.gather(*tasks, return_exceptions=True)
# Handle exceptions
processed_results = []
for i, result in enumerate(results):
if isinstance(result, Exception):
processed_results.append(ScrapedContent(
url=urls[i],
error=str(result)
))
else:
processed_results.append(result)
return processed_results
def validate_url(self, url: str) -> bool:
"""Validate if URL is properly formatted."""
try:
result = urlparse(url)
return all([result.scheme, result.netloc])
except Exception:
return False
def extract_domain(self, url: str) -> str:
"""Extract domain from URL."""
try:
return urlparse(url).netloc
except Exception:
return ""
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
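The connector can also be driven directly. A sketch assuming a valid key (placeholder below); the async context manager ensures the aiohttp session is created and closed properly:

import asyncio

from firecrawl_config import FirecrawlConfig
from firecrawl_connector import FirecrawlConnector

async def demo() -> None:
    config = FirecrawlConfig(api_key="fc-your-api-key")  # placeholder key
    async with FirecrawlConnector(config) as connector:
        result = await connector.scrape_url("https://example.com", ["markdown"])
        if result.error:
            print(f"Scrape failed: {result.error}")
        else:
            print(result.title, len(result.markdown or ""))

asyncio.run(demo())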
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/intergrations/firecrawl/firecrawl_processor.py | intergrations/firecrawl/firecrawl_processor.py | """
Content processor for converting Firecrawl output to RAGFlow document format.
"""
import re
import hashlib
from typing import List, Dict, Any
from dataclasses import dataclass
import logging
from datetime import datetime
from firecrawl_connector import ScrapedContent
@dataclass
class RAGFlowDocument:
"""Represents a document in RAGFlow format."""
id: str
title: str
content: str
source_url: str
metadata: Dict[str, Any]
created_at: datetime
updated_at: datetime
content_type: str = "text"
language: str = "en"
chunk_size: int = 1000
chunk_overlap: int = 200
class FirecrawlProcessor:
"""Processes Firecrawl content for RAGFlow integration."""
def __init__(self):
"""Initialize the processor."""
self.logger = logging.getLogger(__name__)
def generate_document_id(self, url: str, content: str) -> str:
"""Generate a unique document ID."""
# Create a hash based on URL and content
content_hash = hashlib.md5(f"{url}:{content[:100]}".encode()).hexdigest()
return f"firecrawl_{content_hash}"
def clean_content(self, content: str) -> str:
"""Clean and normalize content."""
if not content:
return ""
# Remove excessive whitespace
content = re.sub(r'\s+', ' ', content)
# Remove HTML tags if present
content = re.sub(r'<[^>]+>', '', content)
# Remove special characters that might cause issues
content = re.sub(r'[^\w\s\.\,\!\?\;\:\-\(\)\[\]\"\']', '', content)
return content.strip()
def extract_title(self, content: ScrapedContent) -> str:
"""Extract title from scraped content."""
if content.title:
return content.title
if content.metadata and content.metadata.get("title"):
return content.metadata["title"]
# Extract title from markdown if available
if content.markdown:
title_match = re.search(r'^#\s+(.+)$', content.markdown, re.MULTILINE)
if title_match:
return title_match.group(1).strip()
# Fallback to URL
return content.url.split('/')[-1] or content.url
def extract_description(self, content: ScrapedContent) -> str:
"""Extract description from scraped content."""
if content.description:
return content.description
if content.metadata and content.metadata.get("description"):
return content.metadata["description"]
# Extract first paragraph from markdown
if content.markdown:
# Remove headers and get first paragraph
text = re.sub(r'^#+\s+.*$', '', content.markdown, flags=re.MULTILINE)
paragraphs = [p.strip() for p in text.split('\n\n') if p.strip()]
if paragraphs:
return paragraphs[0][:200] + "..." if len(paragraphs[0]) > 200 else paragraphs[0]
return ""
def extract_language(self, content: ScrapedContent) -> str:
"""Extract language from content metadata."""
if content.metadata and content.metadata.get("language"):
return content.metadata["language"]
# Simple language detection based on common words
if content.markdown:
text = content.markdown.lower()
if any(word in text for word in ["the", "and", "or", "but", "in", "on", "at"]):
return "en"
elif any(word in text for word in ["le", "la", "les", "de", "du", "des"]):
return "fr"
elif any(word in text for word in ["der", "die", "das", "und", "oder"]):
return "de"
elif any(word in text for word in ["el", "la", "los", "las", "de", "del"]):
return "es"
return "en" # Default to English
def create_metadata(self, content: ScrapedContent) -> Dict[str, Any]:
"""Create comprehensive metadata for RAGFlow document."""
metadata = {
"source": "firecrawl",
"url": content.url,
"domain": self.extract_domain(content.url),
"scraped_at": datetime.utcnow().isoformat(),
"status_code": content.status_code,
"content_length": len(content.markdown or ""),
"has_html": bool(content.html),
"has_markdown": bool(content.markdown)
}
# Add original metadata if available
if content.metadata:
metadata.update({
"original_title": content.metadata.get("title"),
"original_description": content.metadata.get("description"),
"original_language": content.metadata.get("language"),
"original_keywords": content.metadata.get("keywords"),
"original_robots": content.metadata.get("robots"),
"og_title": content.metadata.get("ogTitle"),
"og_description": content.metadata.get("ogDescription"),
"og_image": content.metadata.get("ogImage"),
"og_url": content.metadata.get("ogUrl")
})
return metadata
def extract_domain(self, url: str) -> str:
"""Extract domain from URL."""
try:
from urllib.parse import urlparse
return urlparse(url).netloc
except Exception:
return ""
def process_content(self, content: ScrapedContent) -> RAGFlowDocument:
"""Process scraped content into RAGFlow document format."""
if content.error:
raise ValueError(f"Content has error: {content.error}")
# Determine primary content
primary_content = content.markdown or content.html or ""
if not primary_content:
raise ValueError("No content available to process")
# Clean content
cleaned_content = self.clean_content(primary_content)
# Extract metadata
title = self.extract_title(content)
language = self.extract_language(content)
metadata = self.create_metadata(content)
# Generate document ID
doc_id = self.generate_document_id(content.url, cleaned_content)
# Create RAGFlow document
document = RAGFlowDocument(
id=doc_id,
title=title,
content=cleaned_content,
source_url=content.url,
metadata=metadata,
created_at=datetime.utcnow(),
updated_at=datetime.utcnow(),
content_type="text",
language=language
)
return document
def process_batch(self, contents: List[ScrapedContent]) -> List[RAGFlowDocument]:
"""Process multiple scraped contents into RAGFlow documents."""
documents = []
for content in contents:
try:
document = self.process_content(content)
documents.append(document)
except Exception as e:
self.logger.error(f"Failed to process content from {content.url}: {e}")
continue
return documents
def chunk_content(self, document: RAGFlowDocument,
chunk_size: int = 1000,
chunk_overlap: int = 200) -> List[Dict[str, Any]]:
"""Chunk document content for RAG processing."""
content = document.content
chunks = []
if len(content) <= chunk_size:
return [{
"id": f"{document.id}_chunk_0",
"content": content,
"metadata": {
**document.metadata,
"chunk_index": 0,
"total_chunks": 1
}
}]
# Split content into chunks
start = 0
chunk_index = 0
while start < len(content):
end = start + chunk_size
# Try to break at sentence boundary
if end < len(content):
# Look for sentence endings
sentence_end = content.rfind('.', start, end)
if sentence_end > start + chunk_size // 2:
end = sentence_end + 1
chunk_content = content[start:end].strip()
if chunk_content:
chunks.append({
"id": f"{document.id}_chunk_{chunk_index}",
"content": chunk_content,
"metadata": {
**document.metadata,
"chunk_index": chunk_index,
"total_chunks": len(chunks) + 1, # Will be updated
"chunk_start": start,
"chunk_end": end
}
})
chunk_index += 1
# Move start position with overlap
start = end - chunk_overlap
if start >= len(content):
break
# Update total chunks count
for chunk in chunks:
chunk["metadata"]["total_chunks"] = len(chunks)
return chunks
def validate_document(self, document: RAGFlowDocument) -> bool:
"""Validate RAGFlow document."""
if not document.id:
return False
if not document.title:
return False
if not document.content:
return False
if not document.source_url:
return False
return True
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
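The processor is pure Python and can be exercised offline with hand-built input (no network or API key needed):

from firecrawl_connector import ScrapedContent
from firecrawl_processor import FirecrawlProcessor

processor = FirecrawlProcessor()
content = ScrapedContent(
    url="https://example.com/post",
    markdown="# Example Title\n\nFirst paragraph of body text.",
    status_code=200,
)
document = processor.process_content(content)
print(document.title)  # "Example Title", pulled from the markdown heading
chunks = processor.chunk_content(document, chunk_size=500, chunk_overlap=100)
print(len(chunks), chunks[0]["id"])  # short content yields a single "<doc_id>_chunk_0"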
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/intergrations/firecrawl/__init__.py | intergrations/firecrawl/__init__.py | """
Firecrawl Plugin for RAGFlow
This plugin integrates Firecrawl's web scraping capabilities into RAGFlow,
allowing users to import web content directly into their RAG workflows.
"""
__version__ = "1.0.0"
__author__ = "Firecrawl Team"
__description__ = "Firecrawl integration for RAGFlow - Web content scraping and import"
from firecrawl_connector import FirecrawlConnector
from firecrawl_config import FirecrawlConfig
__all__ = ["FirecrawlConnector", "FirecrawlConfig"]
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/intergrations/firecrawl/firecrawl_config.py | intergrations/firecrawl/firecrawl_config.py | """
Configuration management for Firecrawl integration with RAGFlow.
"""
import os
from typing import Dict, Any
from dataclasses import dataclass
import json
@dataclass
class FirecrawlConfig:
"""Configuration class for Firecrawl integration."""
api_key: str
api_url: str = "https://api.firecrawl.dev"
max_retries: int = 3
timeout: int = 30
rate_limit_delay: float = 1.0
max_concurrent_requests: int = 5
def __post_init__(self):
"""Validate configuration after initialization."""
if not self.api_key:
raise ValueError("Firecrawl API key is required")
if not self.api_key.startswith("fc-"):
raise ValueError("Invalid Firecrawl API key format. Must start with 'fc-'")
if self.max_retries < 1 or self.max_retries > 10:
raise ValueError("Max retries must be between 1 and 10")
if self.timeout < 5 or self.timeout > 300:
raise ValueError("Timeout must be between 5 and 300 seconds")
if self.rate_limit_delay < 0.1 or self.rate_limit_delay > 10.0:
raise ValueError("Rate limit delay must be between 0.1 and 10.0 seconds")
@classmethod
def from_env(cls) -> "FirecrawlConfig":
"""Create configuration from environment variables."""
api_key = os.getenv("FIRECRAWL_API_KEY")
if not api_key:
raise ValueError("FIRECRAWL_API_KEY environment variable not set")
return cls(
api_key=api_key,
api_url=os.getenv("FIRECRAWL_API_URL", "https://api.firecrawl.dev"),
max_retries=int(os.getenv("FIRECRAWL_MAX_RETRIES", "3")),
timeout=int(os.getenv("FIRECRAWL_TIMEOUT", "30")),
rate_limit_delay=float(os.getenv("FIRECRAWL_RATE_LIMIT_DELAY", "1.0")),
max_concurrent_requests=int(os.getenv("FIRECRAWL_MAX_CONCURRENT", "5"))
)
@classmethod
def from_dict(cls, config_dict: Dict[str, Any]) -> "FirecrawlConfig":
"""Create configuration from dictionary."""
return cls(**config_dict)
def to_dict(self) -> Dict[str, Any]:
"""Convert configuration to dictionary."""
return {
"api_key": self.api_key,
"api_url": self.api_url,
"max_retries": self.max_retries,
"timeout": self.timeout,
"rate_limit_delay": self.rate_limit_delay,
"max_concurrent_requests": self.max_concurrent_requests
}
def to_json(self) -> str:
"""Convert configuration to JSON string."""
return json.dumps(self.to_dict(), indent=2)
@classmethod
def from_json(cls, json_str: str) -> "FirecrawlConfig":
"""Create configuration from JSON string."""
config_dict = json.loads(json_str)
return cls.from_dict(config_dict)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
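Typical construction paths for the config, assuming the environment variable is set (the key is a placeholder):

import os

from firecrawl_config import FirecrawlConfig

os.environ["FIRECRAWL_API_KEY"] = "fc-demo-key"  # placeholder; normally set outside the process
config = FirecrawlConfig.from_env()
print(config.to_json())

# Round-trip through JSON; dataclass equality compares field by field
restored = FirecrawlConfig.from_json(config.to_json())
assert restored == config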
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/intergrations/firecrawl/example_usage.py | intergrations/firecrawl/example_usage.py | """
Example usage of the Firecrawl integration with RAGFlow.
"""
import asyncio
import logging
# Absolute imports so the file can run as a script (matches the sibling modules)
from ragflow_integration import RAGFlowFirecrawlIntegration, create_firecrawl_integration
from firecrawl_config import FirecrawlConfig
async def example_single_url_scraping():
"""Example of scraping a single URL."""
print("=== Single URL Scraping Example ===")
# Configuration
config = {
"api_key": "fc-your-api-key-here", # Replace with your actual API key
"api_url": "https://api.firecrawl.dev",
"max_retries": 3,
"timeout": 30,
"rate_limit_delay": 1.0
}
# Create integration
integration = create_firecrawl_integration(config)
# Test connection
connection_test = await integration.test_connection()
print(f"Connection test: {connection_test}")
if not connection_test["success"]:
print("Connection failed, please check your API key")
return
# Scrape a single URL
urls = ["https://httpbin.org/json"]
documents = await integration.scrape_and_import(urls)
for doc in documents:
print(f"Title: {doc.title}")
print(f"URL: {doc.source_url}")
print(f"Content length: {len(doc.content)}")
print(f"Language: {doc.language}")
print(f"Metadata: {doc.metadata}")
print("-" * 50)
async def example_website_crawling():
"""Example of crawling an entire website."""
print("=== Website Crawling Example ===")
# Configuration
config = {
"api_key": "fc-your-api-key-here", # Replace with your actual API key
"api_url": "https://api.firecrawl.dev",
"max_retries": 3,
"timeout": 30,
"rate_limit_delay": 1.0
}
# Create integration
integration = create_firecrawl_integration(config)
# Crawl a website
start_url = "https://httpbin.org"
documents = await integration.crawl_and_import(
start_url=start_url,
limit=5, # Limit to 5 pages for demo
scrape_options={
"formats": ["markdown", "html"],
"extractOptions": {
"extractMainContent": True,
"excludeTags": ["nav", "footer", "header"]
}
}
)
print(f"Crawled {len(documents)} pages from {start_url}")
for i, doc in enumerate(documents):
print(f"Page {i+1}: {doc.title}")
print(f"URL: {doc.source_url}")
print(f"Content length: {len(doc.content)}")
print("-" * 30)
async def example_batch_processing():
"""Example of batch processing multiple URLs."""
print("=== Batch Processing Example ===")
# Configuration
config = {
"api_key": "fc-your-api-key-here", # Replace with your actual API key
"api_url": "https://api.firecrawl.dev",
"max_retries": 3,
"timeout": 30,
"rate_limit_delay": 1.0
}
# Create integration
integration = create_firecrawl_integration(config)
# Batch scrape multiple URLs
urls = [
"https://httpbin.org/json",
"https://httpbin.org/html",
"https://httpbin.org/xml"
]
documents = await integration.scrape_and_import(
urls=urls,
formats=["markdown", "html"],
extract_options={
"extractMainContent": True,
"excludeTags": ["nav", "footer", "header"]
}
)
print(f"Processed {len(documents)} URLs")
for doc in documents:
print(f"Title: {doc.title}")
print(f"URL: {doc.source_url}")
print(f"Content length: {len(doc.content)}")
# Example of chunking for RAG processing
chunks = integration.processor.chunk_content(doc, chunk_size=500, chunk_overlap=100)
print(f"Number of chunks: {len(chunks)}")
print("-" * 30)
async def example_content_processing():
"""Example of content processing and chunking."""
print("=== Content Processing Example ===")
# Configuration
config = {
"api_key": "fc-your-api-key-here", # Replace with your actual API key
"api_url": "https://api.firecrawl.dev",
"max_retries": 3,
"timeout": 30,
"rate_limit_delay": 1.0
}
# Create integration
integration = create_firecrawl_integration(config)
# Scrape content
urls = ["https://httpbin.org/html"]
documents = await integration.scrape_and_import(urls)
for doc in documents:
print(f"Original document: {doc.title}")
print(f"Content length: {len(doc.content)}")
# Chunk the content
chunks = integration.processor.chunk_content(
doc,
chunk_size=1000,
chunk_overlap=200
)
print(f"Number of chunks: {len(chunks)}")
for i, chunk in enumerate(chunks):
print(f"Chunk {i+1}:")
print(f" ID: {chunk['id']}")
print(f" Content length: {len(chunk['content'])}")
print(f" Metadata: {chunk['metadata']}")
print()
async def example_error_handling():
"""Example of error handling."""
print("=== Error Handling Example ===")
# Configuration with invalid API key
config = {
"api_key": "invalid-key",
"api_url": "https://api.firecrawl.dev",
"max_retries": 3,
"timeout": 30,
"rate_limit_delay": 1.0
}
# Create integration
integration = create_firecrawl_integration(config)
# Test connection (should fail)
connection_test = await integration.test_connection()
print(f"Connection test with invalid key: {connection_test}")
# Try to scrape (should fail gracefully)
try:
urls = ["https://httpbin.org/json"]
documents = await integration.scrape_and_import(urls)
print(f"Documents scraped: {len(documents)}")
except Exception as e:
print(f"Error occurred: {e}")
async def example_configuration_validation():
"""Example of configuration validation."""
print("=== Configuration Validation Example ===")
# Test various configurations
test_configs = [
{
"api_key": "fc-valid-key",
"api_url": "https://api.firecrawl.dev",
"max_retries": 3,
"timeout": 30,
"rate_limit_delay": 1.0
},
{
"api_key": "invalid-key", # Invalid format
"api_url": "https://api.firecrawl.dev"
},
{
"api_key": "fc-valid-key",
"api_url": "invalid-url", # Invalid URL
"max_retries": 15, # Too high
"timeout": 500, # Too high
"rate_limit_delay": 15.0 # Too high
}
]
    # Validate with an integration built from the known-good config; constructing
    # FirecrawlConfig from the invalid dicts would raise in __post_init__ before
    # validate_config ever ran.
    validator = RAGFlowFirecrawlIntegration(FirecrawlConfig.from_dict(test_configs[0]))
    for i, config in enumerate(test_configs):
        print(f"Test configuration {i+1}:")
        errors = validator.validate_config(config)
if errors:
print(" Errors found:")
for field, error in errors.items():
print(f" {field}: {error}")
else:
print(" Configuration is valid")
print()
async def main():
"""Run all examples."""
# Set up logging
logging.basicConfig(level=logging.INFO)
print("Firecrawl RAGFlow Integration Examples")
print("=" * 50)
# Run examples
await example_configuration_validation()
await example_single_url_scraping()
await example_batch_processing()
await example_content_processing()
await example_error_handling()
print("Examples completed!")
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/sandbox/tests/sandbox_security_tests_full.py | sandbox/tests/sandbox_security_tests_full.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import os
import textwrap
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from enum import Enum
from typing import Dict, Optional
import requests
from pydantic import BaseModel
API_URL = os.getenv("SANDBOX_API_URL", "http://localhost:9385/run")
TIMEOUT = 15
MAX_WORKERS = 5
class ResultStatus(str, Enum):
SUCCESS = "success"
PROGRAM_ERROR = "program_error"
RESOURCE_LIMIT_EXCEEDED = "resource_limit_exceeded"
UNAUTHORIZED_ACCESS = "unauthorized_access"
RUNTIME_ERROR = "runtime_error"
PROGRAM_RUNNER_ERROR = "program_runner_error"
class ResourceLimitType(str, Enum):
TIME = "time"
MEMORY = "memory"
OUTPUT = "output"
class UnauthorizedAccessType(str, Enum):
DISALLOWED_SYSCALL = "disallowed_syscall"
FILE_ACCESS = "file_access"
NETWORK_ACCESS = "network_access"
class RuntimeErrorType(str, Enum):
SIGNALLED = "signalled"
NONZERO_EXIT = "nonzero_exit"
class ExecutionResult(BaseModel):
status: ResultStatus
stdout: str
stderr: str
exit_code: int
detail: Optional[str] = None
resource_limit_type: Optional[ResourceLimitType] = None
unauthorized_access_type: Optional[UnauthorizedAccessType] = None
runtime_error_type: Optional[RuntimeErrorType] = None
class TestResult(BaseModel):
name: str
passed: bool
duration: float
expected_failure: bool = False
result: Optional[ExecutionResult] = None
error: Optional[str] = None
validation_error: Optional[str] = None
def encode_code(code: str) -> str:
return base64.b64encode(code.encode("utf-8")).decode("utf-8")
def execute_single_test(name: str, code: str, language: str, arguments: dict, expect_fail: bool = False) -> TestResult:
"""Execute a single test case"""
payload = {
"code_b64": encode_code(textwrap.dedent(code)),
"language": language,
"arguments": arguments,
}
test_result = TestResult(name=name, passed=False, duration=0, expected_failure=expect_fail)
really_processed = False
try:
while not really_processed:
start_time = time.perf_counter()
resp = requests.post(API_URL, json=payload, timeout=TIMEOUT)
resp.raise_for_status()
response_data = resp.json()
                if response_data["exit_code"] == -429:  # too many requests
                    print(f"[{name}] Reached request limit, retrying...")
time.sleep(0.5)
continue
really_processed = True
print("-------------------")
print(f"{name}:\n{response_data}")
print("-------------------")
test_result.duration = time.perf_counter() - start_time
test_result.result = ExecutionResult(**response_data)
# Validate test result expectations
validate_test_result(name, expect_fail, test_result)
except requests.exceptions.RequestException as e:
test_result.duration = time.perf_counter() - start_time
test_result.error = f"Request failed: {str(e)}"
test_result.result = ExecutionResult(
status=ResultStatus.PROGRAM_RUNNER_ERROR,
stdout="",
stderr=str(e),
exit_code=-999,
detail="request_failed",
)
return test_result
def validate_test_result(name: str, expect_fail: bool, test_result: TestResult):
"""Validate if the test result meets expectations"""
if not test_result.result:
test_result.passed = False
test_result.validation_error = "No result returned"
return
test_result.passed = test_result.result.status == ResultStatus.SUCCESS
# General validation logic
if expect_fail:
# Tests expected to fail should return a non-success status
if test_result.passed:
test_result.validation_error = "Expected failure but actually succeeded"
else:
# Tests expected to succeed should return a success status
if not test_result.passed:
test_result.validation_error = f"Unexpected failure (status={test_result.result.status})"
def get_test_cases() -> Dict[str, dict]:
"""Return test cases (code, whether expected to fail)"""
return {
"1 Infinite loop: Should be forcibly terminated": {
"code": """
def main():
while True:
pass
""",
"should_fail": True,
"arguments": {},
"language": "python",
},
"2 Infinite loop: Should be forcibly terminated": {
"code": """
def main():
while True:
pass
""",
"should_fail": True,
"arguments": {},
"language": "python",
},
"3 Infinite loop: Should be forcibly terminated": {
"code": """
def main():
while True:
pass
""",
"should_fail": True,
"arguments": {},
"language": "python",
},
"4 Infinite loop: Should be forcibly terminated": {
"code": """
def main():
while True:
pass
""",
"should_fail": True,
"arguments": {},
"language": "python",
},
"5 Infinite loop: Should be forcibly terminated": {
"code": """
def main():
while True:
pass
""",
"should_fail": True,
"arguments": {},
"language": "python",
},
"6 Infinite loop: Should be forcibly terminated": {
"code": """
def main():
while True:
pass
""",
"should_fail": True,
"arguments": {},
"language": "python",
},
"7 Normal test: Python without dependencies": {
"code": """
def main():
return {"data": "hello, world"}
""",
"should_fail": False,
"arguments": {},
"language": "python",
},
"8 Normal test: Python with pandas, should pass without any error": {
"code": """
import pandas as pd
def main():
data = {'Name': ['Alice', 'Bob', 'Charlie'],
'Age': [25, 30, 35]}
df = pd.DataFrame(data)
""",
"should_fail": False,
"arguments": {},
"language": "python",
},
"9 Normal test: Nodejs without dependencies, should pass without any error": {
"code": """
const https = require('https');
async function main(args) {
return new Promise((resolve, reject) => {
const req = https.get('https://example.com/', (res) => {
let data = '';
res.on('data', (chunk) => {
data += chunk;
});
res.on('end', () => {
clearTimeout(timeout);
console.log('Body:', data);
resolve(data);
});
});
const timeout = setTimeout(() => {
req.destroy(new Error('Request timeout after 10s'));
}, 10000);
req.on('error', (err) => {
clearTimeout(timeout);
console.error('Error:', err.message);
reject(err);
});
});
}
module.exports = { main };
""",
"should_fail": False,
"arguments": {},
"language": "nodejs",
},
"10 Normal test: Nodejs with axios, should pass without any error": {
"code": """
const axios = require('axios');
async function main(args) {
try {
const response = await axios.get('https://example.com/', {
timeout: 10000
});
console.log('Body:', response.data);
} catch (error) {
console.error('Error:', error.message);
}
}
module.exports = { main };
""",
"should_fail": False,
"arguments": {},
"language": "nodejs",
},
"11 Dangerous import: Should fail due to os module import": {
"code": """
import os
def main():
pass
""",
"should_fail": True,
"arguments": {},
"language": "python",
},
"12 Dangerous import from subprocess: Should fail due to subprocess import": {
"code": """
from subprocess import Popen
def main():
pass
""",
"should_fail": True,
"arguments": {},
"language": "python",
},
"13 Dangerous call: Should fail due to eval function call": {
"code": """
def main():
eval('os.system("echo hello")')
""",
"should_fail": True,
"arguments": {},
"language": "python",
},
"14 Dangerous attribute access: Should fail due to shutil.rmtree": {
"code": """
import shutil
def main():
shutil.rmtree('/some/path')
""",
"should_fail": True,
"arguments": {},
"language": "python",
},
"15 Dangerous binary operation: Should fail due to unsafe concatenation leading to eval": {
"code": """
def main():
dangerous_string = "os." + "system"
eval(dangerous_string + '("echo hello")')
""",
"should_fail": True,
"arguments": {},
"language": "python",
},
"16 Dangerous function definition: Should fail due to user-defined eval function": {
"code": """
def eval_function():
eval('os.system("echo hello")')
def main():
eval_function()
""",
"should_fail": True,
"arguments": {},
"language": "python",
},
"17 Memory exhaustion(256m): Should fail due to exceeding memory limit(try to allocate 300m)": {
"code": """
def main():
x = ['a' * 1024 * 1024] * 300 # 300MB
""",
"should_fail": True,
"arguments": {},
"language": "python",
},
}
def print_test_report(results: Dict[str, TestResult]):
print("\n=== 🔍 Test Report ===")
max_name_len = max(len(name) for name in results)
for name, result in results.items():
status = "✅" if result.passed else "❌"
if result.expected_failure:
status = "⚠️" if result.passed else "✓" # Expected failure case
print(f"{status} {name.ljust(max_name_len)} {result.duration:.2f}s")
if result.error:
print(f" REQUEST ERROR: {result.error}")
if result.validation_error:
print(f" VALIDATION ERROR: {result.validation_error}")
if result.result and not result.passed:
print(f" STATUS: {result.result.status}")
if result.result.stderr:
print(f" STDERR: {result.result.stderr[:200]}...")
if result.result.detail:
print(f" DETAIL: {result.result.detail}")
passed = sum(1 for r in results.values() if ((not r.expected_failure and r.passed) or (r.expected_failure and not r.passed)))
failed = len(results) - passed
print("\n=== 📊 Statistics ===")
print(f"✅ Passed: {passed}")
print(f"❌ Failed: {failed}")
print(f"📌 Total: {len(results)}")
def main():
print(f"🔐 Starting sandbox security tests (API: {API_URL})")
print(f"🚀 Concurrent threads: {MAX_WORKERS}")
test_cases = get_test_cases()
results = {}
with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
futures = {}
for name, detail in test_cases.items():
# ✅ Log when a task is submitted
print(f"✅ Task submitted: {name}")
time.sleep(0.4)
future = executor.submit(execute_single_test, name, detail["code"], detail["language"], detail["arguments"], detail["should_fail"])
futures[future] = name
print("\n=== 🚦 Test Progress ===")
for i, future in enumerate(as_completed(futures)):
name = futures[future]
print(f" {i + 1}/{len(test_cases)} completed: {name}")
try:
results[name] = future.result()
except Exception as e:
print(f"⚠️ Test {name} execution exception: {str(e)}")
results[name] = TestResult(name=name, passed=False, duration=0, error=f"Execution exception: {str(e)}")
print_test_report(results)
if any(not r.passed and not r.expected_failure for r in results.values()):
exit(1)
if __name__ == "__main__":
main()
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
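A single sandbox request can be reproduced outside the test harness; this sketch reuses the suite's payload shape and default endpoint:

import base64
import textwrap

import requests

code = textwrap.dedent("""
    def main():
        return {"data": "hello, world"}
""")
payload = {
    "code_b64": base64.b64encode(code.encode("utf-8")).decode("utf-8"),
    "language": "python",
    "arguments": {},
}
response = requests.post("http://localhost:9385/run", json=payload, timeout=15)
print(response.json())  # expect status == "success" for this program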
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/sandbox/executor_manager/util.py | sandbox/executor_manager/util.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
def is_enabled(value: str) -> bool:
return str(value).strip().lower() in {"1", "true", "yes", "on"}
def env_setting_enabled(env_key: str, default: str = "false") -> bool:
value = os.getenv(env_key, default)
return is_enabled(value)
def is_valid_memory_limit(mem: str | None) -> bool:
"""
Return True if the input string is a valid Docker memory limit (e.g. '256m', '1g').
Units allowed: b, k, m, g (case-insensitive).
Disallows zero or negative values.
"""
if not mem or not isinstance(mem, str):
return False
mem = mem.strip().lower()
return re.fullmatch(r"[1-9]\d*(b|k|m|g)", mem) is not None
def parse_timeout_duration(timeout: str | None, default_seconds: int = 10) -> int:
"""
Parses a string like '90s', '2m', '1m30s' into total seconds (int).
Supports 's', 'm' (lower or upper case). Returns default if invalid.
'1m30s' -> 90
"""
if not timeout or not isinstance(timeout, str):
return default_seconds
timeout = timeout.strip().lower()
pattern = r"^(?:(\d+)m)?(?:(\d+)s)?$"
match = re.fullmatch(pattern, timeout)
if not match:
return default_seconds
minutes = int(match.group(1)) if match.group(1) else 0
seconds = int(match.group(2)) if match.group(2) else 0
total = minutes * 60 + seconds
return total if total > 0 else default_seconds
def format_timeout_duration(seconds: int) -> str:
"""
Formats an integer number of seconds into a string like '1m30s'.
90 -> '1m30s'
"""
if seconds < 60:
return f"{seconds}s"
minutes, sec = divmod(seconds, 60)
if sec == 0:
return f"{minutes}m"
return f"{minutes}m{sec}s"
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
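A few spot checks for the helpers above (assuming the module is importable as `util`):

from util import format_timeout_duration, is_enabled, is_valid_memory_limit, parse_timeout_duration

assert is_enabled("YES") and not is_enabled("off")
assert is_valid_memory_limit("256m") and not is_valid_memory_limit("0m")
assert parse_timeout_duration("1m30s") == 90
assert parse_timeout_duration("bogus") == 10  # invalid input falls back to the default
assert format_timeout_duration(90) == "1m30s"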
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/sandbox/executor_manager/main.py | sandbox/executor_manager/main.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from api.routes import router as api_router
from core.config import init
from fastapi import FastAPI
from services.limiter import limiter, rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
app = FastAPI(lifespan=init())
app.include_router(api_router)
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, rate_limit_exceeded_handler)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
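For local development, the app can presumably be served with uvicorn; the port below mirrors the test suite's default SANDBOX_API_URL and is an assumption:

import uvicorn

if __name__ == "__main__":
    uvicorn.run("main:app", host="0.0.0.0", port=9385)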
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/sandbox/executor_manager/services/security.py | sandbox/executor_manager/services/security.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ast
from typing import List, Tuple
from core.logger import logger
from models.enums import SupportLanguage
class SecurePythonAnalyzer(ast.NodeVisitor):
"""
An AST-based analyzer for detecting unsafe Python code patterns.
"""
DANGEROUS_IMPORTS = {"os", "subprocess", "sys", "shutil", "socket", "ctypes", "pickle", "threading", "multiprocessing", "asyncio", "http.client", "ftplib", "telnetlib"}
DANGEROUS_CALLS = {
"eval",
"exec",
"open",
"__import__",
"compile",
"input",
"system",
"popen",
"remove",
"rename",
"rmdir",
"chdir",
"chmod",
"chown",
"getattr",
"setattr",
"globals",
"locals",
"shutil.rmtree",
"subprocess.call",
"subprocess.Popen",
"ctypes",
"pickle.load",
"pickle.loads",
"pickle.dump",
"pickle.dumps",
}
def __init__(self):
self.unsafe_items: List[Tuple[str, int]] = []
def visit_Import(self, node: ast.Import):
"""Check for dangerous imports."""
for alias in node.names:
if alias.name.split(".")[0] in self.DANGEROUS_IMPORTS:
self.unsafe_items.append((f"Import: {alias.name}", node.lineno))
self.generic_visit(node)
def visit_ImportFrom(self, node: ast.ImportFrom):
"""Check for dangerous imports from specific modules."""
if node.module and node.module.split(".")[0] in self.DANGEROUS_IMPORTS:
self.unsafe_items.append((f"From Import: {node.module}", node.lineno))
self.generic_visit(node)
def visit_Call(self, node: ast.Call):
"""Check for dangerous function calls."""
if isinstance(node.func, ast.Name) and node.func.id in self.DANGEROUS_CALLS:
self.unsafe_items.append((f"Call: {node.func.id}", node.lineno))
self.generic_visit(node)
def visit_Attribute(self, node: ast.Attribute):
"""Check for dangerous attribute access."""
if isinstance(node.value, ast.Name) and node.value.id in self.DANGEROUS_IMPORTS:
self.unsafe_items.append((f"Attribute Access: {node.value.id}.{node.attr}", node.lineno))
self.generic_visit(node)
    def visit_BinOp(self, node: ast.BinOp):
        """Check for possibly unsafe string concatenation, e.g. `eval("os." + "system")`."""
        if (
            isinstance(node.op, ast.Add)
            and isinstance(node.left, ast.Constant)
            and isinstance(node.right, ast.Constant)
            and isinstance(node.left.value, str)
            and isinstance(node.right.value, str)
        ):
            self.unsafe_items.append(("Possible unsafe string concatenation", node.lineno))
        self.generic_visit(node)
def visit_FunctionDef(self, node: ast.FunctionDef):
"""Check for dangerous function definitions (e.g., user-defined eval)."""
if node.name in self.DANGEROUS_CALLS:
self.unsafe_items.append((f"Function Definition: {node.name}", node.lineno))
self.generic_visit(node)
def visit_Assign(self, node: ast.Assign):
"""Check for assignments to variables that might lead to dangerous operations."""
for target in node.targets:
if isinstance(target, ast.Name) and target.id in self.DANGEROUS_CALLS:
self.unsafe_items.append((f"Assignment to dangerous variable: {target.id}", node.lineno))
self.generic_visit(node)
def visit_Lambda(self, node: ast.Lambda):
"""Check for lambda functions with dangerous operations."""
if isinstance(node.body, ast.Call) and isinstance(node.body.func, ast.Name) and node.body.func.id in self.DANGEROUS_CALLS:
self.unsafe_items.append(("Lambda with dangerous function call", node.lineno))
self.generic_visit(node)
    def visit_ListComp(self, node: ast.ListComp):
        """Check for list comprehensions with dangerous operations."""
        if isinstance(node.elt, ast.Call) and isinstance(node.elt.func, ast.Name) and node.elt.func.id in self.DANGEROUS_CALLS:
            self.unsafe_items.append(("List comprehension with dangerous function call", node.lineno))
        # generic_visit walks the generators as well; visiting them explicitly
        # first would record duplicate findings for the same nodes.
        self.generic_visit(node)
def visit_DictComp(self, node: ast.DictComp):
"""Check for dictionary comprehensions with dangerous operations."""
# Check for dangerous calls in both the key and value expressions of the dictionary comprehension
if isinstance(node.key, ast.Call) and isinstance(node.key.func, ast.Name) and node.key.func.id in self.DANGEROUS_CALLS:
self.unsafe_items.append(("Dict comprehension with dangerous function call in key", node.lineno))
if isinstance(node.value, ast.Call) and isinstance(node.value.func, ast.Name) and node.value.func.id in self.DANGEROUS_CALLS:
self.unsafe_items.append(("Dict comprehension with dangerous function call in value", node.lineno))
# Visit other sub-nodes (e.g., the generators in the comprehension)
self.generic_visit(node)
    def visit_SetComp(self, node: ast.SetComp):
        """Check for set comprehensions with dangerous operations."""
        if isinstance(node.elt, ast.Call) and isinstance(node.elt.func, ast.Name) and node.elt.func.id in self.DANGEROUS_CALLS:
            self.unsafe_items.append(("Set comprehension with dangerous function call", node.lineno))
        # As with list comprehensions, generic_visit already covers the generators.
        self.generic_visit(node)
def visit_Yield(self, node: ast.Yield):
"""Check for yield statements that could be used to produce unsafe values."""
if isinstance(node.value, ast.Call) and isinstance(node.value.func, ast.Name) and node.value.func.id in self.DANGEROUS_CALLS:
self.unsafe_items.append(("Yield with dangerous function call", node.lineno))
self.generic_visit(node)
def analyze_code_security(code: str, language: SupportLanguage) -> Tuple[bool, List[Tuple[str, int]]]:
"""
Analyze the provided code string and return whether it's safe and why.
:param code: The source code to analyze.
:param language: The programming language of the code.
:return: (is_safe: bool, issues: List of (description, line number))
"""
if language == SupportLanguage.PYTHON:
try:
tree = ast.parse(code)
analyzer = SecurePythonAnalyzer()
analyzer.visit(tree)
return len(analyzer.unsafe_items) == 0, analyzer.unsafe_items
except Exception as e:
logger.error(f"[SafeCheck] Python parsing failed: {str(e)}")
return False, [(f"Parsing Error: {str(e)}", -1)]
else:
logger.warning(f"[SafeCheck] Unsupported language for security analysis: {language} — defaulting to SAFE (manual review recommended)")
return True, [(f"Unsupported language for security analysis: {language} — defaulted to SAFE, manual review recommended", -1)]
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/sandbox/executor_manager/services/__init__.py | sandbox/executor_manager/services/__init__.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/sandbox/executor_manager/services/execution.py | sandbox/executor_manager/services/execution.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import base64
import json
import os
import time
import uuid
from core.config import TIMEOUT
from core.container import allocate_container_blocking, release_container
from core.logger import logger
from models.enums import ResourceLimitType, ResultStatus, RuntimeErrorType, SupportLanguage, UnauthorizedAccessType
from models.schemas import CodeExecutionRequest, CodeExecutionResult
from utils.common import async_run_command
async def execute_code(req: CodeExecutionRequest):
"""Fully asynchronous execution logic"""
language = req.language
container = await allocate_container_blocking(language)
if not container:
return CodeExecutionResult(
status=ResultStatus.PROGRAM_RUNNER_ERROR,
stdout="",
stderr="Container pool is busy",
exit_code=-10,
detail="no_available_container",
)
task_id = str(uuid.uuid4())
workdir = f"/tmp/sandbox_{task_id}"
os.makedirs(workdir, mode=0o700, exist_ok=True)
try:
if language == SupportLanguage.PYTHON:
code_name = "main.py"
# code
code_path = os.path.join(workdir, code_name)
with open(code_path, "wb") as f:
f.write(base64.b64decode(req.code_b64))
# runner
runner_name = "runner.py"
runner_path = os.path.join(workdir, runner_name)
with open(runner_path, "w") as f:
f.write("""import json
import os
import sys
sys.path.insert(0, os.path.dirname(__file__))
from main import main
if __name__ == "__main__":
args = json.loads(sys.argv[1])
result = main(**args)
if result is not None:
print(result)
""")
elif language == SupportLanguage.NODEJS:
code_name = "main.js"
code_path = os.path.join(workdir, "main.js")
with open(code_path, "wb") as f:
f.write(base64.b64decode(req.code_b64))
runner_name = "runner.js"
runner_path = os.path.join(workdir, "runner.js")
with open(runner_path, "w") as f:
f.write("""
const fs = require('fs');
const path = require('path');
const args = JSON.parse(process.argv[2]);
const mainPath = path.join(__dirname, 'main.js');
function isPromise(value) {
return Boolean(value && typeof value.then === 'function');
}
if (fs.existsSync(mainPath)) {
const mod = require(mainPath);
const main = typeof mod === 'function' ? mod : mod.main;
if (typeof main !== 'function') {
console.error('Error: main is not a function');
process.exit(1);
}
if (typeof args === 'object' && args !== null) {
try {
const result = main(args);
if (isPromise(result)) {
result.then(output => {
if (output !== null) {
console.log(output);
}
                }).catch(err => {
                    console.error('Error in async main function:', err);
                    process.exit(1); // report async failures with a nonzero exit code
                });
} else {
if (result !== null) {
console.log(result);
}
}
        } catch (err) {
            console.error('Error when executing main:', err);
            process.exit(1); // report sync failures with a nonzero exit code
        }
} else {
console.error('Error: args is not a valid object:', args);
}
} else {
console.error('main.js not found in the current directory');
}
""")
# dirs
returncode, _, stderr = await async_run_command("docker", "exec", container, "mkdir", "-p", f"/workspace/{task_id}", timeout=5)
if returncode != 0:
raise RuntimeError(f"Directory creation failed: {stderr}")
# archive
tar_proc = await asyncio.create_subprocess_exec("tar", "czf", "-", "-C", workdir, code_name, runner_name, stdout=asyncio.subprocess.PIPE)
tar_stdout, _ = await tar_proc.communicate()
# unarchive
docker_proc = await asyncio.create_subprocess_exec(
"docker", "exec", "-i", container, "tar", "xzf", "-", "-C", f"/workspace/{task_id}", stdin=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await docker_proc.communicate(input=tar_stdout)
if docker_proc.returncode != 0:
raise RuntimeError(stderr.decode())
# exec
start_time = time.time()
try:
logger.info(f"Passed in args: {req.arguments}")
args_json = json.dumps(req.arguments or {})
run_args = [
"docker",
"exec",
"--workdir",
f"/workspace/{task_id}",
container,
"timeout",
str(TIMEOUT),
language,
]
# flags
if language == SupportLanguage.PYTHON:
run_args.extend(["-I", "-B"])
            elif language == SupportLanguage.NODEJS:
                pass  # no extra interpreter flags needed for Node.js
            else:
                # Unlike `assert False`, this is not stripped when running with -O.
                raise AssertionError(f"Unsupported language: {language}")
run_args.extend([runner_name, args_json])
returncode, stdout, stderr = await async_run_command(
*run_args,
timeout=TIMEOUT + 5,
)
time_used_ms = (time.time() - start_time) * 1000
logger.info("----------------------------------------------")
logger.info(f"Code: {str(base64.b64decode(req.code_b64))}")
logger.info(f"{returncode=}")
logger.info(f"{stdout=}")
logger.info(f"{stderr=}")
logger.info(f"{args_json=}")
if returncode == 0:
return CodeExecutionResult(
status=ResultStatus.SUCCESS,
                    stdout=stdout,  # async_run_command already returns decoded text
stderr=stderr,
exit_code=0,
time_used_ms=time_used_ms,
)
elif returncode == 124:
return CodeExecutionResult(
status=ResultStatus.RESOURCE_LIMIT_EXCEEDED,
stdout="",
stderr="Execution timeout",
exit_code=-124,
resource_limit_type=ResourceLimitType.TIME,
time_used_ms=time_used_ms,
)
elif returncode == 137:
return CodeExecutionResult(
status=ResultStatus.RESOURCE_LIMIT_EXCEEDED,
stdout="",
stderr="Memory limit exceeded (killed by OOM)",
exit_code=-137,
resource_limit_type=ResourceLimitType.MEMORY,
time_used_ms=time_used_ms,
)
return analyze_error_result(stderr, returncode)
except asyncio.TimeoutError:
await async_run_command("docker", "exec", container, "pkill", "-9", language)
return CodeExecutionResult(
status=ResultStatus.RESOURCE_LIMIT_EXCEEDED,
stdout="",
stderr="Execution timeout",
exit_code=-1,
resource_limit_type=ResourceLimitType.TIME,
time_used_ms=(time.time() - start_time) * 1000,
)
except Exception as e:
logger.error(f"Execution exception: {str(e)}")
return CodeExecutionResult(status=ResultStatus.PROGRAM_RUNNER_ERROR, stdout="", stderr=str(e), exit_code=-3, detail="internal_error")
finally:
# cleanup
cleanup_tasks = [async_run_command("docker", "exec", container, "rm", "-rf", f"/workspace/{task_id}"), async_run_command("rm", "-rf", workdir)]
await asyncio.gather(*cleanup_tasks, return_exceptions=True)
await release_container(container, language)
def analyze_error_result(stderr: str, exit_code: int) -> CodeExecutionResult:
"""Analyze the error result and classify it"""
if "Permission denied" in stderr:
return CodeExecutionResult(
status=ResultStatus.UNAUTHORIZED_ACCESS,
stdout="",
stderr=stderr,
exit_code=exit_code,
unauthorized_access_type=UnauthorizedAccessType.FILE_ACCESS,
)
elif "Operation not permitted" in stderr:
return CodeExecutionResult(
status=ResultStatus.UNAUTHORIZED_ACCESS,
stdout="",
stderr=stderr,
exit_code=exit_code,
unauthorized_access_type=UnauthorizedAccessType.DISALLOWED_SYSCALL,
)
elif "MemoryError" in stderr:
return CodeExecutionResult(
status=ResultStatus.RESOURCE_LIMIT_EXCEEDED,
stdout="",
stderr=stderr,
exit_code=exit_code,
resource_limit_type=ResourceLimitType.MEMORY,
)
else:
return CodeExecutionResult(
status=ResultStatus.PROGRAM_ERROR,
stdout="",
stderr=stderr,
exit_code=exit_code,
runtime_error_type=RuntimeErrorType.NONZERO_EXIT,
)
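
# Usage sketch (assumes a warmed container pool from core.container and a
# running event loop; the payload below is hypothetical):
async def _demo_run() -> CodeExecutionResult:
    code = "def main():\n    return 'hello'"
    req = CodeExecutionRequest(
        code_b64=base64.b64encode(code.encode("utf-8")).decode("utf-8"),
        language=SupportLanguage.PYTHON,
        arguments={},
    )
    return await execute_code(req)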
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/sandbox/executor_manager/services/limiter.py | sandbox/executor_manager/services/limiter.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fastapi import Request
from fastapi.responses import JSONResponse
from models.enums import ResultStatus
from models.schemas import CodeExecutionResult
from slowapi import Limiter
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
limiter = Limiter(key_func=get_remote_address)
async def rate_limit_exceeded_handler(request: Request, exc: Exception) -> JSONResponse:
if isinstance(exc, RateLimitExceeded):
return JSONResponse(
content=CodeExecutionResult(
status=ResultStatus.PROGRAM_RUNNER_ERROR,
stdout="",
stderr="Too many requests, please try again later",
exit_code=-429,
detail="Too many requests, please try again later",
).model_dump(),
)
raise exc
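
# Routes opt in per handler, e.g. `@limiter.limit("5/second")` as used in
# api/handlers.py; slowapi also requires `app.state.limiter = limiter` on the
# application object, which main.py wires up.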
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/sandbox/executor_manager/models/schemas.py | sandbox/executor_manager/models/schemas.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
from typing import Optional
from pydantic import BaseModel, Field, field_validator
from models.enums import ResourceLimitType, ResultStatus, RuntimeErrorType, SupportLanguage, UnauthorizedAccessType
class CodeExecutionResult(BaseModel):
status: ResultStatus
stdout: str
stderr: str
exit_code: int
detail: Optional[str] = None
# Resource usage
time_used_ms: Optional[float] = None
memory_used_kb: Optional[float] = None
# Error details
resource_limit_type: Optional[ResourceLimitType] = None
unauthorized_access_type: Optional[UnauthorizedAccessType] = None
runtime_error_type: Optional[RuntimeErrorType] = None
class CodeExecutionRequest(BaseModel):
code_b64: str = Field(..., description="Base64 encoded code string")
language: SupportLanguage = Field(default=SupportLanguage.PYTHON, description="Programming language")
arguments: Optional[dict] = Field(default={}, description="Arguments")
@field_validator("code_b64")
@classmethod
def validate_base64(cls, v: str) -> str:
try:
base64.b64decode(v, validate=True)
return v
except Exception as e:
raise ValueError(f"Invalid base64 encoding: {str(e)}")
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/sandbox/executor_manager/models/enums.py | sandbox/executor_manager/models/enums.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from enum import Enum
class SupportLanguage(str, Enum):
PYTHON = "python"
NODEJS = "nodejs"
class ResultStatus(str, Enum):
SUCCESS = "success"
PROGRAM_ERROR = "program_error"
RESOURCE_LIMIT_EXCEEDED = "resource_limit_exceeded"
UNAUTHORIZED_ACCESS = "unauthorized_access"
RUNTIME_ERROR = "runtime_error"
PROGRAM_RUNNER_ERROR = "program_runner_error"
class ResourceLimitType(str, Enum):
TIME = "time"
MEMORY = "memory"
OUTPUT = "output"
class UnauthorizedAccessType(str, Enum):
DISALLOWED_SYSCALL = "disallowed_syscall"
FILE_ACCESS = "file_access"
NETWORK_ACCESS = "network_access"
class RuntimeErrorType(str, Enum):
SIGNALLED = "signalled"
NONZERO_EXIT = "nonzero_exit"
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/sandbox/executor_manager/models/__init__.py | sandbox/executor_manager/models/__init__.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/sandbox/executor_manager/utils/common.py | sandbox/executor_manager/utils/common.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
from typing import Tuple
async def async_run_command(*args, timeout: float = 5) -> Tuple[int, str, str]:
"""Safe asynchronous command execution tool"""
proc = await asyncio.create_subprocess_exec(*args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
try:
stdout, stderr = await asyncio.wait_for(proc.communicate(), timeout=timeout)
if proc.returncode is None:
raise RuntimeError("Process finished but returncode is None")
return proc.returncode, stdout.decode(), stderr.decode()
    except asyncio.TimeoutError:
        proc.kill()
        await proc.wait()
        # Re-raise the TimeoutError so callers (e.g. execute_code) can classify
        # it as a time-limit violation rather than a generic runner error.
        raise
except Exception as e:
proc.kill()
await proc.wait()
raise e
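
# Quick manual check (assumes `echo` exists on the host):
if __name__ == "__main__":
    rc, out, err = asyncio.run(async_run_command("echo", "hello", timeout=2))
    print(rc, out.strip(), err)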
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/sandbox/executor_manager/utils/__init__.py | sandbox/executor_manager/utils/__init__.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/sandbox/executor_manager/api/__init__.py | sandbox/executor_manager/api/__init__.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/sandbox/executor_manager/api/routes.py | sandbox/executor_manager/api/routes.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fastapi import APIRouter
from api.handlers import healthz_handler, run_code_handler
router = APIRouter()
router.get("/healthz")(healthz_handler)
router.post("/run")(run_code_handler)
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |
infiniflow/ragflow | https://github.com/infiniflow/ragflow/blob/5ebe334a2f452cb35d4247a8c688bd3d3c76be4c/sandbox/executor_manager/api/handlers.py | sandbox/executor_manager/api/handlers.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
from core.container import _CONTAINER_EXECUTION_SEMAPHORES
from core.logger import logger
from fastapi import Request
from models.enums import ResultStatus, SupportLanguage
from models.schemas import CodeExecutionRequest, CodeExecutionResult
from services.execution import execute_code
from services.limiter import limiter
from services.security import analyze_code_security
async def healthz_handler():
return {"status": "ok"}
@limiter.limit("5/second")
async def run_code_handler(req: CodeExecutionRequest, request: Request):
logger.info("🟢 Received /run request")
async with _CONTAINER_EXECUTION_SEMAPHORES[req.language]:
code = base64.b64decode(req.code_b64).decode("utf-8")
if req.language == SupportLanguage.NODEJS:
code += "\n\nmodule.exports = { main };"
req.code_b64 = base64.b64encode(code.encode("utf-8")).decode("utf-8")
is_safe, issues = analyze_code_security(code, language=req.language)
if not is_safe:
issue_details = "\n".join([f"Line {lineno}: {issue}" for issue, lineno in issues])
return CodeExecutionResult(status=ResultStatus.PROGRAM_RUNNER_ERROR, stdout="", stderr=issue_details, exit_code=-999, detail="Code is unsafe")
try:
return await execute_code(req)
except Exception as e:
return CodeExecutionResult(status=ResultStatus.PROGRAM_RUNNER_ERROR, stdout="", stderr=str(e), exit_code=-999, detail="unhandled_exception")
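
# Request sketch (hypothetical host/port; body fields match CodeExecutionRequest):
#   curl -X POST http://localhost:9385/run \
#     -H 'Content-Type: application/json' \
#     -d '{"code_b64": "<base64 of source defining main()>", "language": "python", "arguments": {}}'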
| python | Apache-2.0 | 5ebe334a2f452cb35d4247a8c688bd3d3c76be4c | 2026-01-04T14:38:19.006015Z | false |